797 files changed, 13735 insertions, 4372 deletions
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index db7aab1516de..b8d0a30f1644 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -192,3 +192,14 @@ Date: November 2017 Contact: "Sheng Yong" <shengyong1@huawei.com> Description: Controls readahead inode block in readdir. + +What: /sys/fs/f2fs/<disk>/extension_list +Date: Feburary 2018 +Contact: "Chao Yu" <yuchao0@huawei.com> +Description: + Used to control configure extension list: + - Query: cat /sys/fs/f2fs/<disk>/extension_list + - Add: echo '[h/c]extension' > /sys/fs/f2fs/<disk>/extension_list + - Del: echo '[h/c]!extension' > /sys/fs/f2fs/<disk>/extension_list + - [h] means add/del hot file extension + - [c] means add/del cold file extension diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt index caf297bee1fb..c28d4eb83b76 100644 --- a/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt +++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt @@ -35,6 +35,15 @@ Optional properties: - ti,palmas-enable-dvfs2: Enable DVFS2. Configure pins for DVFS2 mode. Selection primary or secondary function associated to GPADC_START and SYSEN2 pin/pad for DVFS2 interface +- ti,palmas-override-powerhold: This is applicable for PMICs for which + GPIO7 is configured in POWERHOLD mode which has higher priority + over DEV_ON bit and keeps the PMIC supplies on even after the DEV_ON + bit is turned off. This property enables driver to over ride the + POWERHOLD value to GPIO7 so as to turn off the PMIC in power off + scenarios. So for GPIO7 if ti,palmas-override-powerhold is set + then the GPIO_7 field should never be muxed to anything else. + It should be set to POWERHOLD by default and only in case of + power off scenarios the driver will over ride the mux value. This binding uses the following generic properties as defined in pinctrl-bindings.txt: diff --git a/Documentation/devicetree/bindings/usb/msm-android-usb.txt b/Documentation/devicetree/bindings/usb/msm-android-usb.txt new file mode 100644 index 000000000000..4422a186c43a --- /dev/null +++ b/Documentation/devicetree/bindings/usb/msm-android-usb.txt @@ -0,0 +1,36 @@ +ANDROID USB: + +This describes the device tree node for the Android USB gadget device. +This works in conjunction with a USB Device Controller (UDC) to provide +a dynamically configurable composition of functions to be exposed when +connected to a USB host. + +Required properties: +- compatible: should be "qcom,android-usb" + +Optional properties : +- reg : offset and length of memory region that is used by device to + update USB PID and serial numbers used by bootloader in DLOAD mode. +- qcom,pm-qos-latency : This property must be a list of three integer values + (perf, normal, sleep) where each value respresents DMA latency in microsecs. + First value represents DMA latency to vote with pm_qos when back to back USB + transfers are happening and it requires USB thoughput to be maximum. + Second value represents value to vote when not many USB transfers are + happening and it is OK to have higher DMA latency to save power. + Third value represents DMA latency to vote when USB BUS is IDLE and absolutely + no transfers are happening. It should allow transition to lowest power state. +- qcom,usb-core-id: Index to refer USB hardware core to bind android gadget driver + with UDC if multiple USB peripheral controllers are present. 
If unspecified, + core is set to zero by default. +- qcom,supported-func: Represents list of supported function drivers. If this + property is present android USB driver dynamically creats the list of + supported function drivers and uses this list instead of statically defined default + supported function driver list. +Example Android USB device node : + android_usb@fc42b0c8 { + compatible = "qcom,android-usb"; + reg = <0xfc42b0c8 0xc8>; + qcom,pm-qos-latency = <2 1001 12701>; + qcom,supported-func = "rndis_gsi","ecm_gsi","rmnet_gsi"; + qcom,usb-core-id = <1>; + }; diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt index 6cf9ad12c57f..1f52baea2f69 100644 --- a/Documentation/filesystems/f2fs.txt +++ b/Documentation/filesystems/f2fs.txt @@ -172,6 +172,23 @@ offgrpjquota Turn off group journelled quota. offprjjquota Turn off project journelled quota. quota Enable plain user disk quota accounting. noquota Disable all plain disk quota option. +whint_mode=%s Control which write hints are passed down to block + layer. This supports "off", "user-based", and + "fs-based". In "off" mode (default), f2fs does not pass + down hints. In "user-based" mode, f2fs tries to pass + down hints given by users. And in "fs-based" mode, f2fs + passes down hints with its policy. +alloc_mode=%s Adjust block allocation policy, which supports "reuse" + and "default". +fsync_mode=%s Control the policy of fsync. Currently supports "posix" + and "strict". In "posix" mode, which is default, fsync + will follow POSIX semantics and does a light operation + to improve the filesystem performance. In "strict" mode, + fsync will be heavy and behaves in line with xfs, ext4 + and btrfs, where xfstest generic/342 will pass, but the + performance will regress. +test_dummy_encryption Enable dummy encryption, which provides a fake fscrypt + context. The fake fscrypt context is used by xfstests. ================================================================================ DEBUGFS ENTRIES diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index aaafd8178eab..e7aa730b927b 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -387,32 +387,6 @@ is not associated with a file: or if empty, the mapping is anonymous. -The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint -of the individual tasks of a process. In this file you will see a mapping marked -as [stack] if that task sees it as a stack. Hence, for the example above, the -task-level map, i.e. 
/proc/PID/task/TID/maps for thread 1001 will look like this: - -08048000-08049000 r-xp 00000000 03:00 8312 /opt/test -08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test -0804a000-0806b000 rw-p 00000000 00:00 0 [heap] -a7cb1000-a7cb2000 ---p 00000000 00:00 0 -a7cb2000-a7eb2000 rw-p 00000000 00:00 0 -a7eb2000-a7eb3000 ---p 00000000 00:00 0 -a7eb3000-a7ed5000 rw-p 00000000 00:00 0 [stack] -a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6 -a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6 -a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6 -a800b000-a800e000 rw-p 00000000 00:00 0 -a800e000-a8022000 r-xp 00000000 03:00 14462 /lib/libpthread.so.0 -a8022000-a8023000 r--p 00013000 03:00 14462 /lib/libpthread.so.0 -a8023000-a8024000 rw-p 00014000 03:00 14462 /lib/libpthread.so.0 -a8024000-a8027000 rw-p 00000000 00:00 0 -a8027000-a8043000 r-xp 00000000 03:00 8317 /lib/ld-linux.so.2 -a8043000-a8044000 r--p 0001b000 03:00 8317 /lib/ld-linux.so.2 -a8044000-a8045000 rw-p 0001c000 03:00 8317 /lib/ld-linux.so.2 -aff35000-aff4a000 rw-p 00000000 00:00 0 -ffffe000-fffff000 r-xp 00000000 00:00 0 [vdso] - The /proc/PID/smaps is an extension based on maps, showing the memory consumption for each of the process's mappings. For each of mappings there is a series of lines such as the following: @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 120 +SUBLEVEL = 128 EXTRAVERSION = NAME = Blurry Fish Butt @@ -810,6 +810,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) +# clang sets -fmerge-all-constants by default as optimization, but this +# is non-conforming behavior for C and in fact breaks the kernel, so we +# need to disable it here generally. +KBUILD_CFLAGS += $(call cc-option,-fno-merge-all-constants) + +# for gcc -fno-merge-all-constants disables everything, but it is fine +# to have actual conforming behavior enabled. 
+KBUILD_CFLAGS += $(call cc-option,-fmerge-constants) + # Make sure -fstack-check isn't enabled (like gentoo apparently did) KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,) diff --git a/arch/alpha/kernel/console.c b/arch/alpha/kernel/console.c index 6a61deed4a85..ab228ed45945 100644 --- a/arch/alpha/kernel/console.c +++ b/arch/alpha/kernel/console.c @@ -20,6 +20,7 @@ struct pci_controller *pci_vga_hose; static struct resource alpha_vga = { .name = "alpha-vga+", + .flags = IORESOURCE_IO, .start = 0x3C0, .end = 0x3DF }; diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index 00352e761b8c..5c3bd34af9fd 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -411,6 +411,7 @@ interrupt-controller; ti,system-power-controller; + ti,palmas-override-powerhold; tps659038_pmic { compatible = "ti,tps659038-pmic"; diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index 864f60020124..0736d04f032e 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -410,6 +410,8 @@ tps659038: tps659038@58 { compatible = "ti,tps659038"; reg = <0x58>; + ti,palmas-override-powerhold; + ti,system-power-controller; tps659038_pmic { compatible = "ti,tps659038-pmic"; diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts index 40a474c4374b..4c52358734ef 100644 --- a/arch/arm/boot/dts/exynos4412-trats2.dts +++ b/arch/arm/boot/dts/exynos4412-trats2.dts @@ -359,7 +359,7 @@ reg = <0>; vdd3-supply = <&lcd_vdd3_reg>; vci-supply = <&ldo25_reg>; - reset-gpios = <&gpy4 5 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpf2 1 GPIO_ACTIVE_HIGH>; power-on-delay= <50>; reset-delay = <100>; init-delay = <100>; diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts index 96d7eede412e..036c9bd9bf75 100644 --- a/arch/arm/boot/dts/imx53-qsrb.dts +++ b/arch/arm/boot/dts/imx53-qsrb.dts @@ -23,7 +23,7 @@ imx53-qsrb { pinctrl_pmic: pmicgrp { fsl,pins = < - MX53_PAD_CSI0_DAT5__GPIO5_23 0x1e4 /* IRQ */ + MX53_PAD_CSI0_DAT5__GPIO5_23 0x1c4 /* IRQ */ >; }; }; diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi index 9e096d811bed..7a032dd84bb2 100644 --- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi +++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi @@ -88,6 +88,7 @@ clocks = <&clks 201>; VDDA-supply = <®_2p5v>; VDDIO-supply = <®_3p3v>; + lrclk-strength = <3>; }; }; diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index 80f6c786a37e..e05670423d8b 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi @@ -90,6 +90,8 @@ }; &i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; clock-frequency = <2600000>; twl: twl@48 { @@ -137,6 +139,12 @@ OMAP3_CORE1_IOPAD(0x218e, PIN_OUTPUT | MUX_MODE4) /* mcbsp1_fsr.gpio_157 */ >; }; + i2c1_pins: pinmux_i2c1_pins { + pinctrl-single,pins = < + OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */ + OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */ + >; + }; }; &omap3_pmx_core2 { diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index 9430a9928199..00de37fe5f8a 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -132,7 +132,7 @@ }; esdhc: esdhc@1560000 { - compatible = "fsl,esdhc"; + compatible = "fsl,ls1021a-esdhc", "fsl,esdhc"; reg = <0x0 0x1560000 0x0 0x10000>; 
interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>; clock-frequency = <0>; diff --git a/arch/arm/boot/dts/moxart-uc7112lx.dts b/arch/arm/boot/dts/moxart-uc7112lx.dts index 10d088df0c35..4a962a26482d 100644 --- a/arch/arm/boot/dts/moxart-uc7112lx.dts +++ b/arch/arm/boot/dts/moxart-uc7112lx.dts @@ -6,7 +6,7 @@ */ /dts-v1/; -/include/ "moxart.dtsi" +#include "moxart.dtsi" / { model = "MOXA UC-7112-LX"; diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi index 1fd27ed65a01..64f2f44235d0 100644 --- a/arch/arm/boot/dts/moxart.dtsi +++ b/arch/arm/boot/dts/moxart.dtsi @@ -6,6 +6,7 @@ */ /include/ "skeleton.dtsi" +#include <dt-bindings/interrupt-controller/irq.h> / { compatible = "moxa,moxart"; @@ -36,8 +37,8 @@ ranges; intc: interrupt-controller@98800000 { - compatible = "moxa,moxart-ic"; - reg = <0x98800000 0x38>; + compatible = "moxa,moxart-ic", "faraday,ftintc010"; + reg = <0x98800000 0x100>; interrupt-controller; #interrupt-cells = <2>; interrupt-mask = <0x00080000>; @@ -59,7 +60,7 @@ timer: timer@98400000 { compatible = "moxa,moxart-timer"; reg = <0x98400000 0x42>; - interrupts = <19 1>; + interrupts = <19 IRQ_TYPE_EDGE_FALLING>; clocks = <&clk_apb>; }; @@ -80,7 +81,7 @@ dma: dma@90500000 { compatible = "moxa,moxart-dma"; reg = <0x90500080 0x40>; - interrupts = <24 0>; + interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; }; @@ -93,7 +94,7 @@ sdhci: sdhci@98e00000 { compatible = "moxa,moxart-sdhci"; reg = <0x98e00000 0x5C>; - interrupts = <5 0>; + interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk_apb>; dmas = <&dma 5>, <&dma 5>; @@ -120,7 +121,7 @@ mac0: mac@90900000 { compatible = "moxa,moxart-mac"; reg = <0x90900000 0x90>; - interrupts = <25 0>; + interrupts = <25 IRQ_TYPE_LEVEL_HIGH>; phy-handle = <ðphy0>; phy-mode = "mii"; status = "disabled"; @@ -129,7 +130,7 @@ mac1: mac@92000000 { compatible = "moxa,moxart-mac"; reg = <0x92000000 0x90>; - interrupts = <27 0>; + interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; phy-handle = <ðphy1>; phy-mode = "mii"; status = "disabled"; @@ -138,7 +139,7 @@ uart0: uart@98200000 { compatible = "ns16550a"; reg = <0x98200000 0x20>; - interrupts = <31 8>; + interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; reg-shift = <2>; reg-io-width = <4>; clock-frequency = <14745600>; diff --git a/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi b/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi index e731c7edd518..1ea94d546542 100644 --- a/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi +++ b/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -13,6 +13,7 @@ #include "msm8996-pinctrl.dtsi" #include "apq8096-camera-sensor-dragonboard.dtsi" #include "apq8096-ba.dtsi" +#include "msm8996-mdss-panels.dtsi" / { bluetooth: bt_qca6174 { @@ -326,8 +327,6 @@ }; }; -#include "msm8996-sde-display.dtsi" - &dsi_hx8379a_fwvga_truly_vid { qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm"; qcom,mdss-dsi-bl-min-level = <1>; diff --git a/arch/arm/boot/dts/qcom/apq8096-v2-auto-dragonboard.dts b/arch/arm/boot/dts/qcom/apq8096-v2-auto-dragonboard.dts index d200bf718de2..575b85d8d111 100644 --- a/arch/arm/boot/dts/qcom/apq8096-v2-auto-dragonboard.dts +++ b/arch/arm/boot/dts/qcom/apq8096-v2-auto-dragonboard.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. 
+/* Copyright (c) 2015,2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,7 @@ /dts-v1/; #include "apq8096-v2.dtsi" +#include "msm8996-sde.dtsi" #include "apq8096-auto-dragonboard.dtsi" / { diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-auto-dragonboard.dts b/arch/arm/boot/dts/qcom/apq8096-v3-auto-dragonboard.dts index 0f8e148ec3ab..7028d372dd04 100644 --- a/arch/arm/boot/dts/qcom/apq8096-v3-auto-dragonboard.dts +++ b/arch/arm/boot/dts/qcom/apq8096-v3-auto-dragonboard.dts @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015,2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,7 @@ /dts-v1/; #include "apq8096-v3.dtsi" +#include "msm8996-sde.dtsi" #include "apq8096-auto-dragonboard.dtsi" / { diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi index 9e14211bdac8..2adfb8b749eb 100644 --- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi @@ -13,6 +13,7 @@ #include "msm8996-pinctrl.dtsi" #include "msm8996-camera-sensor-adp.dtsi" #include "msm8996-wsa881x.dtsi" +#include "msm8996-sde.dtsi" / { bluetooth: bt_qca6174 { diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi index bbc0f37af17d..7cbc09767f62 100644 --- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi @@ -13,6 +13,7 @@ #include "msm8996-pinctrl.dtsi" #include "msm8996-camera-sensor-auto-cdp.dtsi" #include "msm8996-wsa881x.dtsi" +#include "msm8996-sde.dtsi" / { bluetooth: bt_qca6174 { @@ -694,6 +695,8 @@ vdd-ntn-hsic-supply = <&pm8994_l25>; vdd-ntn-pci-supply = <&pm8994_s4>; vdd-ntn-io-supply = <&pm8994_s4>; + vdd-ntn-phy-supply = <&pm8994_l29>; + vdd-ntn-core-supply = <&pm8994_l31>; qcom,ntn-rst-delay-msec = <100>; qcom,ntn-rc-num = <1>; @@ -1223,6 +1226,7 @@ &usb2s { status = "ok"; vbus_dwc3-supply = <&usb2_otg_switch>; + qcom,no-wakeup-src-in-hostmode; dwc3@7600000 { dr_mode = "host"; }; @@ -1233,6 +1237,7 @@ vbus_dwc3-supply = <&usb_otg_switch>; vdda33-supply = <&pm8994_l24>; vdda18-supply = <&pm8994_l12>; + qcom,no-wakeup-src-in-hostmode; }; &blsp1_uart2 { diff --git a/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi index 6fafb8b38d06..f544934c84e4 100644 --- a/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016,2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -340,7 +340,6 @@ }; }; -#include "msm8996-sde-display.dtsi" #include "msm8996-mdss-panels.dtsi" &mdss_mdp { @@ -400,7 +399,6 @@ qcom,mdss-dsi-bl-min-level = <1>; qcom,mdss-dsi-bl-max-level = <4095>; qcom,cont-splash-enabled; - qcom,panel-supply-entries = <&dsi_panel_pwr_supply_vdd_no_labibb>; qcom,platform-reset-gpio = <&tlmm 8 0>; qcom,platform-bklight-en-gpio = <&pm8994_gpios 14 0>; qcom,5v-boost-gpio = <&pmi8994_gpios 8 0>; diff --git a/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi b/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi index 3223d874cc23..c06a44c31b34 100644 --- a/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-cv2x.dtsi @@ -190,12 +190,6 @@ }; }; -/delete-node/ &sde_kms; -/delete-node/ &sde_dsi0; -/delete-node/ &sde_dsi1; -/delete-node/ &sde_dsi_phy0; -/delete-node/ &sde_dsi_phy1; -/delete-node/ &sde_hdmi_tx; /delete-node/ &mdss_mdp; /delete-node/ &mdss_dsi; /delete-node/ &msm_ext_disp; diff --git a/arch/arm/boot/dts/qcom/msm8996-dtp.dtsi b/arch/arm/boot/dts/qcom/msm8996-dtp.dtsi index c2667b49fedb..23709bf5948d 100644 --- a/arch/arm/boot/dts/qcom/msm8996-dtp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-dtp.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016,2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -11,9 +11,9 @@ */ #include "msm8996-pinctrl.dtsi" -#include "msm8996-sde-display.dtsi" #include "msm8996-camera-sensor-dtp.dtsi" #include "msm8996-wsa881x.dtsi" +#include "msm8996-mdss-panels.dtsi" / { model = "Qualcomm Technologies, Inc. MSM8996 DTP"; diff --git a/arch/arm/boot/dts/qcom/msm8996-fluid.dtsi b/arch/arm/boot/dts/qcom/msm8996-fluid.dtsi index 86bc8099c4d6..de63afeb1b69 100644 --- a/arch/arm/boot/dts/qcom/msm8996-fluid.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-fluid.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016,2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -587,7 +587,6 @@ status = "ok"; }; -#include "msm8996-sde-display.dtsi" #include "msm8996-mdss-panels.dtsi" &mdss_mdp { diff --git a/arch/arm/boot/dts/qcom/msm8996-liquid.dtsi b/arch/arm/boot/dts/qcom/msm8996-liquid.dtsi index 571e67a7dd93..3eef360c54c2 100644 --- a/arch/arm/boot/dts/qcom/msm8996-liquid.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-liquid.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016,2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -294,12 +294,16 @@ }; }; -#include "msm8996-sde-display.dtsi" +#include "msm8996-mdss-panels.dtsi" &mdss_mdp { qcom,mdss-pref-prim-intf = "dsi"; }; +&mdss_dsi { + hw-config = "split_dsi"; +}; + &mdss_dsi0 { qcom,dsi-pref-prim-pan = <&dsi_dual_jdi_4k_nofbc_video>; pinctrl-names = "mdss_default", "mdss_sleep"; diff --git a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi index 24f593aa5e9f..9442239c56a5 100644 --- a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -336,21 +336,12 @@ }; }; -#include "msm8996-sde-display.dtsi" #include "msm8996-mdss-panels.dtsi" &mdss_mdp { qcom,mdss-pref-prim-intf = "dsi"; }; -&sde_hdmi { - status = "ok"; -}; - -&mdss_dsi { - hw-config = "split_dsi"; -}; - &mdss_dsi0 { qcom,dsi-pref-prim-pan = <&dsi_dual_sharp_video>; pinctrl-names = "mdss_default", "mdss_sleep"; @@ -385,7 +376,7 @@ qcom,mdss-dsi-bl-min-level = <1>; qcom,mdss-dsi-bl-max-level = <4095>; qcom,cont-splash-enabled; - qcom,panel-supply-entries = <&dsi_panel_pwr_supply_vdd_no_labibb>; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply_no_labibb>; qcom,platform-reset-gpio = <&tlmm 8 0>; qcom,platform-bklight-en-gpio = <&pm8994_gpios 14 0>; qcom,5v-boost-gpio = <&pmi8994_gpios 8 0>; diff --git a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi index 11c45606f6c2..661f0ebfd8fa 100644 --- a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -187,6 +187,11 @@ iommus = <&mdp_smmu 0>; }; + smmu_kms_sec: qcom,smmu_kms_sec_cb { + compatible = "qcom,smmu_sde_sec"; + iommus = <&mdp_smmu 1>; + }; + /* data and reg bus scale settings */ qcom,sde-data-bus { qcom,msm-bus,name = "mdss_sde"; diff --git a/arch/arm/boot/dts/qcom/msm8996-v2.dtsi b/arch/arm/boot/dts/qcom/msm8996-v2.dtsi index 698c0193a164..db6237072829 100644 --- a/arch/arm/boot/dts/qcom/msm8996-v2.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-v2.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -480,11 +480,7 @@ gdsc-venus-supply = <&gdsc_venus>; }; -&sde_hdmi_tx { - hpd-gdsc-venus-supply = <&gdsc_venus>; -}; - -&mdss_dsi0 { +&mdss_dsi { gdsc-venus-supply = <&gdsc_venus>; qcom,core-supply-entries { #address-cells = <1>; diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi index 2d30aab29a0e..efc91f9f86f7 100644 --- a/arch/arm/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996.dtsi @@ -266,7 +266,6 @@ }; #include "msm8996-ion.dtsi" -#include "msm8996-sde.dtsi" #include "msm8996-mdss.dtsi" #include "msm8996-mdss-pll.dtsi" #include "msm8996-smp2p.dtsi" diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi index cd88489e0978..7d61370a16c0 100644 --- a/arch/arm/boot/dts/qcom/sdm630.dtsi +++ b/arch/arm/boot/dts/qcom/sdm630.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1686,7 +1686,7 @@ vdd-1.8-xo-supply = <&pm660_l9_pin_ctrl>; vdd-1.3-rfa-supply = <&pm660_l6_pin_ctrl>; vdd-3.3-ch0-supply = <&pm660_l19_pin_ctrl>; - qcom,vdd-0.8-cx-mx-config = <525000 950000>; + qcom,vdd-0.8-cx-mx-config = <848000 848000>; qcom,vdd-1.8-xo-config = <1750000 1900000>; qcom,vdd-1.3-rfa-config = <1200000 1370000>; qcom,vdd-3.3-ch0-config = <3200000 3400000>; diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi index ec6598c5ec35..593934328ac5 100644 --- a/arch/arm/boot/dts/qcom/sdm660.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660.dtsi @@ -1966,7 +1966,7 @@ vdd-1.8-xo-supply = <&pm660_l9_pin_ctrl>; vdd-1.3-rfa-supply = <&pm660_l6_pin_ctrl>; vdd-3.3-ch0-supply = <&pm660_l19_pin_ctrl>; - qcom,vdd-0.8-cx-mx-config = <525000 950000>; + qcom,vdd-0.8-cx-mx-config = <848000 848000>; qcom,vdd-1.8-xo-config = <1750000 1900000>; qcom,vdd-1.3-rfa-config = <1200000 1370000>; qcom,vdd-3.3-ch0-config = <3200000 3400000>; diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts index 219cc7427da9..7900d963bef3 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-ivi-la.dts @@ -103,3 +103,160 @@ }; }; +#include "vplatform-lfv-msm8996-usb.dtsi" + +&usb3 { + qcom,disable-host-mode-pm; + qcom,disable-dev-mode-pm; + status = "okay"; +}; + +&qusb_phy1 { + status = "disabled"; +}; + +&usb_nop_phy { + status = "disabled"; +}; + +&usb_detect { + qcom,force-vbus-status-off; +}; + +&android_usb { + status = "okay"; +}; + +&blsp1_uart2 { + status = "okay"; +}; + +&pm8994_gpios { + gpio@c600 { /* GPIO 7 - adv7481 INT3 */ + qcom,mode = <0>; + qcom,vin-sel = <2>; + qcom,src-sel = <0>; + status = "okay"; + }; + + gpio@c800 { /* GPIO 9 - Rome 3.3V control */ + qcom,mode = <1>; /* Digital output */ + qcom,output-type = <0>; /* MOS logic */ + qcom,invert = <1>; /* Output high */ + qcom,vin-sel = <0>; /* VPH_PWR */ + qcom,src-sel = <0>; /* Constant */ + qcom,out-strength = <1>; /* High drive strength */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; + + gpio@c900 { /* GPIO 10 - NFC CLK _REQ*/ + qcom,mode = <0>; + qcom,vin-sel = <2>; + qcom,src-sel = <0>; + qcom,master-en = <1>; + 
status = "okay"; + }; + + gpio@cd00 { /* GPIO 14 - lcd_bklt_reg_en */ + qcom,mode = <1>; /* DIGITAL OUT */ + qcom,output-type = <0>; /* CMOS logic */ + qcom,invert = <1>; /* output hight initially */ + qcom,vin-sel = <2>; /* 1.8 */ + qcom,src-sel = <0>; /* CONSTANT */ + qcom,out-strength = <1>; /* Low drive strength */ + qcom,master-en = <1>; /* ENABLE GPIO */ + status = "okay"; + }; + gpio@c100 { /* GPIO 2 */ + qcom,mode = <0>; + qcom,pull = <0>; + qcom,vin-sel = <2>; + qcom,src-sel = <0>; + status = "okay"; + }; + gpio@c300 { /* GPIO 4 - adv7481 RST */ + qcom,mode = <1>; + qcom,pull = <0>; + qcom,vin-sel = <2>; + qcom,src-sel = <0>; + status = "okay"; + }; + + gpio@c400 { /* GPIO 5 - adv7481 INT1 */ + qcom,mode = <0>; + qcom,vin-sel = <2>; + qcom,src-sel = <0>; + status = "okay"; + }; + + gpio@c500 { /* GPIO 6 - adv7481 INT2*/ + qcom,mode = <0>; + qcom,vin-sel = <2>; + qcom,src-sel = <0>; + status = "okay"; + }; + + gpio@ca00 { /* GPIO 11 - USB enb1 (otg switch) */ + qcom,mode = <1>; /* DIGITAL OUT */ + qcom,vin-sel = <2>; /* 1.8 */ + qcom,src-sel = <0>; /* GPIO */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; + + gpio@cc00 { /* GPIO 13 - NTN_RST */ + qcom,mode = <1>; /* DIGITAL OUT */ + qcom,output-type = <0>; /* CMOS logic */ + qcom,pull = <5>; + qcom,vin-sel = <2>; /* 1.8 */ + qcom,out-strength = <1>; + qcom,src-sel = <0>; /* GPIO */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; + + gpio@ce00 { /* GPIO 15 */ + qcom,mode = <1>; + qcom,output-type = <0>; + qcom,pull = <5>; + qcom,vin-sel = <2>; + qcom,out-strength = <1>; + qcom,src-sel = <2>; + qcom,master-en = <1>; + status = "okay"; + }; + gpio@d000 { /* GPIO 17 - USB1 VBUS detect */ + qcom,mode = <0>; /* Digital Input*/ + qcom,pull = <5>; /* No pull */ + qcom,vin-sel = <2>; /* 1.8 V */ + qcom,src-sel = <0>; /* GPIO */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; + + gpio@d100 { /* GPIO 18 - Rome Sleep Clock */ + qcom,mode = <1>; /* Digital output */ + qcom,output-type = <0>; /* CMOS logic */ + qcom,invert = <0>; /* Output low initially */ + qcom,vin-sel = <2>; /* VIN 2 */ + qcom,src-sel = <3>; /* Function 2 */ + qcom,out-strength = <2>; /* Medium */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; + + gpio@d200 { /* GPIO 19 - Rome BT Reset */ + qcom,mode = <1>; /* Digital output*/ + qcom,pull = <4>; /* Pulldown 10uA */ + qcom,vin-sel = <2>; /* VIN2 */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; +}; + +&sde_kms_hyp { + qcom,client-id = "7815"; +}; diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts index 83f87d0ee2ff..d063a883a9e1 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-telematics.dts @@ -183,7 +183,7 @@ subsys_notif_virt: qcom,subsys_notif_virt@2D000000 { compatible = "qcom,subsys-notif-virt"; - reg = <0x2D000000 0x10>; + reg = <0x2D000000 0x18>; reg-names = "vdev_base"; adsp { subsys-name = "adsp"; @@ -193,6 +193,10 @@ subsys-name = "modem"; offset = <8>; }; + wlan { + subsys-name = "AR6320"; + offset = <16>; + }; }; }; diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-usb.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-usb.dtsi index b7ede0adbf19..b7505743986e 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-usb.dtsi +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996-usb.dtsi @@ 
-209,6 +209,14 @@ }; }; + android_usb: android_usb@66bf0c8 { + status = "disabled"; + compatible = "qcom,android-usb"; + reg = <0x066bf0c8 0xc8>; + qcom,pm-qos-latency = <301 701 801>; + + }; + qusb_phy0: qusb@7411000 { compatible = "qcom,qusb2phy"; reg = <0x07411000 0x180>, diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi index f993b21e323c..7641e42b58b7 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dtsi @@ -294,4 +294,19 @@ regulator-min-microvolt = <1800000>; regulator-max-microvolt = <2950000>; }; + + qcom_rng: qrng@83000 { + compatible = "qcom,msm-rng"; + reg = <0x83000 0x1000>; + qcom,msm-rng-iface-clk; + qcom,no-qrng-config; + qcom,msm-bus,name = "msm-rng-noc"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <1 618 0 0>, /* No vote */ + <1 618 0 800>; /* 100 MB/s */ + clocks = <&clock_gcc clk_gcc_prng_ahb_clk>; + clock-names = "iface_clk"; + }; }; diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index 7b39d8fae61e..bd83a61f724f 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -1360,8 +1360,11 @@ compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks"; reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>; clocks = <&p_clk>, - <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, - <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, + <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>, + <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>, + <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>, + <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>, + <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>, <&p_clk>, <&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>, diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts index fc44ea361a4b..62eae315af1f 100644 --- a/arch/arm/boot/dts/r8a7791-koelsch.dts +++ b/arch/arm/boot/dts/r8a7791-koelsch.dts @@ -280,7 +280,7 @@ x2_clk: x2-clock { compatible = "fixed-clock"; #clock-cells = <0>; - clock-frequency = <148500000>; + clock-frequency = <74250000>; }; x13_clk: x13-clock { diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi index 328f48bd15e7..d2585a4c6098 100644 --- a/arch/arm/boot/dts/r8a7791.dtsi +++ b/arch/arm/boot/dts/r8a7791.dtsi @@ -1374,8 +1374,11 @@ compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks"; reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>; clocks = <&p_clk>, - <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, - <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, + <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>, + <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>, + <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>, + <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>, + <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>, <&p_clk>, <&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>, diff --git a/arch/arm/configs/ranchu_defconfig b/arch/arm/configs/ranchu_defconfig index 49e7bbd5825a..d5a16dfe678f 100644 --- 
a/arch/arm/configs/ranchu_defconfig +++ b/arch/arm/configs/ranchu_defconfig @@ -173,6 +173,7 @@ CONFIG_USB_USBNET=y CONFIG_INPUT_EVDEV=y CONFIG_INPUT_KEYRESET=y CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +CONFIG_KEYBOARD_GOLDFISH_ROTARY=y # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_JOYSTICK=y CONFIG_JOYSTICK_XPAD=y diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h index 71e473d05fcc..620dc75362e5 100644 --- a/arch/arm/include/asm/xen/events.h +++ b/arch/arm/include/asm/xen/events.h @@ -16,7 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) return raw_irqs_disabled_flags(regs->ARM_cpsr); } -#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \ +#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((long long*)(ptr),\ atomic64_t, \ counter), (val)) diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 709ee1d6d4df..faa9a905826e 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -29,11 +29,6 @@ #endif #ifdef CONFIG_DYNAMIC_FTRACE -#ifdef CONFIG_OLD_MCOUNT -#define OLD_MCOUNT_ADDR ((unsigned long) mcount) -#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old) - -#define OLD_NOP 0xe1a00000 /* mov r0, r0 */ static int __ftrace_modify_code(void *data) { @@ -51,6 +46,12 @@ void arch_ftrace_update_code(int command) stop_machine(__ftrace_modify_code, &command, NULL); } +#ifdef CONFIG_OLD_MCOUNT +#define OLD_MCOUNT_ADDR ((unsigned long) mcount) +#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old) + +#define OLD_NOP 0xe1a00000 /* mov r0, r0 */ + static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) { return rec->arch.old_mcount ? OLD_NOP : NOP; diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index 28c90bc372bd..78d325f3245a 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -795,6 +795,8 @@ static struct platform_device da8xx_dsp = { .resource = da8xx_rproc_resources, }; +static bool rproc_mem_inited __initdata; + #if IS_ENABLED(CONFIG_DA8XX_REMOTEPROC) static phys_addr_t rproc_base __initdata; @@ -833,6 +835,8 @@ void __init da8xx_rproc_reserve_cma(void) ret = dma_declare_contiguous(&da8xx_dsp.dev, rproc_size, rproc_base, 0); if (ret) pr_err("%s: dma_declare_contiguous failed %d\n", __func__, ret); + else + rproc_mem_inited = true; } #else @@ -847,6 +851,12 @@ int __init da8xx_register_rproc(void) { int ret; + if (!rproc_mem_inited) { + pr_warn("%s: memory not reserved for DSP, not registering DSP device\n", + __func__); + return -ENOMEM; + } + ret = platform_device_register(&da8xx_dsp); if (ret) pr_err("%s: can't register DSP device: %d\n", __func__, ret); diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c index 5b0f752d5507..24be631e487d 100644 --- a/arch/arm/mach-imx/cpu.c +++ b/arch/arm/mach-imx/cpu.c @@ -133,6 +133,9 @@ struct device * __init imx_soc_device_init(void) case MXC_CPU_IMX6UL: soc_id = "i.MX6UL"; break; + case MXC_CPU_IMX6ULL: + soc_id = "i.MX6ULL"; + break; case MXC_CPU_IMX7D: soc_id = "i.MX7D"; break; diff --git a/arch/arm/mach-imx/mxc.h b/arch/arm/mach-imx/mxc.h index a5b1af6d7441..478cd91d0885 100644 --- a/arch/arm/mach-imx/mxc.h +++ b/arch/arm/mach-imx/mxc.h @@ -39,6 +39,7 @@ #define MXC_CPU_IMX6SX 0x62 #define MXC_CPU_IMX6Q 0x63 #define MXC_CPU_IMX6UL 0x64 +#define MXC_CPU_IMX6ULL 0x65 #define MXC_CPU_IMX7D 0x72 #define IMX_DDR_TYPE_LPDDR2 1 @@ -171,6 +172,11 @@ static inline bool cpu_is_imx6ul(void) return __mxc_cpu_type == MXC_CPU_IMX6UL; } +static 
inline bool cpu_is_imx6ull(void) +{ + return __mxc_cpu_type == MXC_CPU_IMX6ULL; +} + static inline bool cpu_is_imx6q(void) { return __mxc_cpu_type == MXC_CPU_IMX6Q; diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig index e20fc4178b15..1c8a6098a2ca 100644 --- a/arch/arm/mach-mvebu/Kconfig +++ b/arch/arm/mach-mvebu/Kconfig @@ -37,7 +37,7 @@ config MACH_ARMADA_370 config MACH_ARMADA_375 bool "Marvell Armada 375 boards" if ARCH_MULTI_V7 select ARM_ERRATA_720789 - select ARM_ERRATA_753970 + select PL310_ERRATA_753970 select ARM_GIC select ARMADA_375_CLK select HAVE_ARM_SCU @@ -52,7 +52,7 @@ config MACH_ARMADA_375 config MACH_ARMADA_38X bool "Marvell Armada 380/385 boards" if ARCH_MULTI_V7 select ARM_ERRATA_720789 - select ARM_ERRATA_753970 + select PL310_ERRATA_753970 select ARM_GIC select ARMADA_38X_CLK select HAVE_ARM_SCU diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c index 7581e036bda6..70e3b711e79c 100644 --- a/arch/arm/mach-omap2/clockdomains7xx_data.c +++ b/arch/arm/mach-omap2/clockdomains7xx_data.c @@ -524,7 +524,7 @@ static struct clockdomain pcie_7xx_clkdm = { .dep_bit = DRA7XX_PCIE_STATDEP_SHIFT, .wkdep_srcs = pcie_wkup_sleep_deps, .sleepdep_srcs = pcie_wkup_sleep_deps, - .flags = CLKDM_CAN_HWSUP_SWSUP, + .flags = CLKDM_CAN_SWSUP, }; static struct clockdomain atl_7xx_clkdm = { diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c index 9ff92050053c..fa7f308c9027 100644 --- a/arch/arm/mach-omap2/omap-secure.c +++ b/arch/arm/mach-omap2/omap-secure.c @@ -73,6 +73,7 @@ phys_addr_t omap_secure_ram_mempool_base(void) return omap_secure_memblock_base; } +#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) u32 omap3_save_secure_ram(void __iomem *addr, int size) { u32 ret; @@ -91,6 +92,7 @@ u32 omap3_save_secure_ram(void __iomem *addr, int size) return ret; } +#endif /** * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 89e6a3df6dbf..3e3d509c3c03 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -203,6 +203,16 @@ config NEED_SG_DMA_LENGTH config SMP def_bool y +config HOTPLUG_SIZE_BITS + int "Memory hotplug block size(28 => 256MB 30 => 1GB)" + depends on SPARSEMEM + default 30 + help + Selects granularity of hotplug memory. Block + size for memory hotplug is represent as a power + of 2. + If unsure, stick with default value. 
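Editor's note: as a worked illustration of the HOTPLUG_SIZE_BITS help text above, the hotplug block size is simply 2^HOTPLUG_SIZE_BITS bytes. This is a standalone C sketch of that arithmetic only (it is not part of the patch, and the names are illustrative):

	#include <stdio.h>

	int main(void)
	{
		/* values quoted in the Kconfig help: 28 => 256MB, 30 => 1GB */
		unsigned int bits[] = { 28, 30 };

		for (int i = 0; i < 2; i++)
			printf("HOTPLUG_SIZE_BITS=%u -> block size %lu MiB\n",
			       bits[i], (1UL << bits[i]) >> 20);
		return 0;
	}

The defconfig hunks later in this diff pick the smaller 256MB granularity by setting CONFIG_HOTPLUG_SIZE_BITS=28.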
+ config ARM64_DMA_USE_IOMMU bool select ARM_HAS_SG_CHAIN diff --git a/arch/arm64/configs/msm-auto-gvm-perf_defconfig b/arch/arm64/configs/msm-auto-gvm-perf_defconfig index ddd28492ca72..91115071f99b 100644 --- a/arch/arm64/configs/msm-auto-gvm-perf_defconfig +++ b/arch/arm64/configs/msm-auto-gvm-perf_defconfig @@ -177,6 +177,16 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_VIRTIO_BLK=y CONFIG_SRAM=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_UEVENT=y @@ -186,12 +196,14 @@ CONFIG_NETDEVICES=y CONFIG_DUMMY=y CONFIG_TUN=y CONFIG_VIRTIO_NET=y +CONFIG_RNDIS_IPA=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y CONFIG_PPPOE=y CONFIG_PPP_ASYNC=y CONFIG_PPP_SYNC_TTY=y +CONFIG_USB_USBNET=y CONFIG_WCNSS_MEM_PRE_ALLOC=y CONFIG_CNSS_CRYPTO=y CONFIG_ATH_CARDS=y @@ -207,6 +219,7 @@ CONFIG_INPUT_UINPUT=y CONFIG_INPUT_GPIO=m CONFIG_SERIO_AMBAKMI=y CONFIG_SERIAL_MSM_HS=y +CONFIG_DIAG_CHAR=y CONFIG_MSM_SMD_PKT=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MSM_V2=y @@ -241,6 +254,37 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y +CONFIG_USB=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_DWC3=y +CONFIG_USB_ISP1760=y +CONFIG_USB_QTI_KS_BRIDGE=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_MSM_SSPHY_QMP=y +CONFIG_MSM_QUSB_PHY=y +CONFIG_USB_ULPI=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_ECM=y +CONFIG_USB_CONFIGFS_QCRNDIS=y +CONFIG_USB_CONFIGFS_RMNET_BAM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_QDSS=y CONFIG_MMC=y CONFIG_MMC_PERF_PROFILING=y CONFIG_MMC_PARANOID_SD_INIT=y @@ -268,6 +312,8 @@ CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y CONFIG_IPA=y CONFIG_RMNET_IPA=y +CONFIG_GPIO_USB_DETECT=y +CONFIG_USB_BAM=y CONFIG_COMMON_CLK_MSM=y CONFIG_MSM_CLK_CONTROLLER_V2=y CONFIG_REMOTE_SPINLOCK_MSM=y @@ -298,6 +344,7 @@ CONFIG_MSM_SUBSYSTEM_RESTART=y CONFIG_MSM_PIL=y CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_PIL_MSS_QDSP6V5=y +CONFIG_EXTCON=y CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y CONFIG_EXT2_FS=y diff --git a/arch/arm64/configs/msm-auto-gvm_defconfig b/arch/arm64/configs/msm-auto-gvm_defconfig index aaece212396a..56f6629ee4fb 100644 --- a/arch/arm64/configs/msm-auto-gvm_defconfig +++ b/arch/arm64/configs/msm-auto-gvm_defconfig @@ -176,6 +176,16 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_VIRTIO_BLK=y CONFIG_SRAM=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y CONFIG_DM_UEVENT=y @@ -185,12 +195,14 @@ CONFIG_NETDEVICES=y CONFIG_DUMMY=y CONFIG_TUN=y CONFIG_VIRTIO_NET=y +CONFIG_RNDIS_IPA=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y CONFIG_PPPOE=y CONFIG_PPP_ASYNC=y CONFIG_PPP_SYNC_TTY=y +CONFIG_USB_USBNET=y CONFIG_WCNSS_MEM_PRE_ALLOC=y CONFIG_CNSS_CRYPTO=y 
CONFIG_ATH_CARDS=y @@ -209,6 +221,7 @@ CONFIG_VT_HW_CONSOLE_BINDING=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y CONFIG_SERIAL_MSM_HS=y +CONFIG_DIAG_CHAR=y CONFIG_MSM_SMD_PKT=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_MSM_V2=y @@ -244,6 +257,37 @@ CONFIG_HID_ELECOM=y CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y CONFIG_HID_MULTITOUCH=y +CONFIG_USB=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_DWC3=y +CONFIG_USB_ISP1760=y +CONFIG_USB_QTI_KS_BRIDGE=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_MSM_SSPHY_QMP=y +CONFIG_MSM_QUSB_PHY=y +CONFIG_USB_ULPI=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_SERIAL=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_ECM=y +CONFIG_USB_CONFIGFS_QCRNDIS=y +CONFIG_USB_CONFIGFS_RMNET_BAM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_QDSS=y CONFIG_MMC=y CONFIG_MMC_PERF_PROFILING=y CONFIG_MMC_PARANOID_SD_INIT=y @@ -271,6 +315,8 @@ CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y CONFIG_IPA=y CONFIG_RMNET_IPA=y +CONFIG_GPIO_USB_DETECT=y +CONFIG_USB_BAM=y CONFIG_COMMON_CLK_MSM=y CONFIG_MSM_CLK_CONTROLLER_V2=y CONFIG_REMOTE_SPINLOCK_MSM=y @@ -304,6 +350,7 @@ CONFIG_MSM_SUBSYSTEM_RESTART=y CONFIG_MSM_PIL=y CONFIG_MSM_PIL_SSR_GENERIC=y CONFIG_MSM_PIL_MSS_QDSP6V5=y +CONFIG_EXTCON=y CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_IPC=y CONFIG_EXT2_FS=y diff --git a/arch/arm64/configs/msm-auto-perf_defconfig b/arch/arm64/configs/msm-auto-perf_defconfig index 2d180db2bff5..3d7794040c2a 100644 --- a/arch/arm64/configs/msm-auto-perf_defconfig +++ b/arch/arm64/configs/msm-auto-perf_defconfig @@ -1,3 +1,4 @@ +CONFIG_HOTPLUG_SIZE_BITS=28 CONFIG_LOCALVERSION="-perf" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_FHANDLE=y @@ -320,6 +321,7 @@ CONFIG_SERIAL_MSM_SMD=y CONFIG_DIAG_CHAR=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y +CONFIG_MSM_SMD_PKT=y CONFIG_MSM_ADSPRPC=y CONFIG_MSM_RDBG=m CONFIG_I2C_CHARDEV=y @@ -497,6 +499,7 @@ CONFIG_IPA_UT=y CONFIG_GPIO_USB_DETECT=y CONFIG_MSM_MHI=y CONFIG_MSM_MHI_UCI=y +CONFIG_MSM_11AD=y CONFIG_SEEMP_CORE=y CONFIG_USB_BAM=y CONFIG_MSM_MDSS_PLL=y diff --git a/arch/arm64/configs/msm-auto_defconfig b/arch/arm64/configs/msm-auto_defconfig index 1ee27bd3db0a..7bbf952a1245 100644 --- a/arch/arm64/configs/msm-auto_defconfig +++ b/arch/arm64/configs/msm-auto_defconfig @@ -1,3 +1,4 @@ +CONFIG_HOTPLUG_SIZE_BITS=28 # CONFIG_LOCALVERSION_AUTO is not set CONFIG_FHANDLE=y CONFIG_AUDIT=y @@ -323,6 +324,7 @@ CONFIG_SERIAL_MSM_SMD=y CONFIG_DIAG_CHAR=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y +CONFIG_MSM_SMD_PKT=y CONFIG_MSM_ADSPRPC=y CONFIG_MSM_RDBG=m CONFIG_I2C_CHARDEV=y @@ -503,6 +505,7 @@ CONFIG_GPIO_USB_DETECT=y CONFIG_MSM_MHI=y CONFIG_MSM_MHI_UCI=y CONFIG_MSM_MHI_DEBUG=y +CONFIG_MSM_11AD=y CONFIG_SEEMP_CORE=y CONFIG_USB_BAM=y CONFIG_MSM_MDSS_PLL=y diff --git a/arch/arm64/configs/msmcortex_mediabox-perf_defconfig b/arch/arm64/configs/msmcortex_mediabox-perf_defconfig index d1472da82466..2c7be3c7b5d6 100644 --- a/arch/arm64/configs/msmcortex_mediabox-perf_defconfig +++ b/arch/arm64/configs/msmcortex_mediabox-perf_defconfig @@ -1,5 +1,6 @@ CONFIG_LOCALVERSION="-perf" # CONFIG_LOCALVERSION_AUTO is not set +CONFIG_POSIX_MQUEUE=y CONFIG_AUDIT=y # CONFIG_AUDITSYSCALL is not 
set CONFIG_NO_HZ=y @@ -11,16 +12,16 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_SCHEDTUNE=y +CONFIG_MEMCG=y CONFIG_RT_GROUP_SCHED=y CONFIG_SCHED_HMP=y CONFIG_SCHED_HMP_CSTATE_AWARE=y CONFIG_SCHED_CORE_CTL=y CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_PID_NS is not set CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_TUNE=y CONFIG_BLK_DEV_INITRD=y @@ -110,6 +111,7 @@ CONFIG_IPV6_MIP6=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=y CONFIG_NF_CONNTRACK=y CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y @@ -140,6 +142,7 @@ CONFIG_NETFILTER_XT_TARGET_TPROXY=y CONFIG_NETFILTER_XT_TARGET_TRACE=y CONFIG_NETFILTER_XT_TARGET_SECMARK=y CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y CONFIG_NETFILTER_XT_MATCH_COMMENT=y CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y CONFIG_NETFILTER_XT_MATCH_CONNMARK=y @@ -149,6 +152,7 @@ CONFIG_NETFILTER_XT_MATCH_ESP=y CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y CONFIG_NETFILTER_XT_MATCH_HELPER=y CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_IPVS=y # CONFIG_NETFILTER_XT_MATCH_L2TP is not set CONFIG_NETFILTER_XT_MATCH_LENGTH=y CONFIG_NETFILTER_XT_MATCH_LIMIT=y @@ -166,6 +170,7 @@ CONFIG_NETFILTER_XT_MATCH_STATISTIC=y CONFIG_NETFILTER_XT_MATCH_STRING=y CONFIG_NETFILTER_XT_MATCH_TIME=y CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_IP_VS=y CONFIG_NF_CONNTRACK_IPV4=y CONFIG_IP_NF_IPTABLES=y CONFIG_IP_NF_MATCH_AH=y @@ -271,6 +276,7 @@ CONFIG_NETDEVICES=y CONFIG_BONDING=y CONFIG_DUMMY=y CONFIG_TUN=y +CONFIG_VETH=y CONFIG_SKY2=y CONFIG_RNDIS_IPA=y CONFIG_SMSC911X=y @@ -305,6 +311,7 @@ CONFIG_INPUT_QPNP_POWER_ON=y CONFIG_INPUT_UINPUT=y # CONFIG_SERIO_SERPORT is not set # CONFIG_VT is not set +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y # CONFIG_LEGACY_PTYS is not set # CONFIG_DEVMEM is not set # CONFIG_DEVKMEM is not set @@ -590,7 +597,9 @@ CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_EXT4_FS_ENCRYPTION=y CONFIG_EXT4_FS_ICE_ENCRYPTION=y +CONFIG_BTRFS_FS=y CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS_POSIX_ACL=y diff --git a/arch/arm64/configs/msmcortex_mediabox_defconfig b/arch/arm64/configs/msmcortex_mediabox_defconfig index 07403724a09d..03e43dc0b7c0 100644 --- a/arch/arm64/configs/msmcortex_mediabox_defconfig +++ b/arch/arm64/configs/msmcortex_mediabox_defconfig @@ -1,4 +1,5 @@ # CONFIG_LOCALVERSION_AUTO is not set +CONFIG_POSIX_MQUEUE=y CONFIG_AUDIT=y # CONFIG_AUDITSYSCALL is not set CONFIG_NO_HZ=y @@ -13,16 +14,16 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 CONFIG_CGROUP_DEBUG=y CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_DEVICE=y CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_SCHEDTUNE=y +CONFIG_MEMCG=y CONFIG_RT_GROUP_SCHED=y CONFIG_SCHED_HMP=y CONFIG_SCHED_HMP_CSTATE_AWARE=y CONFIG_SCHED_CORE_CTL=y CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_PID_NS is not set CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_TUNE=y CONFIG_BLK_DEV_INITRD=y @@ -108,6 +109,7 @@ CONFIG_IPV6_MIP6=y CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=y CONFIG_NF_CONNTRACK=y CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y @@ -138,6 +140,7 @@ CONFIG_NETFILTER_XT_TARGET_TPROXY=y CONFIG_NETFILTER_XT_TARGET_TRACE=y CONFIG_NETFILTER_XT_TARGET_SECMARK=y CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y CONFIG_NETFILTER_XT_MATCH_COMMENT=y 
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y CONFIG_NETFILTER_XT_MATCH_CONNMARK=y @@ -147,6 +150,7 @@ CONFIG_NETFILTER_XT_MATCH_ESP=y CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y CONFIG_NETFILTER_XT_MATCH_HELPER=y CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_IPVS=y # CONFIG_NETFILTER_XT_MATCH_L2TP is not set CONFIG_NETFILTER_XT_MATCH_LENGTH=y CONFIG_NETFILTER_XT_MATCH_LIMIT=y @@ -164,6 +168,7 @@ CONFIG_NETFILTER_XT_MATCH_STATISTIC=y CONFIG_NETFILTER_XT_MATCH_STRING=y CONFIG_NETFILTER_XT_MATCH_TIME=y CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_IP_VS=y CONFIG_NF_CONNTRACK_IPV4=y CONFIG_IP_NF_IPTABLES=y CONFIG_IP_NF_MATCH_AH=y @@ -274,6 +279,7 @@ CONFIG_NETDEVICES=y CONFIG_BONDING=y CONFIG_DUMMY=y CONFIG_TUN=y +CONFIG_VETH=y CONFIG_RNDIS_IPA=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y @@ -307,6 +313,7 @@ CONFIG_INPUT_KEYCHORD=y CONFIG_INPUT_UINPUT=y CONFIG_INPUT_GPIO=y # CONFIG_SERIO_SERPORT is not set +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y @@ -605,7 +612,9 @@ CONFIG_EXT4_FS_SECURITY=y CONFIG_EXT4_ENCRYPTION=y CONFIG_EXT4_FS_ENCRYPTION=y CONFIG_EXT4_FS_ICE_ENCRYPTION=y +CONFIG_BTRFS_FS=y CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y CONFIG_TMPFS_POSIX_ACL=y diff --git a/arch/arm64/configs/sdm660-perf_defconfig b/arch/arm64/configs/sdm660-perf_defconfig index 48c357b8f82c..1a832addca83 100644 --- a/arch/arm64/configs/sdm660-perf_defconfig +++ b/arch/arm64/configs/sdm660-perf_defconfig @@ -295,6 +295,7 @@ CONFIG_WCNSS_MEM_PRE_ALLOC=y CONFIG_ATH_CARDS=y CONFIG_WIL6210=m CONFIG_CLD_LL_CORE=y +CONFIG_CNSS_GENL=y CONFIG_INPUT_EVDEV=y CONFIG_INPUT_KEYRESET=y CONFIG_KEYBOARD_GPIO=y diff --git a/arch/arm64/configs/sdm660_defconfig b/arch/arm64/configs/sdm660_defconfig index 8ebf411375c8..85175deb6efa 100644 --- a/arch/arm64/configs/sdm660_defconfig +++ b/arch/arm64/configs/sdm660_defconfig @@ -295,6 +295,7 @@ CONFIG_WCNSS_MEM_PRE_ALLOC=y CONFIG_ATH_CARDS=y CONFIG_WIL6210=m CONFIG_CLD_LL_CORE=y +CONFIG_CNSS_GENL=y CONFIG_INPUT_EVDEV=y CONFIG_INPUT_KEYRESET=y CONFIG_KEYBOARD_GPIO=y diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index de1aab4b5da8..487cd382d333 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -58,4 +58,11 @@ config CRYPTO_CRC32_ARM64 tristate "CRC32 and CRC32C using optional ARMv8 instructions" depends on ARM64 select CRYPTO_HASH + +config CRYPTO_SPECK_NEON + tristate "NEON accelerated Speck cipher algorithms" + depends on KERNEL_MODE_NEON + select CRYPTO_BLKCIPHER + select CRYPTO_GF128MUL + select CRYPTO_SPECK endif diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index f0a8f2475ea3..28a71246e0a3 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -32,6 +32,9 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o aes-neon-blk-y := aes-glue-neon.o aes-neon.o +obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o +speck-neon-y := speck-neon-core.o speck-neon-glue.o + AFLAGS_aes-ce.o := -DINTERLEAVE=4 AFLAGS_aes-neon.o := -DINTERLEAVE=4 diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S new file mode 100644 index 000000000000..b14463438b09 --- /dev/null +++ b/arch/arm64/crypto/speck-neon-core.S @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS + * + * Copyright (c) 2018 Google, Inc + * + * Author: Eric Biggers <ebiggers@google.com> + */ + +#include 
<linux/linkage.h> + + .text + + // arguments + ROUND_KEYS .req x0 // const {u64,u32} *round_keys + NROUNDS .req w1 // int nrounds + NROUNDS_X .req x1 + DST .req x2 // void *dst + SRC .req x3 // const void *src + NBYTES .req w4 // unsigned int nbytes + TWEAK .req x5 // void *tweak + + // registers which hold the data being encrypted/decrypted + // (underscores avoid a naming collision with ARM64 registers x0-x3) + X_0 .req v0 + Y_0 .req v1 + X_1 .req v2 + Y_1 .req v3 + X_2 .req v4 + Y_2 .req v5 + X_3 .req v6 + Y_3 .req v7 + + // the round key, duplicated in all lanes + ROUND_KEY .req v8 + + // index vector for tbl-based 8-bit rotates + ROTATE_TABLE .req v9 + ROTATE_TABLE_Q .req q9 + + // temporary registers + TMP0 .req v10 + TMP1 .req v11 + TMP2 .req v12 + TMP3 .req v13 + + // multiplication table for updating XTS tweaks + GFMUL_TABLE .req v14 + GFMUL_TABLE_Q .req q14 + + // next XTS tweak value(s) + TWEAKV_NEXT .req v15 + + // XTS tweaks for the blocks currently being encrypted/decrypted + TWEAKV0 .req v16 + TWEAKV1 .req v17 + TWEAKV2 .req v18 + TWEAKV3 .req v19 + TWEAKV4 .req v20 + TWEAKV5 .req v21 + TWEAKV6 .req v22 + TWEAKV7 .req v23 + + .align 4 +.Lror64_8_table: + .octa 0x080f0e0d0c0b0a090007060504030201 +.Lror32_8_table: + .octa 0x0c0f0e0d080b0a090407060500030201 +.Lrol64_8_table: + .octa 0x0e0d0c0b0a09080f0605040302010007 +.Lrol32_8_table: + .octa 0x0e0d0c0f0a09080b0605040702010003 +.Lgf128mul_table: + .octa 0x00000000000000870000000000000001 +.Lgf64mul_table: + .octa 0x0000000000000000000000002d361b00 + +/* + * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time + * + * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for + * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes + * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64. + * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64. + */ +.macro _speck_round_128bytes n, lanes + + // x = ror(x, 8) + tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b + tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b + tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b + tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b + + // x += y + add X_0.\lanes, X_0.\lanes, Y_0.\lanes + add X_1.\lanes, X_1.\lanes, Y_1.\lanes + add X_2.\lanes, X_2.\lanes, Y_2.\lanes + add X_3.\lanes, X_3.\lanes, Y_3.\lanes + + // x ^= k + eor X_0.16b, X_0.16b, ROUND_KEY.16b + eor X_1.16b, X_1.16b, ROUND_KEY.16b + eor X_2.16b, X_2.16b, ROUND_KEY.16b + eor X_3.16b, X_3.16b, ROUND_KEY.16b + + // y = rol(y, 3) + shl TMP0.\lanes, Y_0.\lanes, #3 + shl TMP1.\lanes, Y_1.\lanes, #3 + shl TMP2.\lanes, Y_2.\lanes, #3 + shl TMP3.\lanes, Y_3.\lanes, #3 + sri TMP0.\lanes, Y_0.\lanes, #(\n - 3) + sri TMP1.\lanes, Y_1.\lanes, #(\n - 3) + sri TMP2.\lanes, Y_2.\lanes, #(\n - 3) + sri TMP3.\lanes, Y_3.\lanes, #(\n - 3) + + // y ^= x + eor Y_0.16b, TMP0.16b, X_0.16b + eor Y_1.16b, TMP1.16b, X_1.16b + eor Y_2.16b, TMP2.16b, X_2.16b + eor Y_3.16b, TMP3.16b, X_3.16b +.endm + +/* + * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time + * + * This is the inverse of _speck_round_128bytes(). 
+ */ +.macro _speck_unround_128bytes n, lanes + + // y ^= x + eor TMP0.16b, Y_0.16b, X_0.16b + eor TMP1.16b, Y_1.16b, X_1.16b + eor TMP2.16b, Y_2.16b, X_2.16b + eor TMP3.16b, Y_3.16b, X_3.16b + + // y = ror(y, 3) + ushr Y_0.\lanes, TMP0.\lanes, #3 + ushr Y_1.\lanes, TMP1.\lanes, #3 + ushr Y_2.\lanes, TMP2.\lanes, #3 + ushr Y_3.\lanes, TMP3.\lanes, #3 + sli Y_0.\lanes, TMP0.\lanes, #(\n - 3) + sli Y_1.\lanes, TMP1.\lanes, #(\n - 3) + sli Y_2.\lanes, TMP2.\lanes, #(\n - 3) + sli Y_3.\lanes, TMP3.\lanes, #(\n - 3) + + // x ^= k + eor X_0.16b, X_0.16b, ROUND_KEY.16b + eor X_1.16b, X_1.16b, ROUND_KEY.16b + eor X_2.16b, X_2.16b, ROUND_KEY.16b + eor X_3.16b, X_3.16b, ROUND_KEY.16b + + // x -= y + sub X_0.\lanes, X_0.\lanes, Y_0.\lanes + sub X_1.\lanes, X_1.\lanes, Y_1.\lanes + sub X_2.\lanes, X_2.\lanes, Y_2.\lanes + sub X_3.\lanes, X_3.\lanes, Y_3.\lanes + + // x = rol(x, 8) + tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b + tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b + tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b + tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b +.endm + +.macro _next_xts_tweak next, cur, tmp, n +.if \n == 64 + /* + * Calculate the next tweak by multiplying the current one by x, + * modulo p(x) = x^128 + x^7 + x^2 + x + 1. + */ + sshr \tmp\().2d, \cur\().2d, #63 + and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b + shl \next\().2d, \cur\().2d, #1 + ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8 + eor \next\().16b, \next\().16b, \tmp\().16b +.else + /* + * Calculate the next two tweaks by multiplying the current ones by x^2, + * modulo p(x) = x^64 + x^4 + x^3 + x + 1. + */ + ushr \tmp\().2d, \cur\().2d, #62 + shl \next\().2d, \cur\().2d, #2 + tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b + eor \next\().16b, \next\().16b, \tmp\().16b +.endif +.endm + +/* + * _speck_xts_crypt() - Speck-XTS encryption/decryption + * + * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer + * using Speck-XTS, specifically the variant with a block size of '2n' and round + * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and + * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a + * nonzero multiple of 128. + */ +.macro _speck_xts_crypt n, lanes, decrypting + + /* + * If decrypting, modify the ROUND_KEYS parameter to point to the last + * round key rather than the first, since for decryption the round keys + * are used in reverse order. 
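+ *
+ * Worked example (illustrative only): for Speck128 (\n == 64) with
+ * 'nrounds' u64 round keys, the add/sub pair below computes
+ * ROUND_KEYS + nrounds * 8 - 8, i.e. it leaves ROUND_KEYS pointing at
+ * &round_keys[nrounds - 1]; the Speck64 case is the same with 4-byte keys.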
+ */ +.if \decrypting + mov NROUNDS, NROUNDS /* zero the high 32 bits */ +.if \n == 64 + add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3 + sub ROUND_KEYS, ROUND_KEYS, #8 +.else + add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2 + sub ROUND_KEYS, ROUND_KEYS, #4 +.endif +.endif + + // Load the index vector for tbl-based 8-bit rotates +.if \decrypting + ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table +.else + ldr ROTATE_TABLE_Q, .Lror\n\()_8_table +.endif + + // One-time XTS preparation +.if \n == 64 + // Load first tweak + ld1 {TWEAKV0.16b}, [TWEAK] + + // Load GF(2^128) multiplication table + ldr GFMUL_TABLE_Q, .Lgf128mul_table +.else + // Load first tweak + ld1 {TWEAKV0.8b}, [TWEAK] + + // Load GF(2^64) multiplication table + ldr GFMUL_TABLE_Q, .Lgf64mul_table + + // Calculate second tweak, packing it together with the first + ushr TMP0.2d, TWEAKV0.2d, #63 + shl TMP1.2d, TWEAKV0.2d, #1 + tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b + eor TMP0.8b, TMP0.8b, TMP1.8b + mov TWEAKV0.d[1], TMP0.d[0] +.endif + +.Lnext_128bytes_\@: + + // Calculate XTS tweaks for next 128 bytes + _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n + _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n + _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n + _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n + _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n + _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n + _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n + _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n + + // Load the next source blocks into {X,Y}[0-3] + ld1 {X_0.16b-Y_1.16b}, [SRC], #64 + ld1 {X_2.16b-Y_3.16b}, [SRC], #64 + + // XOR the source blocks with their XTS tweaks + eor TMP0.16b, X_0.16b, TWEAKV0.16b + eor Y_0.16b, Y_0.16b, TWEAKV1.16b + eor TMP1.16b, X_1.16b, TWEAKV2.16b + eor Y_1.16b, Y_1.16b, TWEAKV3.16b + eor TMP2.16b, X_2.16b, TWEAKV4.16b + eor Y_2.16b, Y_2.16b, TWEAKV5.16b + eor TMP3.16b, X_3.16b, TWEAKV6.16b + eor Y_3.16b, Y_3.16b, TWEAKV7.16b + + /* + * De-interleave the 'x' and 'y' elements of each block, i.e. make it so + * that the X[0-3] registers contain only the second halves of blocks, + * and the Y[0-3] registers contain only the first halves of blocks. + * (Speck uses the order (y, x) rather than the more intuitive (x, y).) 
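+ *
+ * Concretely, for Speck128 (.2d lanes): if TMP0 holds block 0 as
+ * { y0, x0 } and Y_0 holds block 1 as { y1, x1 } (element 0 listed first),
+ * the uzp2 instructions below produce X_0 = { x0, x1 } while the uzp1
+ * instructions produce Y_0 = { y0, y1 }.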
+ */ + uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes + uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes + uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes + uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes + uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes + uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes + uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes + uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes + + // Do the cipher rounds + mov x6, ROUND_KEYS + mov w7, NROUNDS +.Lnext_round_\@: +.if \decrypting + ld1r {ROUND_KEY.\lanes}, [x6] + sub x6, x6, #( \n / 8 ) + _speck_unround_128bytes \n, \lanes +.else + ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 ) + _speck_round_128bytes \n, \lanes +.endif + subs w7, w7, #1 + bne .Lnext_round_\@ + + // Re-interleave the 'x' and 'y' elements of each block + zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes + zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes + zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes + zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes + zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes + zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes + zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes + zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes + + // XOR the encrypted/decrypted blocks with the tweaks calculated earlier + eor X_0.16b, TMP0.16b, TWEAKV0.16b + eor Y_0.16b, Y_0.16b, TWEAKV1.16b + eor X_1.16b, TMP1.16b, TWEAKV2.16b + eor Y_1.16b, Y_1.16b, TWEAKV3.16b + eor X_2.16b, TMP2.16b, TWEAKV4.16b + eor Y_2.16b, Y_2.16b, TWEAKV5.16b + eor X_3.16b, TMP3.16b, TWEAKV6.16b + eor Y_3.16b, Y_3.16b, TWEAKV7.16b + mov TWEAKV0.16b, TWEAKV_NEXT.16b + + // Store the ciphertext in the destination buffer + st1 {X_0.16b-Y_1.16b}, [DST], #64 + st1 {X_2.16b-Y_3.16b}, [DST], #64 + + // Continue if there are more 128-byte chunks remaining + subs NBYTES, NBYTES, #128 + bne .Lnext_128bytes_\@ + + // Store the next tweak and return +.if \n == 64 + st1 {TWEAKV_NEXT.16b}, [TWEAK] +.else + st1 {TWEAKV_NEXT.8b}, [TWEAK] +.endif + ret +.endm + +ENTRY(speck128_xts_encrypt_neon) + _speck_xts_crypt n=64, lanes=2d, decrypting=0 +ENDPROC(speck128_xts_encrypt_neon) + +ENTRY(speck128_xts_decrypt_neon) + _speck_xts_crypt n=64, lanes=2d, decrypting=1 +ENDPROC(speck128_xts_decrypt_neon) + +ENTRY(speck64_xts_encrypt_neon) + _speck_xts_crypt n=32, lanes=4s, decrypting=0 +ENDPROC(speck64_xts_encrypt_neon) + +ENTRY(speck64_xts_decrypt_neon) + _speck_xts_crypt n=32, lanes=4s, decrypting=1 +ENDPROC(speck64_xts_decrypt_neon) diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c new file mode 100644 index 000000000000..c7d18569d408 --- /dev/null +++ b/arch/arm64/crypto/speck-neon-glue.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS + * (64-bit version; based on the 32-bit version) + * + * Copyright (c) 2018 Google, Inc + */ + +#include <asm/hwcap.h> +#include <asm/neon.h> +#include <asm/simd.h> +#include <crypto/algapi.h> +#include <crypto/gf128mul.h> +#include <crypto/speck.h> +#include <crypto/xts.h> +#include <linux/kernel.h> +#include <linux/module.h> + +/* The assembly functions only handle multiples of 128 bytes */ +#define SPECK_NEON_CHUNK_SIZE 128 + +/* Speck128 */ + +struct speck128_xts_tfm_ctx { + struct speck128_tfm_ctx main_key; + struct speck128_tfm_ctx tweak_key; +}; + +asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +typedef void 
(*speck128_crypt_one_t)(const struct speck128_tfm_ctx *, + u8 *, const u8 *); +typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *, + const void *, unsigned int, void *); + +static __always_inline int +__speck128_xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes, + speck128_crypt_one_t crypt_one, + speck128_xts_crypt_many_t crypt_many) +{ + struct crypto_blkcipher *tfm = desc->tfm; + const struct speck128_xts_tfm_ctx *ctx = crypto_blkcipher_ctx(tfm); + struct blkcipher_walk walk; + le128 tweak; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt_block(desc, &walk, SPECK_NEON_CHUNK_SIZE); + + crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + const u8 *src = walk.src.virt.addr; + + if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { + unsigned int count; + + count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); + kernel_neon_begin(); + (*crypt_many)(ctx->main_key.round_keys, + ctx->main_key.nrounds, + dst, src, count, &tweak); + kernel_neon_end(); + dst += count; + src += count; + nbytes -= count; + } + + /* Handle any remainder with generic code */ + while (nbytes >= sizeof(tweak)) { + le128_xor((le128 *)dst, (const le128 *)src, &tweak); + (*crypt_one)(&ctx->main_key, dst, dst); + le128_xor((le128 *)dst, (const le128 *)dst, &tweak); + gf128mul_x_ble((be128 *)&tweak, (const be128 *)&tweak); + + dst += sizeof(tweak); + src += sizeof(tweak); + nbytes -= sizeof(tweak); + } + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; +} + +static int speck128_xts_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return __speck128_xts_crypt(desc, dst, src, nbytes, + crypto_speck128_encrypt, + speck128_xts_encrypt_neon); +} + +static int speck128_xts_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + return __speck128_xts_crypt(desc, dst, src, nbytes, + crypto_speck128_decrypt, + speck128_xts_decrypt_neon); +} + +static int speck128_xts_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + struct speck128_xts_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + int err; + + if (keylen % 2) + return -EINVAL; + + keylen /= 2; + + err = crypto_speck128_setkey(&ctx->main_key, key, keylen); + if (err) + return err; + + return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen); +} + +/* Speck64 */ + +struct speck64_xts_tfm_ctx { + struct speck64_tfm_ctx main_key; + struct speck64_tfm_ctx tweak_key; +}; + +asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds, + void *dst, const void *src, + unsigned int nbytes, void *tweak); + +typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *, + u8 *, const u8 *); +typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *, + const void *, unsigned int, void *); + +static __always_inline int +__speck64_xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes, + speck64_crypt_one_t crypt_one, + speck64_xts_crypt_many_t crypt_many) +{ + struct crypto_blkcipher *tfm = desc->tfm; + const struct speck64_xts_tfm_ctx *ctx = 
crypto_blkcipher_ctx(tfm); + struct blkcipher_walk walk; + __le64 tweak; + int err; + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt_block(desc, &walk, SPECK_NEON_CHUNK_SIZE); + + crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv); + + while (walk.nbytes > 0) { + unsigned int nbytes = walk.nbytes; + u8 *dst = walk.dst.virt.addr; + const u8 *src = walk.src.virt.addr; + + if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) { + unsigned int count; + + count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE); + kernel_neon_begin(); + (*crypt_many)(ctx->main_key.round_keys, + ctx->main_key.nrounds, + dst, src, count, &tweak); + kernel_neon_end(); + dst += count; + src += count; + nbytes -= count; + } + + /* Handle any remainder with generic code */ + while (nbytes >= sizeof(tweak)) { + *(__le64 *)dst = *(__le64 *)src ^ tweak; + (*crypt_one)(&ctx->main_key, dst, dst); + *(__le64 *)dst ^= tweak; + tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^ + ((tweak & cpu_to_le64(1ULL << 63)) ? + 0x1B : 0)); + dst += sizeof(tweak); + src += sizeof(tweak); + nbytes -= sizeof(tweak); + } + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + return err; +} + +static int speck64_xts_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + return __speck64_xts_crypt(desc, dst, src, nbytes, + crypto_speck64_encrypt, + speck64_xts_encrypt_neon); +} + +static int speck64_xts_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + return __speck64_xts_crypt(desc, dst, src, nbytes, + crypto_speck64_decrypt, + speck64_xts_decrypt_neon); +} + +static int speck64_xts_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + struct speck64_xts_tfm_ctx *ctx = crypto_tfm_ctx(tfm); + int err; + + if (keylen % 2) + return -EINVAL; + + keylen /= 2; + + err = crypto_speck64_setkey(&ctx->main_key, key, keylen); + if (err) + return err; + + return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen); +} + +static struct crypto_alg speck_algs[] = { + { + .cra_name = "xts(speck128)", + .cra_driver_name = "xts-speck128-neon", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = SPECK128_BLOCK_SIZE, + .cra_type = &crypto_blkcipher_type, + .cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx), + .cra_alignmask = 7, + .cra_module = THIS_MODULE, + .cra_u = { + .blkcipher = { + .min_keysize = 2 * SPECK128_128_KEY_SIZE, + .max_keysize = 2 * SPECK128_256_KEY_SIZE, + .ivsize = SPECK128_BLOCK_SIZE, + .setkey = speck128_xts_setkey, + .encrypt = speck128_xts_encrypt, + .decrypt = speck128_xts_decrypt, + } + } + }, { + .cra_name = "xts(speck64)", + .cra_driver_name = "xts-speck64-neon", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_blocksize = SPECK64_BLOCK_SIZE, + .cra_type = &crypto_blkcipher_type, + .cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx), + .cra_alignmask = 7, + .cra_module = THIS_MODULE, + .cra_u = { + .blkcipher = { + .min_keysize = 2 * SPECK64_96_KEY_SIZE, + .max_keysize = 2 * SPECK64_128_KEY_SIZE, + .ivsize = SPECK64_BLOCK_SIZE, + .setkey = speck64_xts_setkey, + .encrypt = speck64_xts_encrypt, + .decrypt = speck64_xts_decrypt, + } + } + } +}; + +static int __init speck_neon_module_init(void) +{ + if (!(elf_hwcap & HWCAP_ASIMD)) + return -ENODEV; + return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs)); +} + +static void __exit speck_neon_module_exit(void) +{ + 
crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs)); +} + +module_init(speck_neon_module_init); +module_exit(speck_neon_module_exit); + +MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>"); +MODULE_ALIAS_CRYPTO("xts(speck128)"); +MODULE_ALIAS_CRYPTO("xts-speck128-neon"); +MODULE_ALIAS_CRYPTO("xts(speck64)"); +MODULE_ALIAS_CRYPTO("xts-speck64-neon"); diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 85c4a8981d47..f32b42e8725d 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -48,16 +48,16 @@ do { \ } while (0) static inline int -futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; - int oparg = (encoded_op << 8) >> 20; - int cmparg = (encoded_op << 20) >> 20; + int oparg = (int)(encoded_op << 8) >> 20; + int cmparg = (int)(encoded_op << 20) >> 20; int oldval = 0, ret, tmp; if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) - oparg = 1 << oparg; + oparg = 1U << (oparg & 0x1f); if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) return -EFAULT; diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 279978e1a070..1d870666b8d9 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -48,8 +48,10 @@ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. */ #define VA_BITS (CONFIG_ARM64_VA_BITS) -#define VA_START (UL(0xffffffffffffffff) << VA_BITS) -#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1)) +#define VA_START (UL(0xffffffffffffffff) - \ + (UL(1) << VA_BITS) + 1) +#define PAGE_OFFSET (UL(0xffffffffffffffff) - \ + (UL(1) << (VA_BITS - 1)) + 1) #define KIMAGE_VADDR (MODULES_END) #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) #define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE) diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h index 74a9d301819f..81c69fe1adc0 100644 --- a/arch/arm64/include/asm/sparsemem.h +++ b/arch/arm64/include/asm/sparsemem.h @@ -18,7 +18,11 @@ #ifdef CONFIG_SPARSEMEM #define MAX_PHYSMEM_BITS 48 +#ifndef CONFIG_MEMORY_HOTPLUG #define SECTION_SIZE_BITS 30 +#else +#define SECTION_SIZE_BITS CONFIG_HOTPLUG_SIZE_BITS +#endif #endif #endif diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 23ee968646a9..b30bdffa3066 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1355,3 +1355,15 @@ int pmd_clear_huge(pmd_t *pmd) pmd_clear(pmd); return 1; } + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP +int pud_free_pmd_page(pud_t *pud) +{ + return pud_none(*pud); +} + +int pmd_free_pte_page(pmd_t *pmd) +{ + return pmd_none(*pmd); +} +#endif diff --git a/arch/frv/include/asm/timex.h b/arch/frv/include/asm/timex.h index a89bddefdacf..139093fab326 100644 --- a/arch/frv/include/asm/timex.h +++ b/arch/frv/include/asm/timex.h @@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void) #define vxtime_lock() do {} while (0) #define vxtime_unlock() do {} while (0) +/* This attribute is used in include/linux/jiffies.h alongside with + * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp + * for frv does not contain another section specification. 
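+ *
+ * For illustration only (the exact declaration lives in
+ * include/linux/jiffies.h and is not part of this change), the intended
+ * use is roughly:
+ *
+ *	extern unsigned long volatile __cacheline_aligned_in_smp
+ *		__jiffy_arch_data jiffies;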
+ */ +#define __jiffy_arch_data __attribute__((__section__(".data"))) + #endif diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index b15933c31b2f..36b2c94a8eb5 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -153,7 +153,7 @@ slot (const struct insn *insn) static int apply_imm64 (struct module *mod, struct insn *insn, uint64_t val) { - if (slot(insn) != 2) { + if (slot(insn) != 1 && slot(insn) != 2) { printk(KERN_ERR "%s: invalid slot number %d for IMM64\n", mod->name, slot(insn)); return 0; @@ -165,7 +165,7 @@ apply_imm64 (struct module *mod, struct insn *insn, uint64_t val) static int apply_imm60 (struct module *mod, struct insn *insn, uint64_t val) { - if (slot(insn) != 2) { + if (slot(insn) != 1 && slot(insn) != 2) { printk(KERN_ERR "%s: invalid slot number %d for IMM60\n", mod->name, slot(insn)); return 0; diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c index 9ab48ff80c1c..6d11ae581ea7 100644 --- a/arch/mips/ath25/board.c +++ b/arch/mips/ath25/board.c @@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size) } board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL); + if (!board_data) + goto error; ath25_board.config = (struct ath25_boarddata *)board_data; memcpy_fromio(board_data, bcfg, 0x100); if (broken_boarddata) { diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 4f9eb0576884..10d0b2140375 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -2246,6 +2246,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, } host_data = kzalloc(sizeof(*host_data), GFP_KERNEL); + if (!host_data) + return -ENOMEM; raw_spin_lock_init(&host_data->lock); addr = of_get_address(ciu_node, 0, NULL, NULL); diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h index daba1f9a4f79..174aedce3167 100644 --- a/arch/mips/include/asm/kprobes.h +++ b/arch/mips/include/asm/kprobes.h @@ -40,7 +40,8 @@ typedef union mips_instruction kprobe_opcode_t; #define flush_insn_slot(p) \ do { \ - flush_icache_range((unsigned long)p->addr, \ + if (p->addr) \ + flush_icache_range((unsigned long)p->addr, \ (unsigned long)p->addr + \ (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ } while (0) diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index 832e2167d00f..ef7c02af7522 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -18,6 +18,10 @@ #include <asm-generic/pgtable-nopmd.h> +#ifdef CONFIG_HIGHMEM +#include <asm/highmem.h> +#endif + extern int temp_tlb_entry; /* @@ -61,7 +65,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, #define VMALLOC_START MAP_BASE -#define PKMAP_BASE (0xfe000000UL) +#define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1)) +#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP) #ifdef CONFIG_HIGHMEM # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 78cf8c2f1de0..4874712b475e 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -166,11 +166,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus) return; } - if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, - "smp_ipi0", NULL)) + if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, + IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL)) panic("Can't request IPI0 interrupt"); - if (request_irq(IPI1_IRQ, 
bmips_ipi_interrupt, IRQF_PERCPU, - "smp_ipi1", NULL)) + if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, + IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL)) panic("Can't request IPI1 interrupt"); } diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c index adc6911ba748..b19a3c506b1e 100644 --- a/arch/mips/mm/pgtable-32.c +++ b/arch/mips/mm/pgtable-32.c @@ -51,15 +51,15 @@ void __init pagetable_init(void) /* * Fixed mappings: */ - vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; - fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1); + fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base); #ifdef CONFIG_HIGHMEM /* * Permanent kmaps: */ vaddr = PKMAP_BASE; - fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); + fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); pgd = swapper_pg_dir + __pgd_offset(vaddr); pud = pud_offset(pgd, vaddr); diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 1a8c96035716..c0c1e9529dbd 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -527,7 +527,8 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset) u32 sflags, tmp_flags; /* Adjust the stack pointer */ - emit_stack_offset(-align_sp(offset), ctx); + if (offset) + emit_stack_offset(-align_sp(offset), ctx); tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; /* sflags is essentially a bitmap */ @@ -579,7 +580,8 @@ static void restore_bpf_jit_regs(struct jit_ctx *ctx, emit_load_stack_reg(r_ra, r_sp, real_off, ctx); /* Restore the sp and discard the scrach memory */ - emit_stack_offset(align_sp(offset), ctx); + if (offset) + emit_stack_offset(align_sp(offset), ctx); } static unsigned int get_stack_depth(struct jit_ctx *ctx) @@ -626,8 +628,14 @@ static void build_prologue(struct jit_ctx *ctx) if (ctx->flags & SEEN_X) emit_jit_reg_move(r_X, r_zero, ctx); - /* Do not leak kernel data to userspace */ - if (bpf_needs_clear_a(&ctx->skf->insns[0])) + /* + * Do not leak kernel data to userspace, we only need to clear + * r_A if it is ever used. In fact if it is never used, we + * will not save/restore it, so clearing it in this case would + * corrupt the state of the caller. 
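+ *
+ * For example (illustrative), a filter that only ever works with the X
+ * register leaves SEEN_A clear; r_A is then neither saved in the
+ * prologue nor restored in the epilogue, so zeroing it here would
+ * clobber whatever value the caller kept in that register.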
+ */ + if (bpf_needs_clear_a(&ctx->skf->insns[0]) && + (ctx->flags & SEEN_A)) emit_jit_reg_move(r_A, r_zero, ctx); } diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S index 5d2e0c8d29c0..88a2075305d1 100644 --- a/arch/mips/net/bpf_jit_asm.S +++ b/arch/mips/net/bpf_jit_asm.S @@ -90,18 +90,14 @@ FEXPORT(sk_load_half_positive) is_offset_in_header(2, half) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset - .set reorder - lh $r_A, 0(t1) - .set noreorder + lhu $r_A, 0(t1) #ifdef CONFIG_CPU_LITTLE_ENDIAN # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) - wsbh t0, $r_A - seh $r_A, t0 + wsbh $r_A, $r_A # else - sll t0, $r_A, 24 - andi t1, $r_A, 0xff00 - sra t0, t0, 16 - srl t1, t1, 8 + sll t0, $r_A, 8 + srl t1, $r_A, 8 + andi t0, t0, 0xff00 or $r_A, t0, t1 # endif #endif @@ -115,7 +111,7 @@ FEXPORT(sk_load_byte_positive) is_offset_in_header(1, byte) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset - lb $r_A, 0(t1) + lbu $r_A, 0(t1) jr $r_ra move $r_ret, zero END(sk_load_byte) @@ -139,6 +135,11 @@ FEXPORT(sk_load_byte_positive) * (void *to) is returned in r_s0 * */ +#ifdef CONFIG_CPU_LITTLE_ENDIAN +#define DS_OFFSET(SIZE) (4 * SZREG) +#else +#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE)) +#endif #define bpf_slow_path_common(SIZE) \ /* Quick check. Are we within reasonable boundaries? */ \ LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \ @@ -150,7 +151,7 @@ FEXPORT(sk_load_byte_positive) PTR_LA t0, skb_copy_bits; \ PTR_S $r_ra, (5 * SZREG)($r_sp); \ /* Assign low slot to a2 */ \ - move a2, $r_sp; \ + PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \ jalr t0; \ /* Reset our destination slot (DS but it's ok) */ \ INT_S zero, (4 * SZREG)($r_sp); \ diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c index ee117c4bc4a3..8037a4bd84fd 100644 --- a/arch/mips/ralink/reset.c +++ b/arch/mips/ralink/reset.c @@ -96,16 +96,9 @@ static void ralink_restart(char *command) unreachable(); } -static void ralink_halt(void) -{ - local_irq_disable(); - unreachable(); -} - static int __init mips_reboot_setup(void) { _machine_restart = ralink_restart; - _machine_halt = ralink_halt; return 0; } diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 3140c19c448c..70b379ee6b7e 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -132,7 +132,19 @@ extern long long virt_phys_offset; #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) + +#ifdef CONFIG_PPC_BOOK3S_64 +/* + * On hash the vmalloc and other regions alias to the kernel region when passed + * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can + * return true for some vmalloc addresses, which is incorrect. So explicitly + * check that the address is in the kernel region. 
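+ *
+ * Illustrative sketch (the exact constants depend on the MMU layout):
+ * a vmalloc address such as 0xd000000000001000 has its top nibble
+ * stripped by __pa(), yielding a pfn that pfn_valid() may well accept,
+ * even though the address is not part of the 0xc... linear mapping.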
+ */ +#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \ + pfn_valid(virt_to_pfn(kaddr))) +#else #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) +#endif /* * On Book-E parts we need __va to parse the device tree and we can't diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 1be1092c7204..9baba9576e99 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -686,12 +686,20 @@ static int __init get_freq(char *name, int cells, unsigned long *val) static void start_cpu_decrementer(void) { #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) + unsigned int tcr; + /* Clear any pending timer interrupts */ mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS); - /* Enable decrementer interrupt */ - mtspr(SPRN_TCR, TCR_DIE); -#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */ + tcr = mfspr(SPRN_TCR); + /* + * The watchdog may have already been enabled by u-boot. So leave + * TRC[WP] (Watchdog Period) alone. + */ + tcr &= TCR_WP_MASK; /* Clear all bits except for TCR[WP] */ + tcr |= TCR_DIE; /* Enable decrementer */ + mtspr(SPRN_TCR, tcr); +#endif } void __init generic_calibrate_decr(void) diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 79ad35abd196..ddec22828673 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -177,12 +177,15 @@ map_again: ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags, hpsize, hpsize, MMU_SEGSIZE_256M); - if (ret < 0) { + if (ret == -1) { /* If we couldn't map a primary PTE, try a secondary */ hash = ~hash; vflags ^= HPTE_V_SECONDARY; attempt++; goto map_again; + } else if (ret < 0) { + r = -EIO; + goto out_unlock; } else { trace_kvm_book3s_64_mmu_map(rflags, hpteg, vpn, hpaddr, orig_pte); diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 64891b081ad5..81313844d81c 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -625,7 +625,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, kvmppc_mmu_unmap_page(vcpu, &pte); } /* The guest's PTE is not mapped yet. 
Map on the host */ - kvmppc_mmu_map_page(vcpu, &pte, iswrite); + if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) { + /* Exit KVM if mapping failed */ + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + return RESUME_HOST; + } if (data) vcpu->stat.sp_storage++; else if (vcpu->arch.mmu.is_dcbz32(vcpu) && diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index f2c75a1e0536..0d91baf63fed 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c @@ -50,7 +50,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) pteg_addr = get_pteg_addr(vcpu, pte_index); mutex_lock(&vcpu->kvm->arch.hpt_mutex); - copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)); + ret = H_FUNCTION; + if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg))) + goto done; hpte = pteg; ret = H_PTEG_FULL; @@ -71,7 +73,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); pteg_addr += i * HPTE_SIZE; - copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE); + ret = H_FUNCTION; + if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE)) + goto done; kvmppc_set_gpr(vcpu, 4, pte_index | i); ret = H_SUCCESS; @@ -93,7 +97,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) pteg = get_pteg_addr(vcpu, pte_index); mutex_lock(&vcpu->kvm->arch.hpt_mutex); - copy_from_user(pte, (void __user *)pteg, sizeof(pte)); + ret = H_FUNCTION; + if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) + goto done; pte[0] = be64_to_cpu((__force __be64)pte[0]); pte[1] = be64_to_cpu((__force __be64)pte[1]); @@ -103,7 +109,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) goto done; - copy_to_user((void __user *)pteg, &v, sizeof(v)); + ret = H_FUNCTION; + if (copy_to_user((void __user *)pteg, &v, sizeof(v))) + goto done; rb = compute_tlbie_rb(pte[0], pte[1], pte_index); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); @@ -171,7 +179,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) } pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); - copy_from_user(pte, (void __user *)pteg, sizeof(pte)); + if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) { + ret = H_FUNCTION; + break; + } pte[0] = be64_to_cpu((__force __be64)pte[0]); pte[1] = be64_to_cpu((__force __be64)pte[1]); @@ -184,7 +195,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) tsh |= H_BULK_REMOVE_NOT_FOUND; } else { /* Splat the pteg in (userland) hpt */ - copy_to_user((void __user *)pteg, &v, sizeof(v)); + if (copy_to_user((void __user *)pteg, &v, sizeof(v))) { + ret = H_FUNCTION; + break; + } rb = compute_tlbie_rb(pte[0], pte[1], tsh & H_BULK_REMOVE_PTEX); @@ -211,7 +225,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) pteg = get_pteg_addr(vcpu, pte_index); mutex_lock(&vcpu->kvm->arch.hpt_mutex); - copy_from_user(pte, (void __user *)pteg, sizeof(pte)); + ret = H_FUNCTION; + if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) + goto done; pte[0] = be64_to_cpu((__force __be64)pte[0]); pte[1] = be64_to_cpu((__force __be64)pte[1]); @@ -234,7 +250,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? 
true : false); pte[0] = (__force u64)cpu_to_be64(pte[0]); pte[1] = (__force u64)cpu_to_be64(pte[1]); - copy_to_user((void __user *)pteg, pte, sizeof(pte)); + ret = H_FUNCTION; + if (copy_to_user((void __user *)pteg, pte, sizeof(pte))) + goto done; ret = H_SUCCESS; done: diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index a67c6d781c52..d154e333f76b 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -294,7 +294,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, * can result in fault, which will cause a deadlock when called with * mmap_sem held */ - if (user_mode(regs)) + if (!is_exec && user_mode(regs)) store_update_sp = store_updates_sp(regs); if (user_mode(regs)) diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c index be6212ddbf06..7e42e3ec2142 100644 --- a/arch/powerpc/platforms/cell/spufs/coredump.c +++ b/arch/powerpc/platforms/cell/spufs/coredump.c @@ -174,6 +174,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i, if (!dump_skip(cprm, roundup(cprm->written - total + sz, 4) - cprm->written)) goto Eio; + + rc = 0; out: free_page((unsigned long)buf); return rc; diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 0f41a8286378..da4f3f2a8186 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -21,8 +21,14 @@ SECTIONS { . = 0x00000000; .text : { - _text = .; /* Text and read-only data */ + /* Text and read-only data */ HEAD_TEXT + /* + * E.g. perf doesn't like symbols starting at address zero, + * therefore skip the initial PSW and channel program located + * at address zero and let _text start at 0x200. + */ + _text = 0x200; TEXT_TEXT SCHED_TEXT LOCK_TEXT diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index 59d503866431..9cc600b2d68c 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -1733,9 +1733,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size) lp->rcv_nxt = p->seqid; + /* + * If this is a control-only packet, there is nothing + * else to do but advance the rx queue since the packet + * was already processed above. + */ if (!(p->type & LDC_DATA)) { new = rx_advance(lp, new); - goto no_data; + break; } if (p->stype & (LDC_ACK | LDC_NACK)) { err = data_ack_nack(lp, p); diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 82fac1b62626..568930248ad0 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -200,6 +200,15 @@ KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) LDFLAGS := -m elf_$(UTS_MACHINE) +# +# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to +# the linker to force 2MB page size regardless of the default page size used +# by the linker. 
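+#
+# For example (illustrative): a linker whose default max-page-size is 4KiB
+# may emit PT_LOAD segments that are only 4KiB-aligned, which the 2MB
+# p_align check added to arch/x86/boot/compressed/misc.c below would reject.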
+# +ifdef CONFIG_X86_64 +LDFLAGS += $(call ld-option, -z max-page-size=0x200000) +endif + # Speed up the build KBUILD_CFLAGS += -pipe # Workaround for a gcc prelease that unfortunately was shipped in a suse release diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 79dac1758e7c..16df89c30c20 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -366,6 +366,10 @@ static void parse_elf(void *output) switch (phdr->p_type) { case PT_LOAD: +#ifdef CONFIG_X86_64 + if ((phdr->p_align % 0x200000) != 0) + error("Alignment of LOAD segment isn't multiple of 2MB"); +#endif #ifdef CONFIG_RELOCATABLE dest = output; dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); diff --git a/arch/x86/configs/i386_ranchu_defconfig b/arch/x86/configs/i386_ranchu_defconfig index 65ed8c8f8444..dde67eacd8e2 100644 --- a/arch/x86/configs/i386_ranchu_defconfig +++ b/arch/x86/configs/i386_ranchu_defconfig @@ -240,6 +240,7 @@ CONFIG_INPUT_EVDEV=y CONFIG_INPUT_KEYRESET=y # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +CONFIG_KEYBOARD_GOLDFISH_ROTARY=y # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_JOYSTICK=y CONFIG_JOYSTICK_XPAD=y diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig new file mode 100644 index 000000000000..512d009146dd --- /dev/null +++ b/arch/x86/configs/x86_64_cuttlefish_defconfig @@ -0,0 +1,442 @@ +CONFIG_POSIX_MQUEUE=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_NAMESPACES=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_LZ4 is not set +CONFIG_KALLSYMS_ALL=y +# CONFIG_PCSPKR_PLATFORM is not set +CONFIG_BPF_SYSCALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_OPROFILE=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_SMP=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_SPINLOCKS=y +CONFIG_MCORE2=y +CONFIG_PROCESSOR_SELECT=y +# CONFIG_CPU_SUP_CENTAUR is not set +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT=y +# CONFIG_MICROCODE is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_TRANSPARENT_HUGEPAGE=y +# CONFIG_MTRR is not set +CONFIG_HZ_100=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_PHYSICAL_START=0x200000 +CONFIG_RANDOMIZE_BASE=y +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttyS0 reboot=p" +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_ACPI_PROCFS_POWER=y +# CONFIG_ACPI_FAN is not set +# CONFIG_ACPI_THERMAL is not set +# CONFIG_X86_PM_TIMER is not set +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_X86_ACPI_CPUFREQ=y +# CONFIG_X86_ACPI_CPUFREQ_CPB is not set +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_MSI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_IA32_EMULATION=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y 
+CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETLABEL=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_CFG80211=y +CONFIG_MAC80211=y +CONFIG_RFKILL=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEBUG_DEVRES=y +CONFIG_OF=y +CONFIG_OF_UNITTEST=y +# CONFIG_PNP_DEBUG_MESSAGES is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_UID_SYS_STATS=y +CONFIG_MEMORY_STATE_TIME=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y 
+CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_VIRTIO=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_MIRROR=y +CONFIG_DM_ZERO=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_NETCONSOLE=y +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +# CONFIG_ETHERNET is not set +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_USB_USBNET=y +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +# CONFIG_USB_NET_CDCETHER is not set +# CONFIG_USB_NET_CDC_NCM is not set +# CONFIG_USB_NET_NET1080 is not set +# CONFIG_USB_NET_CDC_SUBSET is not set +# CONFIG_USB_NET_ZAURUS is not set +CONFIG_MAC80211_HWSIM=y +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO_I8042 is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=48 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_INTEL is not set +# CONFIG_HW_RANDOM_AMD is not set +# CONFIG_HW_RANDOM_VIA is not set +CONFIG_HW_RANDOM_VIRTIO=y +CONFIG_HPET=y +# CONFIG_HPET_MMAP_DEFAULT is not set +# CONFIG_DEVPORT is not set +# CONFIG_ACPI_I2C_OPREGION is not set +# CONFIG_I2C_COMPAT is not set +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_PTP_1588_CLOCK=y +CONFIG_GPIOLIB=y +# CONFIG_HWMON is not set +# CONFIG_X86_PKG_TEMP_THERMAL is not set +CONFIG_WATCHDOG=y +CONFIG_SOFT_WATCHDOG=y +CONFIG_MEDIA_SUPPORT=y +# CONFIG_DVB_TUNER_DIB0070 is not set +# CONFIG_DVB_TUNER_DIB0090 is not set +# CONFIG_VGA_ARB is not set +CONFIG_DRM=y +# CONFIG_DRM_FBDEV_EMULATION is not set +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +# CONFIG_HID_GENERIC is not set +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y 
+CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_GADGET=y +CONFIG_USB_DUMMY_HCD=y +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_HCTOSYS is not set +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SW_SYNC_USER=y +CONFIG_ANDROID_VSOC=y +CONFIG_ION=y +# CONFIG_X86_PLATFORM_DEVICES is not set +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +# CONFIG_FIRMWARE_MEMMAP is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QFMT_V2=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_SDCARD_FS=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=1024 +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_STACKOVERFLOW=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_PANIC_TIMEOUT=5 +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_ENABLE_DEFAULT_TRACERS=y +CONFIG_IO_DELAY_NONE=y +CONFIG_DEBUG_BOOT_PARAMS=y +CONFIG_OPTIMIZE_INLINING=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_ECHAINIV=y diff --git a/arch/x86/configs/x86_64_ranchu_defconfig b/arch/x86/configs/x86_64_ranchu_defconfig index d977bd91e390..e8ed8eef62ec 100644 --- a/arch/x86/configs/x86_64_ranchu_defconfig +++ b/arch/x86/configs/x86_64_ranchu_defconfig @@ -237,6 +237,7 @@ CONFIG_INPUT_EVDEV=y CONFIG_INPUT_KEYRESET=y # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +CONFIG_KEYBOARD_GOLDFISH_ROTARY=y # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_JOYSTICK=y CONFIG_JOYSTICK_XPAD=y diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index 8648158f3916..f8fe11d24cde 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -66,8 +66,6 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src); int err; - fn = (enc) ? 
cast5_ecb_enc_16way : cast5_ecb_dec_16way; - err = blkcipher_walk_virt(desc, walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; @@ -79,6 +77,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, /* Process multi-block batch */ if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { + fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way; do { fn(ctx, wdst, wsrc); diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 113de1592cc1..a619254275f2 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -178,12 +178,14 @@ GLOBAL(entry_SYSCALL_64_after_swapgs) jnz tracesys entry_SYSCALL_64_fastpath: #if __SYSCALL_MASK == ~0 - cmpq $__NR_syscall_max, %rax + cmpq $NR_syscalls, %rax #else andl $__SYSCALL_MASK, %eax - cmpl $__NR_syscall_max, %eax + cmpl $NR_syscalls, %eax #endif - ja 1f /* return -ENOSYS (already in pt_regs->ax) */ + jae 1f /* return -ENOSYS (already in pt_regs->ax) */ + sbb %rcx, %rcx /* array_index_mask_nospec() */ + and %rcx, %rax movq %r10, %rcx #ifdef CONFIG_RETPOLINE movq sys_call_table(, %rax, 8), %rax @@ -276,12 +278,14 @@ tracesys_phase2: RESTORE_C_REGS_EXCEPT_RAX RESTORE_EXTRA_REGS #if __SYSCALL_MASK == ~0 - cmpq $__NR_syscall_max, %rax + cmpq $NR_syscalls, %rax #else andl $__SYSCALL_MASK, %eax - cmpl $__NR_syscall_max, %eax + cmpl $NR_syscalls, %eax #endif - ja 1f /* return -ENOSYS (already in pt_regs->ax) */ + jae 1f /* return -ENOSYS (already in pt_regs->ax) */ + sbb %rcx, %rcx /* array_index_mask_nospec() */ + and %rcx, %rax movq %r10, %rcx /* fixup for C */ #ifdef CONFIG_RETPOLINE movq sys_call_table(, %rax, 8), %rax @@ -1020,7 +1024,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ #endif /* CONFIG_HYPERV */ idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK -idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK +idtentry int3 do_int3 has_error_code=0 idtentry stack_segment do_stack_segment has_error_code=1 #ifdef CONFIG_XEN diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 66094a0473a8..249f1c769f21 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -195,4 +195,41 @@ static inline void vmexit_fill_RSB(void) } #endif /* __ASSEMBLY__ */ + +/* + * Below is used in the eBPF JIT compiler and emits the byte sequence + * for the following assembly: + * + * With retpolines configured: + * + * callq do_rop + * spec_trap: + * pause + * lfence + * jmp spec_trap + * do_rop: + * mov %rax,(%rsp) + * retq + * + * Without retpolines configured: + * + * jmp *%rax + */ +#ifdef CONFIG_RETPOLINE +# define RETPOLINE_RAX_BPF_JIT_SIZE 17 +# define RETPOLINE_RAX_BPF_JIT() \ + EMIT1_off32(0xE8, 7); /* callq do_rop */ \ + /* spec_trap: */ \ + EMIT2(0xF3, 0x90); /* pause */ \ + EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \ + EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \ + /* do_rop: */ \ + EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \ + EMIT1(0xC3); /* retq */ +#else +# define RETPOLINE_RAX_BPF_JIT_SIZE 2 +# define RETPOLINE_RAX_BPF_JIT() \ + EMIT2(0xFF, 0xE0); /* jmp *%rax */ +#endif + #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */ diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 6b6e16d813b9..dd11f5cb4149 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -310,6 +310,7 @@ enum vmcs_field { #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ #define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */ #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ 
+#define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */ #define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */ /* GUEST_INTERRUPTIBILITY_INFO flags. */ diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index a41e523536a2..592e260ba05b 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -91,8 +91,12 @@ out_data: return NULL; } -static void free_apic_chip_data(struct apic_chip_data *data) +static void free_apic_chip_data(unsigned int virq, struct apic_chip_data *data) { +#ifdef CONFIG_X86_IO_APIC + if (virq < nr_legacy_irqs()) + legacy_irq_data[virq] = NULL; +#endif if (data) { free_cpumask_var(data->domain); free_cpumask_var(data->old_domain); @@ -316,11 +320,7 @@ static void x86_vector_free_irqs(struct irq_domain *domain, apic_data = irq_data->chip_data; irq_domain_reset_irq_data(irq_data); raw_spin_unlock_irqrestore(&vector_lock, flags); - free_apic_chip_data(apic_data); -#ifdef CONFIG_X86_IO_APIC - if (virq + i < nr_legacy_irqs()) - legacy_irq_data[virq + i] = NULL; -#endif + free_apic_chip_data(virq + i, apic_data); } } } @@ -361,7 +361,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, err = assign_irq_vector_policy(virq + i, node, data, info); if (err) { irq_data->chip_data = NULL; - free_apic_chip_data(data); + free_apic_chip_data(virq + i, data); goto error; } } diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index ecaf7c9baf75..2bbc74f8a4a8 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -175,8 +175,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) } if (i >= ARRAY_SIZE(mitigation_options)) { - pr_err("unknown option (%s). Switching to AUTO select\n", - mitigation_options[i].option); + pr_err("unknown option (%s). 
Switching to AUTO select\n", arg); return SPECTRE_V2_CMD_AUTO; } } diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 364fbad72e60..6edb9530d7e9 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -60,6 +60,9 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex); smp_load_acquire(&(p)); \ }) +/* sysfs synchronization */ +static DEFINE_MUTEX(mce_sysfs_mutex); + #define CREATE_TRACE_POINTS #include <trace/events/mce.h> @@ -2220,6 +2223,7 @@ static ssize_t set_ignore_ce(struct device *s, if (kstrtou64(buf, 0, &new) < 0) return -EINVAL; + mutex_lock(&mce_sysfs_mutex); if (mca_cfg.ignore_ce ^ !!new) { if (new) { /* disable ce features */ @@ -2232,6 +2236,8 @@ static ssize_t set_ignore_ce(struct device *s, on_each_cpu(mce_enable_ce, (void *)1, 1); } } + mutex_unlock(&mce_sysfs_mutex); + return size; } @@ -2244,6 +2250,7 @@ static ssize_t set_cmci_disabled(struct device *s, if (kstrtou64(buf, 0, &new) < 0) return -EINVAL; + mutex_lock(&mce_sysfs_mutex); if (mca_cfg.cmci_disabled ^ !!new) { if (new) { /* disable cmci */ @@ -2255,6 +2262,8 @@ static ssize_t set_cmci_disabled(struct device *s, on_each_cpu(mce_enable_ce, NULL, 1); } } + mutex_unlock(&mce_sysfs_mutex); + return size; } @@ -2262,8 +2271,19 @@ static ssize_t store_int_with_restart(struct device *s, struct device_attribute *attr, const char *buf, size_t size) { - ssize_t ret = device_store_int(s, attr, buf, size); + unsigned long old_check_interval = check_interval; + ssize_t ret = device_store_ulong(s, attr, buf, size); + + if (check_interval == old_check_interval) + return ret; + + if (check_interval < 1) + check_interval = 1; + + mutex_lock(&mce_sysfs_mutex); mce_restart(); + mutex_unlock(&mce_sysfs_mutex); + return ret; } diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 5cc2242d77c6..7b79c80ce029 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -2716,7 +2716,7 @@ static unsigned bdw_limit_period(struct perf_event *event, unsigned left) X86_CONFIG(.event=0xc0, .umask=0x01)) { if (left < 128) left = 128; - left &= ~0x3fu; + left &= ~0x3fULL; } return left; } diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index be22f5a2192e..4e3b8a587c88 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -418,6 +418,7 @@ struct legacy_pic default_legacy_pic = { }; struct legacy_pic *legacy_pic = &default_legacy_pic; +EXPORT_SYMBOL(legacy_pic); static int __init i8259A_init_ops(void) { diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 3eb804335458..30bc63876035 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -50,6 +50,8 @@ #include <linux/kallsyms.h> #include <linux/ftrace.h> #include <linux/kasan.h> +#include <linux/moduleloader.h> + #include <asm/cacheflush.h> #include <asm/desc.h> #include <asm/pgtable.h> @@ -196,6 +198,8 @@ retry: return (opcode != 0x62 && opcode != 0x67); case 0x70: return 0; /* can't boost conditional jump */ + case 0x90: + return opcode != 0x9a; /* can't boost call far */ case 0xc0: /* can't boost software-interruptions */ return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf; @@ -400,10 +404,20 @@ int __copy_instruction(u8 *dest, u8 *src) return length; } +/* Recover page to RW mode before releasing it */ +void free_insn_page(void *page) +{ + set_memory_nx((unsigned long)page & PAGE_MASK, 1); + set_memory_rw((unsigned long)page & PAGE_MASK, 1); + 
module_memfree(page); +} + static int arch_copy_kprobe(struct kprobe *p) { int ret; + set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1); + /* Copy an instruction with recovering if other optprobe modifies it.*/ ret = __copy_instruction(p->ainsn.insn, p->addr); if (!ret) @@ -418,6 +432,8 @@ static int arch_copy_kprobe(struct kprobe *p) else p->ainsn.boostable = -1; + set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1); + /* Check whether the instruction modifies Interrupt Flag or not */ p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn); diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index ea8e2b846101..7aba9d6475a5 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -370,6 +370,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, } buf = (u8 *)op->optinsn.insn; + set_memory_rw((unsigned long)buf & PAGE_MASK, 1); /* Copy instructions into the out-of-line buffer */ ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr); @@ -392,6 +393,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size, (u8 *)op->kp.addr + op->optinsn.size); + set_memory_ro((unsigned long)buf & PAGE_MASK, 1); + flush_icache_range((unsigned long) buf, (unsigned long) buf + TMPL_END_IDX + op->optinsn.size + RELATIVEJUMP_SIZE); diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 819ab3f9c9c7..ca6e65250b1a 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -519,6 +519,7 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr, goto overflow; break; case R_X86_64_PC32: + case R_X86_64_PLT32: value -= (u64)address; *(u32 *)location = value; break; diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 005c03e93fc5..94779f66bf49 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -170,19 +170,28 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, case R_X86_64_NONE: break; case R_X86_64_64: + if (*(u64 *)loc != 0) + goto invalid_relocation; *(u64 *)loc = val; break; case R_X86_64_32: + if (*(u32 *)loc != 0) + goto invalid_relocation; *(u32 *)loc = val; if (val != *(u32 *)loc) goto overflow; break; case R_X86_64_32S: + if (*(s32 *)loc != 0) + goto invalid_relocation; *(s32 *)loc = val; if ((s64)val != *(s32 *)loc) goto overflow; break; case R_X86_64_PC32: + case R_X86_64_PLT32: + if (*(u32 *)loc != 0) + goto invalid_relocation; val -= (u64)loc; *(u32 *)loc = val; #if 0 @@ -198,6 +207,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, } return 0; +invalid_relocation: + pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n", + (int)ELF64_R_TYPE(rel[i].r_info), loc, val); + return -ENOEXEC; + overflow: pr_err("overflow in relocation type %d val %Lx\n", (int)ELF64_R_TYPE(rel[i].r_info), val); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 22b81f35c500..1fbd2631be60 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -480,7 +480,6 @@ do_general_protection(struct pt_regs *regs, long error_code) } NOKPROBE_SYMBOL(do_general_protection); -/* May run on IST stack. 
*/ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) { #ifdef CONFIG_DYNAMIC_FTRACE @@ -495,7 +494,15 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) if (poke_int3_handler(regs)) return; + /* + * Use ist_enter despite the fact that we don't use an IST stack. + * We can be called from a kprobe in non-CONTEXT_KERNEL kernel + * mode or even during context tracking state changes. + * + * This means that we can't schedule. That's okay. + */ ist_enter(regs); + RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, @@ -512,15 +519,9 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) SIGTRAP) == NOTIFY_STOP) goto exit; - /* - * Let others (NMI) know that the debug stack is in use - * as we may switch to the interrupt stack. - */ - debug_stack_usage_inc(); preempt_conditional_sti(regs); do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); preempt_conditional_cli(regs); - debug_stack_usage_dec(); exit: ist_exit(regs); } @@ -886,19 +887,16 @@ void __init trap_init(void) cpu_init(); /* - * X86_TRAP_DB and X86_TRAP_BP have been set - * in early_trap_init(). However, ITS works only after - * cpu_init() loads TSS. See comments in early_trap_init(). + * X86_TRAP_DB was installed in early_trap_init(). However, + * IST works only after cpu_init() loads TSS. See comments + * in early_trap_init(). */ set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK); - /* int3 can be called from all */ - set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK); x86_init.irqs.trap_init(); #ifdef CONFIG_X86_64 memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16); set_nmi_gate(X86_TRAP_DB, &debug); - set_nmi_gate(X86_TRAP_BP, &int3); #endif } diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index c7c4d9c51e99..eb02087650d2 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -365,6 +365,8 @@ static int __init tsc_setup(char *str) tsc_clocksource_reliable = 1; if (!strncmp(str, "noirqtime", 9)) no_sched_irq_time = 1; + if (!strcmp(str, "unstable")) + mark_tsc_unstable("boot parameter"); return 1; } diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 510e80da7de4..af57736a0309 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -715,7 +715,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code) return; check_vip: - if (VEFLAGS & X86_EFLAGS_VIP) { + if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) == + (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) { save_v86_state(regs, VM86_STI); return; } diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 2038e5bacce6..42654375b73f 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1386,6 +1386,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, */ if (var->unusable) var->db = 0; + /* This is symmetric with svm_set_segment() */ var->dpl = to_svm(vcpu)->vmcb->save.cpl; break; } @@ -1531,18 +1532,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, s->base = var->base; s->limit = var->limit; s->selector = var->selector; - if (var->unusable) - s->attrib = 0; - else { - s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); - s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; - s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; - s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT; - s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; - s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; - 
s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; - s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; - } + s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); + s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; + s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; + s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; + s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; + s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; + s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; + s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; /* * This is always accurate, except if SYSRET returned to a segment @@ -1551,7 +1548,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, * would entail passing the CPL to userspace and back. */ if (seg == VCPU_SREG_SS) - svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; + /* This is symmetric with svm_get_segment() */ + svm->vmcb->save.cpl = (var->dpl & 3); mark_dirty(svm->vmcb, VMCB_SEG); } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 849517805eef..528b4352fa99 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1011,6 +1011,13 @@ static inline bool is_machine_check(u32 intr_info) (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); } +/* Undocumented: icebp/int1 */ +static inline bool is_icebp(u32 intr_info) +{ + return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) + == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK); +} + static inline bool cpu_has_vmx_msr_bitmap(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; @@ -5333,7 +5340,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6 | DR6_RTM; - if (!(dr6 & ~DR6_RESERVED)) /* icebp */ + if (is_icebp(intr_info)) skip_emulated_instruction(vcpu); kvm_queue_exception(vcpu, DB_VECTOR); @@ -7650,11 +7657,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, { unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); int cr = exit_qualification & 15; - int reg = (exit_qualification >> 8) & 15; - unsigned long val = kvm_register_readl(vcpu, reg); + int reg; + unsigned long val; switch ((exit_qualification >> 4) & 3) { case 0: /* mov to cr */ + reg = (exit_qualification >> 8) & 15; + val = kvm_register_readl(vcpu, reg); switch (cr) { case 0: if (vmcs12->cr0_guest_host_mask & @@ -7709,6 +7718,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, * lmsw can change bits 1..3 of cr0, and only set bit 0 of * cr0. Other attempted changes are ignored, with no exit. */ + val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; if (vmcs12->cr0_guest_host_mask & 0xe & (val ^ vmcs12->cr0_read_shadow)) return true; diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S index 7e48807b2fa1..45a53dfe1859 100644 --- a/arch/x86/lib/csum-copy_64.S +++ b/arch/x86/lib/csum-copy_64.S @@ -55,7 +55,7 @@ ENTRY(csum_partial_copy_generic) movq %r12, 3*8(%rsp) movq %r14, 4*8(%rsp) movq %r13, 5*8(%rsp) - movq %rbp, 6*8(%rsp) + movq %r15, 6*8(%rsp) movq %r8, (%rsp) movq %r9, 1*8(%rsp) @@ -74,7 +74,7 @@ ENTRY(csum_partial_copy_generic) /* main loop. 
clear in 64 byte blocks */ /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */ /* r11: temp3, rdx: temp4, r12 loopcnt */ - /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */ + /* r10: temp5, r15: temp6, r14 temp7, r13 temp8 */ .p2align 4 .Lloop: source @@ -89,7 +89,7 @@ ENTRY(csum_partial_copy_generic) source movq 32(%rdi), %r10 source - movq 40(%rdi), %rbp + movq 40(%rdi), %r15 source movq 48(%rdi), %r14 source @@ -103,7 +103,7 @@ ENTRY(csum_partial_copy_generic) adcq %r11, %rax adcq %rdx, %rax adcq %r10, %rax - adcq %rbp, %rax + adcq %r15, %rax adcq %r14, %rax adcq %r13, %rax @@ -121,7 +121,7 @@ ENTRY(csum_partial_copy_generic) dest movq %r10, 32(%rsi) dest - movq %rbp, 40(%rsi) + movq %r15, 40(%rsi) dest movq %r14, 48(%rsi) dest @@ -203,7 +203,7 @@ ENTRY(csum_partial_copy_generic) movq 3*8(%rsp), %r12 movq 4*8(%rsp), %r14 movq 5*8(%rsp), %r13 - movq 6*8(%rsp), %rbp + movq 6*8(%rsp), %r15 addq $7*8, %rsp ret diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index e830c71a1323..e0a34b0d381e 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -287,7 +287,7 @@ static noinline int vmalloc_fault(unsigned long address) if (!pmd_k) return -1; - if (pmd_huge(*pmd_k)) + if (pmd_large(*pmd_k)) return 0; pte_k = pte_offset_kernel(pmd_k, address); @@ -407,7 +407,7 @@ static noinline int vmalloc_fault(unsigned long address) if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref)) BUG(); - if (pud_huge(*pud)) + if (pud_large(*pud)) return 0; pmd = pmd_offset(pud, address); @@ -418,7 +418,7 @@ static noinline int vmalloc_fault(unsigned long address) if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) BUG(); - if (pmd_huge(*pmd)) + if (pmd_large(*pmd)) return 0; pte_ref = pte_offset_kernel(pmd_ref, address); diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index dbc27a2b4ad5..c013326a0d7a 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -666,4 +666,52 @@ int pmd_clear_huge(pmd_t *pmd) return 0; } + +/** + * pud_free_pmd_page - Clear pud entry and free pmd page. + * @pud: Pointer to a PUD. + * + * Context: The pud range has been unmaped and TLB purged. + * Return: 1 if clearing the entry succeeded. 0 otherwise. + */ +int pud_free_pmd_page(pud_t *pud) +{ + pmd_t *pmd; + int i; + + if (pud_none(*pud)) + return 1; + + pmd = (pmd_t *)pud_page_vaddr(*pud); + + for (i = 0; i < PTRS_PER_PMD; i++) + if (!pmd_free_pte_page(&pmd[i])) + return 0; + + pud_clear(pud); + free_page((unsigned long)pmd); + + return 1; +} + +/** + * pmd_free_pte_page - Clear pmd entry and free pte page. + * @pmd: Pointer to a PMD. + * + * Context: The pmd range has been unmaped and TLB purged. + * Return: 1 if clearing the entry succeeded. 0 otherwise. 
+ */ +int pmd_free_pte_page(pmd_t *pmd) +{ + pte_t *pte; + + if (pmd_none(*pmd)) + return 1; + + pte = (pte_t *)pmd_page_vaddr(*pmd); + pmd_clear(pmd); + free_page((unsigned long)pte); + + return 1; +} #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 33c42b826791..dd9a861fd526 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -12,6 +12,7 @@ #include <linux/filter.h> #include <linux/if_vlan.h> #include <asm/cacheflush.h> +#include <asm/nospec-branch.h> #include <linux/bpf.h> int bpf_jit_enable __read_mostly; @@ -269,7 +270,7 @@ static void emit_bpf_tail_call(u8 **pprog) EMIT2(0x89, 0xD2); /* mov edx, edx */ EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ offsetof(struct bpf_array, map.max_entries)); -#define OFFSET1 43 /* number of bytes to jump */ +#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */ EMIT2(X86_JBE, OFFSET1); /* jbe out */ label1 = cnt; @@ -278,7 +279,7 @@ static void emit_bpf_tail_call(u8 **pprog) */ EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ -#define OFFSET2 32 +#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE) EMIT2(X86_JA, OFFSET2); /* ja out */ label2 = cnt; EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ @@ -292,7 +293,7 @@ static void emit_bpf_tail_call(u8 **pprog) * goto out; */ EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ -#define OFFSET3 10 +#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE) EMIT2(X86_JE, OFFSET3); /* je out */ label3 = cnt; @@ -305,7 +306,7 @@ static void emit_bpf_tail_call(u8 **pprog) * rdi == ctx (1st arg) * rax == prog->bpf_func + prologue_size */ - EMIT2(0xFF, 0xE0); /* jmp rax */ + RETPOLINE_RAX_BPF_JIT(); /* out: */ BUILD_BUG_ON(cnt - label1 != OFFSET1); @@ -1076,7 +1077,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog) * may converge on the last pass. In such case do one more * pass to emit the final image */ - for (pass = 0; pass < 10 || image; pass++) { + for (pass = 0; pass < 20 || image; pass++) { proglen = do_jit(prog, addrs, image, oldproglen, &ctx); if (proglen <= 0) { image = NULL; @@ -1099,6 +1100,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog) goto out; } oldproglen = proglen; + cond_resched(); } if (bpf_jit_enable > 1) diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 73eb7fd4aec4..5b6c8486a0be 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -769,9 +769,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym, break; case R_X86_64_PC32: + case R_X86_64_PLT32: /* * PC relative relocations don't need to be adjusted unless * referencing a percpu symbol. + * + * NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32. */ if (is_percpu_sym(sym, symname)) add_reloc(&relocs32neg, offset); diff --git a/block/bio-integrity.c b/block/bio-integrity.c index f6325d573c10..6e091ccadcd4 100644 --- a/block/bio-integrity.c +++ b/block/bio-integrity.c @@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio) if (!bio_is_rw(bio)) return false; + if (!bio_sectors(bio)) + return false; + /* Already protected? 
*/ if (bio_integrity(bio)) return false; diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 37ade035c956..9075f08b392a 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1078,10 +1078,8 @@ int blkcg_init_queue(struct request_queue *q) if (preloaded) radix_tree_preload_end(); - if (IS_ERR(blkg)) { - blkg_free(new_blkg); + if (IS_ERR(blkg)) return PTR_ERR(blkg); - } q->root_blkg = blkg; q->root_rl.blkg = blkg; diff --git a/block/blk-mq.c b/block/blk-mq.c index 1452db06ba45..d65ddc18d7e4 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1252,13 +1252,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) blk_queue_bounce(q, &bio); + blk_queue_split(q, &bio, q->bio_split); + if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { bio_io_error(bio); return BLK_QC_T_NONE; } - blk_queue_split(q, &bio, q->bio_split); - if (!is_flush_fua && !blk_queue_nomerges(q) && blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) return BLK_QC_T_NONE; @@ -1634,7 +1634,8 @@ static void blk_mq_exit_hctx(struct request_queue *q, { unsigned flush_start_tag = set->queue_depth; - blk_mq_tag_idle(hctx); + if (blk_mq_hw_queue_mapped(hctx)) + blk_mq_tag_idle(hctx); if (set->ops->exit_request) set->ops->exit_request(set->driver_data, diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 2149a1ddbacf..17bdd6b55beb 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -505,6 +505,17 @@ static void throtl_dequeue_tg(struct throtl_grp *tg) static void throtl_schedule_pending_timer(struct throtl_service_queue *sq, unsigned long expires) { + unsigned long max_expire = jiffies + 8 * throtl_slice; + + /* + * Since we are adjusting the throttle limit dynamically, the sleep + * time calculated according to previous limit might be invalid. It's + * possible the cgroup sleep time is very long and no other cgroups + * have IO running so notify the limit changes. Make sure the cgroup + * doesn't sleep too long to avoid the missed notification. + */ + if (time_after(expires, max_expire)) + expires = max_expire; mod_timer(&sq->pending_timer, expires); throtl_log(sq, "schedule timer. 
delay=%lu jiffies=%lu", expires - jiffies, jiffies); diff --git a/block/partition-generic.c b/block/partition-generic.c index 19cf33b91a5a..f75be6a4411f 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -320,8 +320,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, if (info) { struct partition_meta_info *pinfo = alloc_part_info(disk); - if (!pinfo) + if (!pinfo) { + err = -ENOMEM; goto out_free_stats; + } memcpy(pinfo, info, sizeof(*info)); p->info = pinfo; } diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c index 5610cd537da7..7d8d50c11ce7 100644 --- a/block/partitions/msdos.c +++ b/block/partitions/msdos.c @@ -300,7 +300,9 @@ static void parse_bsd(struct parsed_partitions *state, continue; bsd_start = le32_to_cpu(p->p_offset); bsd_size = le32_to_cpu(p->p_size); - if (memcmp(flavour, "bsd\0", 4) == 0) + /* FreeBSD has relative offset if C partition offset is zero */ + if (memcmp(flavour, "bsd\0", 4) == 0 && + le32_to_cpu(l->d_partitions[2].p_offset) == 0) bsd_start += offset; if (offset == bsd_start && size == bsd_size) /* full parent partition, we have it already */ diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64 new file mode 100644 index 000000000000..b3d89109fe75 --- /dev/null +++ b/build.config.cuttlefish.x86_64 @@ -0,0 +1,15 @@ +ARCH=x86_64 +BRANCH=android-4.4 +CLANG_TRIPLE=x86_64-linux-gnu- +CROSS_COMPILE=x86_64-linux-androidkernel- +DEFCONFIG=x86_64_cuttlefish_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +POST_DEFCONFIG_CMDS="check_defconfig" +CLANG_PREBUILT_BIN=prebuilts/clang/host/linux-x86/clang-4630689/bin +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin +FILES=" +arch/x86/boot/bzImage +vmlinux +System.map +" diff --git a/crypto/ahash.c b/crypto/ahash.c index 7006dbfd39bd..6978ad86e516 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -91,13 +91,14 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) if (nbytes && walk->offset & alignmask && !err) { walk->offset = ALIGN(walk->offset, alignmask + 1); - walk->data += walk->offset; - nbytes = min(nbytes, ((unsigned int)(PAGE_SIZE)) - walk->offset); walk->entrylen -= nbytes; - return nbytes; + if (nbytes) { + walk->data += walk->offset; + return nbytes; + } } if (walk->flags & CRYPTO_ALG_ASYNC) diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index 84f8d4d8b6bc..09f706b7b06e 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -62,9 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan, dma_addr_t dma_dest[2]; int src_off = 0; - if (submit->flags & ASYNC_TX_FENCE) - dma_flags |= DMA_PREP_FENCE; - while (src_cnt > 0) { submit->flags = flags_orig; pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags)); @@ -83,6 +80,8 @@ do_async_gen_syndrome(struct dma_chan *chan, if (cb_fn_orig) dma_flags |= DMA_PREP_INTERRUPT; } + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; /* Drivers force forward progress in case they can not provide * a descriptor diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index 10ce48e16ebf..d830705f8a18 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -180,6 +180,12 @@ acpi_status acpi_enable_event(u32 event, u32 flags) ACPI_FUNCTION_TRACE(acpi_enable_event); + /* If Hardware Reduced flag is set, there are no fixed events */ + + if (acpi_gbl_reduced_hardware) { + return_ACPI_STATUS(AE_OK); + } + /* Decode the Fixed Event */ if (event > 
ACPI_EVENT_MAX) { @@ -237,6 +243,12 @@ acpi_status acpi_disable_event(u32 event, u32 flags) ACPI_FUNCTION_TRACE(acpi_disable_event); + /* If Hardware Reduced flag is set, there are no fixed events */ + + if (acpi_gbl_reduced_hardware) { + return_ACPI_STATUS(AE_OK); + } + /* Decode the Fixed Event */ if (event > ACPI_EVENT_MAX) { @@ -290,6 +302,12 @@ acpi_status acpi_clear_event(u32 event) ACPI_FUNCTION_TRACE(acpi_clear_event); + /* If Hardware Reduced flag is set, there are no fixed events */ + + if (acpi_gbl_reduced_hardware) { + return_ACPI_STATUS(AE_OK); + } + /* Decode the Fixed Event */ if (event > ACPI_EVENT_MAX) { diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index e54bc2aa7a88..a05b3b79b987 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -121,6 +121,9 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) (u32)(aml_offset + sizeof(struct acpi_table_header))); + ACPI_ERROR((AE_INFO, + "Aborting disassembly, AML byte code is corrupt")); + /* Dump the context surrounding the invalid opcode */ acpi_ut_dump_buffer(((u8 *)walk_state->parser_state. @@ -129,6 +132,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) sizeof(struct acpi_table_header) - 16)); acpi_os_printf(" */\n"); + + /* + * Just abort the disassembly, cannot continue because the + * parser is essentially lost. The disassembler can then + * randomly fail because an ill-constructed parse tree + * can result. + */ + return_ACPI_STATUS(AE_AML_BAD_OPCODE); #endif } @@ -293,6 +304,9 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state, if (status == AE_CTRL_PARSE_CONTINUE) { return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE); } + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } /* Create Op structure and append to parent's argument list */ diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index d176e0ece470..2946e2846573 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm) */ int acpi_map_pxm_to_online_node(int pxm) { - int node, n, dist, min_dist; + int node, min_node; node = acpi_map_pxm_to_node(pxm); if (node == NUMA_NO_NODE) node = 0; + min_node = node; if (!node_online(node)) { - min_dist = INT_MAX; + int min_dist = INT_MAX, dist, n; + for_each_online_node(n) { dist = node_distance(node, n); if (dist < min_dist) { min_dist = dist; - node = n; + min_node = n; } } } - return node; + return min_node; } EXPORT_SYMBOL(acpi_map_pxm_to_online_node); diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 8a10a7ae6a8a..c8e169e46673 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -131,9 +131,6 @@ static void do_prt_fixups(struct acpi_prt_entry *entry, quirk = &prt_quirks[i]; /* All current quirks involve link devices, not GSIs */ - if (!prt->source) - continue; - if (dmi_check_system(quirk->system) && entry->id.segment == quirk->segment && entry->id.bus == quirk->bus && diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c index 6a082d4de12c..24a793957bc0 100644 --- a/drivers/acpi/pmic/intel_pmic_xpower.c +++ b/drivers/acpi/pmic/intel_pmic_xpower.c @@ -28,97 +28,97 @@ static struct pmic_table power_table[] = { .address = 0x00, .reg = 0x13, .bit = 0x05, - }, + }, /* ALD1 */ { .address = 0x04, .reg = 0x13, .bit = 0x06, - }, + }, /* ALD2 */ { .address = 0x08, .reg = 0x13, .bit = 0x07, - }, + }, /* ALD3 */ { .address = 0x0c, .reg = 0x12, .bit = 0x03, - }, + }, /* DLD1 */ { 
.address = 0x10, .reg = 0x12, .bit = 0x04, - }, + }, /* DLD2 */ { .address = 0x14, .reg = 0x12, .bit = 0x05, - }, + }, /* DLD3 */ { .address = 0x18, .reg = 0x12, .bit = 0x06, - }, + }, /* DLD4 */ { .address = 0x1c, .reg = 0x12, .bit = 0x00, - }, + }, /* ELD1 */ { .address = 0x20, .reg = 0x12, .bit = 0x01, - }, + }, /* ELD2 */ { .address = 0x24, .reg = 0x12, .bit = 0x02, - }, + }, /* ELD3 */ { .address = 0x28, .reg = 0x13, .bit = 0x02, - }, + }, /* FLD1 */ { .address = 0x2c, .reg = 0x13, .bit = 0x03, - }, + }, /* FLD2 */ { .address = 0x30, .reg = 0x13, .bit = 0x04, - }, + }, /* FLD3 */ { - .address = 0x38, + .address = 0x34, .reg = 0x10, .bit = 0x03, - }, + }, /* BUC1 */ { - .address = 0x3c, + .address = 0x38, .reg = 0x10, .bit = 0x06, - }, + }, /* BUC2 */ { - .address = 0x40, + .address = 0x3c, .reg = 0x10, .bit = 0x05, - }, + }, /* BUC3 */ { - .address = 0x44, + .address = 0x40, .reg = 0x10, .bit = 0x04, - }, + }, /* BUC4 */ { - .address = 0x48, + .address = 0x44, .reg = 0x10, .bit = 0x01, - }, + }, /* BUC5 */ { - .address = 0x4c, + .address = 0x48, .reg = 0x10, .bit = 0x00 - }, + }, /* BUC6 */ }; /* TMP0 - TMP5 are the same, all from GPADC */ diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 11154a330f07..c9bf74982688 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -259,6 +259,9 @@ static int __acpi_processor_start(struct acpi_device *device) if (ACPI_SUCCESS(status)) return 0; + result = -ENODEV; + acpi_pss_perf_exit(pr, device); + err_power_exit: acpi_processor_power_exit(pr); return result; @@ -267,11 +270,16 @@ err_power_exit: static int acpi_processor_start(struct device *dev) { struct acpi_device *device = ACPI_COMPANION(dev); + int ret; if (!device) return -ENODEV; - return __acpi_processor_start(device); + /* Protect against concurrent CPU hotplug operations */ + get_online_cpus(); + ret = __acpi_processor_start(device); + put_online_cpus(); + return ret; } static int acpi_processor_stop(struct device *dev) diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index c72e64893d03..93d72413d844 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -62,8 +62,8 @@ struct acpi_processor_throttling_arg { #define THROTTLING_POSTCHANGE (2) static int acpi_processor_get_throttling(struct acpi_processor *pr); -int acpi_processor_set_throttling(struct acpi_processor *pr, - int state, bool force); +static int __acpi_processor_set_throttling(struct acpi_processor *pr, + int state, bool force, bool direct); static int acpi_processor_update_tsd_coord(void) { @@ -891,7 +891,8 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid throttling state, reset\n")); state = 0; - ret = acpi_processor_set_throttling(pr, state, true); + ret = __acpi_processor_set_throttling(pr, state, true, + true); if (ret) return ret; } @@ -901,36 +902,31 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) return 0; } -static int acpi_processor_get_throttling(struct acpi_processor *pr) +static long __acpi_processor_get_throttling(void *data) { - cpumask_var_t saved_mask; - int ret; + struct acpi_processor *pr = data; + + return pr->throttling.acpi_processor_get_throttling(pr); +} +static int acpi_processor_get_throttling(struct acpi_processor *pr) +{ if (!pr) return -EINVAL; if (!pr->flags.throttling) return -ENODEV; - if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) - return -ENOMEM; - /* - * 
Migrate task to the cpu pointed by pr. + * This is either called from the CPU hotplug callback of + * processor_driver or via the ACPI probe function. In the latter + * case the CPU is not guaranteed to be online. Both call sites are + * protected against CPU hotplug. */ - cpumask_copy(saved_mask, &current->cpus_allowed); - /* FIXME: use work_on_cpu() */ - if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { - /* Can't migrate to the target pr->id CPU. Exit */ - free_cpumask_var(saved_mask); + if (!cpu_online(pr->id)) return -ENODEV; - } - ret = pr->throttling.acpi_processor_get_throttling(pr); - /* restore the previous state */ - set_cpus_allowed_ptr(current, saved_mask); - free_cpumask_var(saved_mask); - return ret; + return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr); } static int acpi_processor_get_fadt_info(struct acpi_processor *pr) @@ -1080,8 +1076,15 @@ static long acpi_processor_throttling_fn(void *data) arg->target_state, arg->force); } -int acpi_processor_set_throttling(struct acpi_processor *pr, - int state, bool force) +static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct) +{ + if (direct) + return fn(arg); + return work_on_cpu(cpu, fn, arg); +} + +static int __acpi_processor_set_throttling(struct acpi_processor *pr, + int state, bool force, bool direct) { int ret = 0; unsigned int i; @@ -1130,7 +1133,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, arg.pr = pr; arg.target_state = state; arg.force = force; - ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg); + ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg, + direct); } else { /* * When the T-state coordination is SW_ALL or HW_ALL, @@ -1163,8 +1167,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, arg.pr = match_pr; arg.target_state = state; arg.force = force; - ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, - &arg); + ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, + &arg, direct); } } /* @@ -1182,6 +1186,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, return ret; } +int acpi_processor_set_throttling(struct acpi_processor *pr, int state, + bool force) +{ + return __acpi_processor_set_throttling(pr, state, force, false); +} + int acpi_processor_get_throttling_info(struct acpi_processor *pr) { int result = 0; diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index f0099360039e..42086ad535c5 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -82,7 +82,8 @@ static ssize_t driver_override_store(struct device *_dev, struct amba_device *dev = to_amba_device(_dev); char *driver_override, *old = dev->driver_override, *cp; - if (count > PATH_MAX) + /* We need to keep extra room for a newline */ + if (count >= (PAGE_SIZE - 1)) return -EINVAL; driver_override = kstrndup(buf, count, GFP_KERNEL); diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 9f62841a8f10..db6a51427b04 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2148,8 +2148,14 @@ static void binder_send_failed_reply(struct binder_transaction *t, &target_thread->reply_error.work); wake_up_interruptible(&target_thread->wait); } else { - WARN(1, "Unexpected reply error: %u\n", - target_thread->reply_error.cmd); + /* + * Cannot get here for normal operation, but + * we can if multiple synchronous transactions + * are sent without blocking for responses. + * Just ignore the 2nd error in this case. 
+ */ + pr_warn("Unexpected reply error: %u\n", + target_thread->reply_error.cmd); } binder_inner_proc_unlock(target_thread->proc); binder_thread_dec_tmpref(target_thread); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 8ddf5d5c94fd..5a6a01135470 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -538,7 +538,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), .driver_data = board_ahci_yes_fbs }, - { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), + { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */ + .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */ .driver_data = board_ahci_yes_fbs }, /* Promise */ diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index aaa761b9081c..cd2eab6aa92e 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev, irq = platform_get_irq(pdev, 0); if (irq <= 0) { - dev_err(dev, "no irq\n"); - return -EINVAL; + if (irq != -EPROBE_DEFER) + dev_err(dev, "no irq\n"); + return irq; } hpriv->irq = irq; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 69ec1c5d7152..2d677ba46d77 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4224,6 +4224,25 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, + /* Crucial BX100 SSD 500GB has broken LPM support */ + { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM }, + + /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */ + { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + /* 512GB MX100 with newer firmware has only LPM issues */ + { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + + /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */ + { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + /* devices that don't properly handle queued TRIM commands */ { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, @@ -4235,7 +4254,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, - { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, @@ -5077,8 +5098,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc) * We guarantee to LLDs that they will have at least one * non-zero sg if the command is a data command. 
*/ - if (WARN_ON_ONCE(ata_is_data(prot) && - (!qc->sg || !qc->n_elem || !qc->nbytes))) + if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)) goto sys_err; if (ata_is_dma(prot) || (ata_is_pio(prot) && diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 5b2aee83d776..4a267347a6d9 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3472,7 +3472,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { /* relay SCSI command to ATAPI device */ int len = COMMAND_SIZE(scsi_op); - if (unlikely(len > scmd->cmd_len || len > dev->cdb_len)) + if (unlikely(len > scmd->cmd_len || + len > dev->cdb_len || + scmd->cmd_len > ATAPI_CDB_LEN)) goto bad_cdb_len; xlat_func = atapi_xlat; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 1c36de9719e5..1dd16f26e77d 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -263,7 +263,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) struct iov_iter i; ssize_t bw; - iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); + iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len); file_start_write(file); bw = vfs_iter_write(file, &i, ppos); @@ -623,6 +623,9 @@ static int loop_switch(struct loop_device *lo, struct file *file) */ static int loop_flush(struct loop_device *lo) { + /* loop not yet configured, no running thread, nothing to flush */ + if (lo->lo_state != Lo_bound) + return 0; return loop_switch(lo, NULL); } diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 54cef3dc0beb..7fca7cfd5b09 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -216,7 +216,6 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, @@ -247,6 +246,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, /* QCA ROME chipset */ + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME }, diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 71325e443e46..8a3bf0a8c31d 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -936,6 +936,9 @@ static int qca_setup(struct hci_uart *hu) if (!ret) { set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); qca_debugfs_init(hdev); + } else if (ret == -ENOENT) { + /* No patch/nvm-config found, run with original fw/config */ + ret = 0; } /* Setup bdaddr */ diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c index f364fa4d24eb..f59183018280 100644 --- a/drivers/bus/brcmstb_gisb.c +++ b/drivers/bus/brcmstb_gisb.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2014 Broadcom Corporation + * Copyright (C) 2014-2017 Broadcom * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -33,8 +33,6 @@ #define ARB_ERR_CAP_CLEAR (1 
<< 0) #define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12) #define ARB_ERR_CAP_STATUS_TEA (1 << 11) -#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2) -#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c #define ARB_ERR_CAP_STATUS_WRITE (1 << 1) #define ARB_ERR_CAP_STATUS_VALID (1 << 0) @@ -43,7 +41,6 @@ enum { ARB_ERR_CAP_CLR, ARB_ERR_CAP_HI_ADDR, ARB_ERR_CAP_ADDR, - ARB_ERR_CAP_DATA, ARB_ERR_CAP_STATUS, ARB_ERR_CAP_MASTER, }; @@ -53,7 +50,6 @@ static const int gisb_offsets_bcm7038[] = { [ARB_ERR_CAP_CLR] = 0x0c4, [ARB_ERR_CAP_HI_ADDR] = -1, [ARB_ERR_CAP_ADDR] = 0x0c8, - [ARB_ERR_CAP_DATA] = 0x0cc, [ARB_ERR_CAP_STATUS] = 0x0d0, [ARB_ERR_CAP_MASTER] = -1, }; @@ -63,7 +59,6 @@ static const int gisb_offsets_bcm7400[] = { [ARB_ERR_CAP_CLR] = 0x0c8, [ARB_ERR_CAP_HI_ADDR] = -1, [ARB_ERR_CAP_ADDR] = 0x0cc, - [ARB_ERR_CAP_DATA] = 0x0d0, [ARB_ERR_CAP_STATUS] = 0x0d4, [ARB_ERR_CAP_MASTER] = 0x0d8, }; @@ -73,7 +68,6 @@ static const int gisb_offsets_bcm7435[] = { [ARB_ERR_CAP_CLR] = 0x168, [ARB_ERR_CAP_HI_ADDR] = -1, [ARB_ERR_CAP_ADDR] = 0x16c, - [ARB_ERR_CAP_DATA] = 0x170, [ARB_ERR_CAP_STATUS] = 0x174, [ARB_ERR_CAP_MASTER] = 0x178, }; @@ -83,7 +77,6 @@ static const int gisb_offsets_bcm7445[] = { [ARB_ERR_CAP_CLR] = 0x7e4, [ARB_ERR_CAP_HI_ADDR] = 0x7e8, [ARB_ERR_CAP_ADDR] = 0x7ec, - [ARB_ERR_CAP_DATA] = 0x7f0, [ARB_ERR_CAP_STATUS] = 0x7f4, [ARB_ERR_CAP_MASTER] = 0x7f8, }; @@ -105,9 +98,13 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg) { int offset = gdev->gisb_offsets[reg]; - /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */ - if (offset == -1) - return 1; + if (offset < 0) { + /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */ + if (reg == ARB_ERR_CAP_MASTER) + return 1; + else + return 0; + } if (gdev->big_endian) return ioread32be(gdev->base + offset); @@ -115,6 +112,16 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg) return ioread32(gdev->base + offset); } +static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev) +{ + u64 value; + + value = gisb_read(gdev, ARB_ERR_CAP_ADDR); + value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32; + + return value; +} + static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg) { int offset = gdev->gisb_offsets[reg]; @@ -123,9 +130,9 @@ static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg) return; if (gdev->big_endian) - iowrite32be(val, gdev->base + reg); + iowrite32be(val, gdev->base + offset); else - iowrite32(val, gdev->base + reg); + iowrite32(val, gdev->base + offset); } static ssize_t gisb_arb_get_timeout(struct device *dev, @@ -181,7 +188,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev, const char *reason) { u32 cap_status; - unsigned long arb_addr; + u64 arb_addr; u32 master; const char *m_name; char m_fmt[11]; @@ -193,10 +200,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev, return 1; /* Read the address and master */ - arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff; -#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT)) - arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32; -#endif + arb_addr = gisb_read_address(gdev); master = gisb_read(gdev, ARB_ERR_CAP_MASTER); m_name = brcmstb_gisb_master_to_str(gdev, master); @@ -205,7 +209,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev, m_name = m_fmt; } - pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n", + pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n", __func__, reason, arb_addr, cap_status & 
ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R', cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "", diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 1341a94cc779..76afc841232c 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -859,6 +859,8 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, } } wmb(); + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); } EXPORT_SYMBOL(intel_gtt_insert_sg_entries); diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c index 16ff781cde65..b0b36d00415d 100644 --- a/drivers/char/diag/diag_dci.c +++ b/drivers/char/diag/diag_dci.c @@ -689,7 +689,7 @@ int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry, byte_mask = 0x01 << (item_num % 8); offset = equip_id * 514; - if (offset + byte_index > DCI_LOG_MASK_SIZE) { + if (offset + byte_index >= DCI_LOG_MASK_SIZE) { pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n", __func__, offset, log_code, byte_index); return 0; @@ -716,7 +716,7 @@ int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry, bit_index = event_id % 8; byte_mask = 0x1 << bit_index; - if (byte_index > DCI_EVENT_MASK_SIZE) { + if (byte_index >= DCI_EVENT_MASK_SIZE) { pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n", __func__, event_id, byte_index); return 0; diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index f53e8ba2c718..83c206f0fc98 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -409,6 +409,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags) msg = ipmi_alloc_smi_msg(); if (!msg) { ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -431,6 +432,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info, msg = ipmi_alloc_smi_msg(); if (!msg) { ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); return; } diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 40d400fe5bb7..4ada103945f0 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -515,7 +515,7 @@ static void panic_halt_ipmi_heartbeat(void) msg.cmd = IPMI_WDOG_RESET_TIMER; msg.data = NULL; msg.data_len = 0; - atomic_add(2, &panic_done_count); + atomic_add(1, &panic_done_count); rv = ipmi_request_supply_msgs(watchdog_user, (struct ipmi_addr *) &addr, 0, @@ -525,7 +525,7 @@ static void panic_halt_ipmi_heartbeat(void) &panic_halt_heartbeat_recv_msg, 1); if (rv) - atomic_sub(2, &panic_done_count); + atomic_sub(1, &panic_done_count); } static struct ipmi_smi_msg panic_halt_smi_msg = { @@ -549,12 +549,12 @@ static void panic_halt_ipmi_set_timeout(void) /* Wait for the messages to be free. 
*/ while (atomic_read(&panic_done_count) != 0) ipmi_poll_interface(watchdog_user); - atomic_add(2, &panic_done_count); + atomic_add(1, &panic_done_count); rv = i_ipmi_set_timeout(&panic_halt_smi_msg, &panic_halt_recv_msg, &send_heartbeat_now); if (rv) { - atomic_sub(2, &panic_done_count); + atomic_sub(1, &panic_done_count); printk(KERN_WARNING PFX "Unable to extend the watchdog timeout."); } else { diff --git a/drivers/char/random.c b/drivers/char/random.c index 1822472dffab..bd9fc2baa6aa 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -886,12 +886,16 @@ static void add_interrupt_bench(cycles_t start) static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) { __u32 *ptr = (__u32 *) regs; + unsigned int idx; if (regs == NULL) return 0; - if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32)) - f->reg_idx = 0; - return *(ptr + f->reg_idx++); + idx = READ_ONCE(f->reg_idx); + if (idx >= sizeof(struct pt_regs) / sizeof(__u32)) + idx = 0; + ptr += idx++; + WRITE_ONCE(f->reg_idx, idx); + return *ptr; } void add_interrupt_randomness(int irq, int irq_flags) diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index 8d626784cd8d..49e4040eeb55 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c @@ -485,7 +485,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, size_t count) { int size = 0; - int expected; + u32 expected; if (!chip) return -EBUSY; @@ -502,7 +502,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, } expected = be32_to_cpu(*(__be32 *)(buf + 2)); - if (expected > count) { + if (expected > count || expected < TPM_HEADER_SIZE) { size = -EIO; goto out; } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index aaa5fa95dede..36afc1a21699 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -1040,6 +1040,11 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) break; recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); + if (recd > num_bytes) { + total = -EFAULT; + break; + } + memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd); dest += recd; diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index 286bd090a488..389a009b83f2 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -622,6 +622,11 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, if (!rc) { data_len = be16_to_cpup( (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]); + if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) { + rc = -EFAULT; + goto out; + } + data = &buf.data[TPM_HEADER_SIZE + 6]; memcpy(payload->key, data, data_len - 1); @@ -629,6 +634,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, payload->migratable = data[data_len - 1]; } +out: tpm_buf_destroy(&buf); return rc; } diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index f2aa99e34b4b..9f12ad74a09b 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -436,7 +436,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) { int size = 0; - int expected, status; + int status; + u32 expected; if (count < TPM_HEADER_SIZE) { size = -EIO; @@ -451,7 +452,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) } expected = be32_to_cpu(*(__be32 *)(buf + 2)); - if ((size_t) expected > count) { + if (((size_t) 
expected > count) || (expected < TPM_HEADER_SIZE)) { size = -EIO; goto out; } diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index a1e1474dda30..aedf726cbab6 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -267,7 +267,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); s32 rc; - int expected, status, burst_count, retries, size = 0; + int status; + int burst_count; + int retries; + int size = 0; + u32 expected; if (count < TPM_HEADER_SIZE) { i2c_nuvoton_ready(chip); /* return to idle */ @@ -309,7 +313,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) * to machine native */ expected = be32_to_cpu(*(__be32 *) (buf + 2)); - if (expected > count) { + if (expected > count || expected < size) { dev_err(dev, "%s() expected > count\n", __func__); size = -EIO; continue; diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 7f13221aeb30..9dd93a209ef2 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -283,7 +283,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) { int size = 0; - int expected, status; + int status; + u32 expected; if (count < TPM_HEADER_SIZE) { size = -EIO; @@ -298,7 +299,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) } expected = be32_to_cpu(*(__be32 *) (buf + 2)); - if (expected > count) { + if (expected > count || expected < TPM_HEADER_SIZE) { size = -EIO; goto out; } diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 35ab89fe9d7b..7c4b1ffe874f 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -912,8 +912,10 @@ static int bcm2835_pll_on(struct clk_hw *hw) ~A2W_PLL_CTRL_PWRDN); /* Take the PLL out of reset. */ + spin_lock(&cprman->regs_lock); cprman_write(cprman, data->cm_ctrl_reg, cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST); + spin_unlock(&cprman->regs_lock); /* Wait for the PLL to lock. */ timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS); @@ -997,9 +999,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, } /* Unmask the reference clock from the oscillator. 
*/ + spin_lock(&cprman->regs_lock); cprman_write(cprman, A2W_XOSC_CTRL, cprman_read(cprman, A2W_XOSC_CTRL) | data->reference_enable_mask); + spin_unlock(&cprman->regs_lock); if (do_ana_setup_first) bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana); diff --git a/drivers/clk/bcm/clk-ns2.c b/drivers/clk/bcm/clk-ns2.c index a564e9248814..adc14145861a 100644 --- a/drivers/clk/bcm/clk-ns2.c +++ b/drivers/clk/bcm/clk-ns2.c @@ -103,7 +103,7 @@ CLK_OF_DECLARE(ns2_genpll_src_clk, "brcm,ns2-genpll-scr", static const struct iproc_pll_ctrl genpll_sw = { .flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL, - .aon = AON_VAL(0x0, 2, 9, 8), + .aon = AON_VAL(0x0, 1, 11, 10), .reset = RESET_VAL(0x4, 2, 1), .dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 2, 3), .ndiv_int = REG_VAL(0x8, 4, 10), diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c index 43a218f35b19..4ad32ce428cf 100644 --- a/drivers/clk/clk-conf.c +++ b/drivers/clk/clk-conf.c @@ -106,7 +106,7 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier) rc = clk_set_rate(clk, rate); if (rc < 0) - pr_err("clk: couldn't set %s clk rate to %d (%d), current rate: %ld\n", + pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n", __clk_get_name(clk), rate, rc, clk_get_rate(clk)); clk_put(clk); diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c index cd0f2726f5e0..c40445488d3a 100644 --- a/drivers/clk/clk-scpi.c +++ b/drivers/clk/clk-scpi.c @@ -71,15 +71,15 @@ static const struct clk_ops scpi_clk_ops = { }; /* find closest match to given frequency in OPP table */ -static int __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate) +static long __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate) { int idx; - u32 fmin = 0, fmax = ~0, ftmp; + unsigned long fmin = 0, fmax = ~0, ftmp; const struct scpi_opp *opp = clk->info->opps; for (idx = 0; idx < clk->info->count; idx++, opp++) { ftmp = opp->freq; - if (ftmp >= (u32)rate) { + if (ftmp >= rate) { if (ftmp <= fmax) fmax = ftmp; break; diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c index e346b223199d..a01ee9a3ed6d 100644 --- a/drivers/clk/clk-si5351.c +++ b/drivers/clk/clk-si5351.c @@ -72,7 +72,7 @@ static const char * const si5351_input_names[] = { "xtal", "clkin" }; static const char * const si5351_pll_names[] = { - "plla", "pllb", "vxco" + "si5351_plla", "si5351_pllb", "si5351_vxco" }; static const char * const si5351_msynth_names[] = { "ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7" diff --git a/drivers/clk/msm/virtclk-front.c b/drivers/clk/msm/virtclk-front.c index 4018c4922574..2d8a9e8ec61c 100644 --- a/drivers/clk/msm/virtclk-front.c +++ b/drivers/clk/msm/virtclk-front.c @@ -77,7 +77,7 @@ static int virtclk_front_get_id(struct clk *clk) } ret = habmm_socket_recv(handle, &rsp, &rsp_size, - UINT_MAX, 0); + UINT_MAX, HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); if (ret) { pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name, ret); @@ -132,7 +132,8 @@ static int virtclk_front_prepare(struct clk *clk) goto err_out; } - ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0); + ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, + HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); if (ret) { pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name, ret); @@ -185,7 +186,8 @@ static void virtclk_front_unprepare(struct clk *clk) goto err_out; } - ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0); + ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, + 
HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); if (ret) { pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name, ret); @@ -236,7 +238,8 @@ static int virtclk_front_reset(struct clk *clk, enum clk_reset_action action) goto err_out; } - ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0); + ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, + HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); if (ret) { pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name, ret); @@ -290,7 +293,8 @@ static int virtclk_front_set_rate(struct clk *clk, unsigned long rate) goto err_out; } - ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0); + ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, + HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); if (ret) { pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name, ret); @@ -362,7 +366,8 @@ static unsigned long virtclk_front_get_rate(struct clk *clk) goto err_out; } - ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0); + ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, + HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); if (ret) { ret = 0; pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name, diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index ff0c8327fabe..3c3cf8e04eea 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2013, 2016-2018, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -210,9 +210,11 @@ static unsigned long clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); + const struct freq_tbl *f_curr; u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask; - if (rcg->enable_safe_config && !clk_hw_is_prepared(hw)) { + if (rcg->enable_safe_config && (!clk_hw_is_prepared(hw) + || !clk_hw_is_enabled(hw))) { if (!rcg->current_freq) rcg->current_freq = cxo_f.freq; return rcg->current_freq; @@ -232,9 +234,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) mode >>= CFG_MODE_SHIFT; } - mask = BIT(rcg->hid_width) - 1; - hid_div = cfg >> CFG_SRC_DIV_SHIFT; - hid_div &= mask; + if (rcg->enable_safe_config) { + f_curr = qcom_find_freq(rcg->freq_tbl, rcg->current_freq); + if (!f_curr) + return -EINVAL; + + hid_div = f_curr->pre_div; + } else { + mask = BIT(rcg->hid_width) - 1; + hid_div = cfg >> CFG_SRC_DIV_SHIFT; + hid_div &= mask; + } return calc_rate(parent_rate, m, n, mode, hid_div); } diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c index 2e7f03d50f4e..95a4dd290f35 100644 --- a/drivers/clk/qcom/gcc-msm8916.c +++ b/drivers/clk/qcom/gcc-msm8916.c @@ -1437,6 +1437,7 @@ static const struct freq_tbl ftbl_codec_clk[] = { static struct clk_rcg2 codec_digcodec_clk_src = { .cmd_rcgr = 0x1c09c, + .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll1_emclk_sleep_map, .freq_tbl = ftbl_codec_clk, diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c index a4044955c68f..d388073f5f42 100644 --- a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c +++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -430,8 +430,8 @@ static void mdss_dsi_pll_14nm_input_init(struct mdss_pll_resources *pll, pdb->in.pll_ip_trim = 4; /* 4, reg: 0x0404 */ pdb->in.pll_cpcset_cur = 1; /* 1, reg: 0x04f0, bit 0 - 2 */ pdb->in.pll_cpmset_cur = 1; /* 1, reg: 0x04f0, bit 3 - 5 */ - pdb->in.pll_icpmset = 4; /* 4, reg: 0x04fc, bit 3 - 5 */ - pdb->in.pll_icpcset = 4; /* 4, reg: 0x04fc, bit 0 - 2 */ + pdb->in.pll_icpmset = 7; /* 7, reg: 0x04fc, bit 3 - 5 */ + pdb->in.pll_icpcset = 7; /* 7, reg: 0x04fc, bit 0 - 2 */ pdb->in.pll_icpmset_p = 0; /* 0, reg: 0x04f4, bit 0 - 2 */ pdb->in.pll_icpmset_m = 0; /* 0, reg: 0x04f4, bit 3 - 5 */ pdb->in.pll_icpcset_p = 0; /* 0, reg: 0x04f8, bit 0 - 2 */ diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 8b2061fca5f0..7ca79714649e 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -46,6 +46,15 @@ config CPU_FREQ_STAT_DETAILS If in doubt, say N. +config CPU_FREQ_TIMES + bool "CPU frequency time-in-state statistics" + default y + help + This driver exports CPU time-in-state information through procfs file + system. + + If in doubt, say N. + choice prompt "Default CPUFreq governor" default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 6d4a7aeb506d..4f9ba8eb0130 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -4,7 +4,10 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o cpufreq_governor_attr_set.o # CPUfreq stats obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o -# CPUfreq governors +# CPUfreq times +obj-$(CONFIG_CPU_FREQ_TIMES) += cpufreq_times.o + +# CPUfreq governors obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 5f0f983ce173..659d2029ac5a 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -19,6 +19,7 @@ #include <linux/cpu.h> #include <linux/cpufreq.h> +#include <linux/cpufreq_times.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/init.h> @@ -447,6 +448,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy, pr_debug("FREQ: %lu - CPU: %lu\n", (unsigned long)freqs->new, (unsigned long)freqs->cpu); trace_cpu_frequency(freqs->new, freqs->cpu); + cpufreq_times_record_transition(freqs); srcu_notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); if (likely(policy) && likely(policy->cpu == freqs->cpu)) @@ -1354,6 +1356,7 @@ static int cpufreq_online(unsigned int cpu) goto out_exit_policy; blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_CREATE_POLICY, policy); + cpufreq_times_create_policy(policy); write_lock_irqsave(&cpufreq_driver_lock, flags); list_add(&policy->policy_list, &cpufreq_policy_list); diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c new file mode 100644 index 000000000000..e5df7a47cc16 --- /dev/null +++ b/drivers/cpufreq/cpufreq_times.c @@ -0,0 +1,461 @@ +/* drivers/cpufreq/cpufreq_times.c + * + * Copyright (C) 2018 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/cpufreq.h> +#include <linux/cpufreq_times.h> +#include <linux/cputime.h> +#include <linux/hashtable.h> +#include <linux/init.h> +#include <linux/proc_fs.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/threads.h> + +#define UID_HASH_BITS 10 + +DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS); + +static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */ +static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */ + +struct uid_entry { + uid_t uid; + unsigned int max_state; + struct hlist_node hash; + struct rcu_head rcu; + u64 time_in_state[0]; +}; + +/** + * struct cpu_freqs - per-cpu frequency information + * @offset: start of these freqs' stats in task time_in_state array + * @max_state: number of entries in freq_table + * @last_index: index in freq_table of last frequency switched to + * @freq_table: list of available frequencies + */ +struct cpu_freqs { + unsigned int offset; + unsigned int max_state; + unsigned int last_index; + unsigned int freq_table[0]; +}; + +static struct cpu_freqs *all_freqs[NR_CPUS]; + +static unsigned int next_offset; + + +/* Caller must hold rcu_read_lock() */ +static struct uid_entry *find_uid_entry_rcu(uid_t uid) +{ + struct uid_entry *uid_entry; + + hash_for_each_possible_rcu(uid_hash_table, uid_entry, hash, uid) { + if (uid_entry->uid == uid) + return uid_entry; + } + return NULL; +} + +/* Caller must hold uid lock */ +static struct uid_entry *find_uid_entry_locked(uid_t uid) +{ + struct uid_entry *uid_entry; + + hash_for_each_possible(uid_hash_table, uid_entry, hash, uid) { + if (uid_entry->uid == uid) + return uid_entry; + } + return NULL; +} + +/* Caller must hold uid lock */ +static struct uid_entry *find_or_register_uid_locked(uid_t uid) +{ + struct uid_entry *uid_entry, *temp; + unsigned int max_state = READ_ONCE(next_offset); + size_t alloc_size = sizeof(*uid_entry) + max_state * + sizeof(uid_entry->time_in_state[0]); + + uid_entry = find_uid_entry_locked(uid); + if (uid_entry) { + if (uid_entry->max_state == max_state) + return uid_entry; + /* uid_entry->time_in_state is too small to track all freqs, so + * expand it. 
+ */ + temp = __krealloc(uid_entry, alloc_size, GFP_ATOMIC); + if (!temp) + return uid_entry; + temp->max_state = max_state; + memset(temp->time_in_state + uid_entry->max_state, 0, + (max_state - uid_entry->max_state) * + sizeof(uid_entry->time_in_state[0])); + if (temp != uid_entry) { + hlist_replace_rcu(&uid_entry->hash, &temp->hash); + kfree_rcu(uid_entry, rcu); + } + return temp; + } + + uid_entry = kzalloc(alloc_size, GFP_ATOMIC); + if (!uid_entry) + return NULL; + + uid_entry->uid = uid; + uid_entry->max_state = max_state; + + hash_add_rcu(uid_hash_table, &uid_entry->hash, uid); + + return uid_entry; +} + +static bool freq_index_invalid(unsigned int index) +{ + unsigned int cpu; + struct cpu_freqs *freqs; + + for_each_possible_cpu(cpu) { + freqs = all_freqs[cpu]; + if (!freqs || index < freqs->offset || + freqs->offset + freqs->max_state <= index) + continue; + return freqs->freq_table[index - freqs->offset] == + CPUFREQ_ENTRY_INVALID; + } + return true; +} + +static int single_uid_time_in_state_show(struct seq_file *m, void *ptr) +{ + struct uid_entry *uid_entry; + unsigned int i; + u64 time; + uid_t uid = from_kuid_munged(current_user_ns(), *(kuid_t *)m->private); + + if (uid == overflowuid) + return -EINVAL; + + rcu_read_lock(); + + uid_entry = find_uid_entry_rcu(uid); + if (!uid_entry) { + rcu_read_unlock(); + return 0; + } + + for (i = 0; i < uid_entry->max_state; ++i) { + if (freq_index_invalid(i)) + continue; + time = cputime_to_clock_t(uid_entry->time_in_state[i]); + seq_write(m, &time, sizeof(time)); + } + + rcu_read_unlock(); + + return 0; +} + +static void *uid_seq_start(struct seq_file *seq, loff_t *pos) +{ + if (*pos >= HASH_SIZE(uid_hash_table)) + return NULL; + + return &uid_hash_table[*pos]; +} + +static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + (*pos)++; + + if (*pos >= HASH_SIZE(uid_hash_table)) + return NULL; + + return &uid_hash_table[*pos]; +} + +static void uid_seq_stop(struct seq_file *seq, void *v) { } + +static int uid_time_in_state_seq_show(struct seq_file *m, void *v) +{ + struct uid_entry *uid_entry; + struct cpu_freqs *freqs, *last_freqs = NULL; + int i, cpu; + + if (v == uid_hash_table) { + seq_puts(m, "uid:"); + for_each_possible_cpu(cpu) { + freqs = all_freqs[cpu]; + if (!freqs || freqs == last_freqs) + continue; + last_freqs = freqs; + for (i = 0; i < freqs->max_state; i++) { + if (freqs->freq_table[i] == + CPUFREQ_ENTRY_INVALID) + continue; + seq_printf(m, " %d", freqs->freq_table[i]); + } + } + seq_putc(m, '\n'); + } + + rcu_read_lock(); + + hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) { + if (uid_entry->max_state) + seq_printf(m, "%d:", uid_entry->uid); + for (i = 0; i < uid_entry->max_state; ++i) { + if (freq_index_invalid(i)) + continue; + seq_printf(m, " %lu", (unsigned long)cputime_to_clock_t( + uid_entry->time_in_state[i])); + } + if (uid_entry->max_state) + seq_putc(m, '\n'); + } + + rcu_read_unlock(); + return 0; +} + +void cpufreq_task_times_init(struct task_struct *p) +{ + void *temp; + unsigned long flags; + unsigned int max_state; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + p->time_in_state = NULL; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + p->max_state = 0; + + max_state = READ_ONCE(next_offset); + + /* We use one array to avoid multiple allocs per task */ + temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC); + if (!temp) + return; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + p->time_in_state = temp; + 
spin_unlock_irqrestore(&task_time_in_state_lock, flags); + p->max_state = max_state; +} + +/* Caller must hold task_time_in_state_lock */ +static int cpufreq_task_times_realloc_locked(struct task_struct *p) +{ + void *temp; + unsigned int max_state = READ_ONCE(next_offset); + + temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC); + if (!temp) + return -ENOMEM; + p->time_in_state = temp; + memset(p->time_in_state + p->max_state, 0, + (max_state - p->max_state) * sizeof(u64)); + p->max_state = max_state; + return 0; +} + +void cpufreq_task_times_exit(struct task_struct *p) +{ + unsigned long flags; + void *temp; + + if (!p->time_in_state) + return; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + temp = p->time_in_state; + p->time_in_state = NULL; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + kfree(temp); +} + +int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *p) +{ + unsigned int cpu, i; + cputime_t cputime; + unsigned long flags; + struct cpu_freqs *freqs; + struct cpu_freqs *last_freqs = NULL; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + for_each_possible_cpu(cpu) { + freqs = all_freqs[cpu]; + if (!freqs || freqs == last_freqs) + continue; + last_freqs = freqs; + + seq_printf(m, "cpu%u\n", cpu); + for (i = 0; i < freqs->max_state; i++) { + if (freqs->freq_table[i] == CPUFREQ_ENTRY_INVALID) + continue; + cputime = 0; + if (freqs->offset + i < p->max_state && + p->time_in_state) + cputime = p->time_in_state[freqs->offset + i]; + seq_printf(m, "%u %lu\n", freqs->freq_table[i], + (unsigned long)cputime_to_clock_t(cputime)); + } + } + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + return 0; +} + +void cpufreq_acct_update_power(struct task_struct *p, cputime_t cputime) +{ + unsigned long flags; + unsigned int state; + struct uid_entry *uid_entry; + struct cpu_freqs *freqs = all_freqs[task_cpu(p)]; + uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p)); + + if (!freqs || p->flags & PF_EXITING) + return; + + state = freqs->offset + READ_ONCE(freqs->last_index); + + spin_lock_irqsave(&task_time_in_state_lock, flags); + if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) && + p->time_in_state) + p->time_in_state[state] += cputime; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + + spin_lock_irqsave(&uid_lock, flags); + uid_entry = find_or_register_uid_locked(uid); + if (uid_entry && state < uid_entry->max_state) + uid_entry->time_in_state[state] += cputime; + spin_unlock_irqrestore(&uid_lock, flags); +} + +void cpufreq_times_create_policy(struct cpufreq_policy *policy) +{ + int cpu, index; + unsigned int count = 0; + struct cpufreq_frequency_table *pos, *table; + struct cpu_freqs *freqs; + void *tmp; + + if (all_freqs[policy->cpu]) + return; + + table = cpufreq_frequency_get_table(policy->cpu); + if (!table) + return; + + cpufreq_for_each_entry(pos, table) + count++; + + tmp = kzalloc(sizeof(*freqs) + sizeof(freqs->freq_table[0]) * count, + GFP_KERNEL); + if (!tmp) + return; + + freqs = tmp; + freqs->max_state = count; + + index = cpufreq_frequency_table_get_index(policy, policy->cur); + if (index >= 0) + WRITE_ONCE(freqs->last_index, index); + + cpufreq_for_each_entry(pos, table) + freqs->freq_table[pos - table] = pos->frequency; + + freqs->offset = next_offset; + WRITE_ONCE(next_offset, freqs->offset + count); + for_each_cpu(cpu, policy->related_cpus) + all_freqs[cpu] = freqs; +} + +void cpufreq_task_times_remove_uids(uid_t 
uid_start, uid_t uid_end) +{ + struct uid_entry *uid_entry; + struct hlist_node *tmp; + unsigned long flags; + + spin_lock_irqsave(&uid_lock, flags); + + for (; uid_start <= uid_end; uid_start++) { + hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp, + hash, uid_start) { + if (uid_start == uid_entry->uid) { + hash_del_rcu(&uid_entry->hash); + kfree_rcu(uid_entry, rcu); + } + } + } + + spin_unlock_irqrestore(&uid_lock, flags); +} + +void cpufreq_times_record_transition(struct cpufreq_freqs *freq) +{ + int index; + struct cpu_freqs *freqs = all_freqs[freq->cpu]; + struct cpufreq_policy *policy; + + if (!freqs) + return; + + policy = cpufreq_cpu_get(freq->cpu); + if (!policy) + return; + + index = cpufreq_frequency_table_get_index(policy, freq->new); + if (index >= 0) + WRITE_ONCE(freqs->last_index, index); + + cpufreq_cpu_put(policy); +} + +static const struct seq_operations uid_time_in_state_seq_ops = { + .start = uid_seq_start, + .next = uid_seq_next, + .stop = uid_seq_stop, + .show = uid_time_in_state_seq_show, +}; + +static int uid_time_in_state_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &uid_time_in_state_seq_ops); +} + +int single_uid_time_in_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, single_uid_time_in_state_show, + &(inode->i_uid)); +} + +static const struct file_operations uid_time_in_state_fops = { + .open = uid_time_in_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init cpufreq_times_init(void) +{ + proc_create_data("uid_time_in_state", 0444, NULL, + &uid_time_in_state_fops, NULL); + + return 0; +} + +early_initcall(cpufreq_times_init); diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index 68ef8fd9482f..f5c4e009113c 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -364,7 +364,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) static int s3c_cpufreq_init(struct cpufreq_policy *policy) { policy->clk = clk_arm; - return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); + + policy->cpuinfo.transition_latency = cpu_cur.info->latency; + + if (ftab) + return cpufreq_table_validate_and_show(policy, ftab); + + return 0; } static int __init s3c_cpufreq_initclks(void) diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index 86628e22b2a3..719c3d9f07fb 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c @@ -30,54 +30,63 @@ static DEFINE_PER_CPU(struct clk, sh_cpuclk); +struct cpufreq_target { + struct cpufreq_policy *policy; + unsigned int freq; +}; + static unsigned int sh_cpufreq_get(unsigned int cpu) { return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000; } -/* - * Here we notify other drivers of the proposed change and the final change. 
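Note on the cpufreq_times interface introduced above: each cluster's frequency table is appended to one flat time-in-state array (cpu_freqs->offset records where a cluster's slice starts and next_offset tracks the running total), and per-UID totals are published through the uid_time_in_state procfs node registered in cpufreq_times_init(). A minimal userspace reader is sketched below; the program itself is illustrative and not part of the patch, but the path and row layout (a "uid:" header listing the valid frequencies, then one "<uid>: <ticks> ..." row per UID, with times in clock ticks) follow uid_time_in_state_seq_show() above.

/* uid_time_in_state_dump.c - illustrative reader, not part of the patch above */
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        FILE *fp = fopen("/proc/uid_time_in_state", "r");
        char *line = NULL;
        size_t len = 0;
        long tick_hz = sysconf(_SC_CLK_TCK);    /* reported times are clock ticks */

        if (!fp) {
                perror("open /proc/uid_time_in_state");
                return 1;
        }

        /* First row: "uid:" followed by every valid frequency, clusters
         * concatenated in cpu_freqs->offset order. Remaining rows:
         * "<uid>: <ticks per frequency> ...". */
        while (getline(&line, &len, fp) != -1)
                fputs(line, stdout);

        fprintf(stderr, "(1 tick = 1/%ld s)\n", tick_hz);
        free(line);
        fclose(fp);
        return 0;
}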
- */ -static int sh_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static long __sh_cpufreq_target(void *arg) { - unsigned int cpu = policy->cpu; + struct cpufreq_target *target = arg; + struct cpufreq_policy *policy = target->policy; + int cpu = policy->cpu; struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); - cpumask_t cpus_allowed; struct cpufreq_freqs freqs; struct device *dev; long freq; - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - BUG_ON(smp_processor_id() != cpu); + if (smp_processor_id() != cpu) + return -ENODEV; dev = get_cpu_device(cpu); /* Convert target_freq from kHz to Hz */ - freq = clk_round_rate(cpuclk, target_freq * 1000); + freq = clk_round_rate(cpuclk, target->freq * 1000); if (freq < (policy->min * 1000) || freq > (policy->max * 1000)) return -EINVAL; - dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000); + dev_dbg(dev, "requested frequency %u Hz\n", target->freq * 1000); freqs.old = sh_cpufreq_get(cpu); freqs.new = (freq + 500) / 1000; freqs.flags = 0; - cpufreq_freq_transition_begin(policy, &freqs); - set_cpus_allowed_ptr(current, &cpus_allowed); + cpufreq_freq_transition_begin(target->policy, &freqs); clk_set_rate(cpuclk, freq); - cpufreq_freq_transition_end(policy, &freqs, 0); + cpufreq_freq_transition_end(target->policy, &freqs, 0); dev_dbg(dev, "set frequency %lu Hz\n", freq); - return 0; } +/* + * Here we notify other drivers of the proposed change and the final change. + */ +static int sh_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct cpufreq_target data = { .policy = policy, .freq = target_freq }; + + return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data); +} + static int sh_cpufreq_verify(struct cpufreq_policy *policy) { struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index a5c111b67f37..ea11a33e7fff 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -174,8 +174,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, if (!state_node) break; - if (!of_device_is_available(state_node)) + if (!of_device_is_available(state_node)) { + of_node_put(state_node); continue; + } if (!idle_state_valid(state_node, i, cpumask)) { pr_warn("%s idle state not valid, bailing out\n", diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index 584a1857624a..324cce5d7354 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -1706,7 +1706,8 @@ static int cluster_cpuidle_register(struct lpm_cluster *cl) struct cpuidle_state *st = &cl->drv->states[i]; struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i]; snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i); - snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name); + snprintf(st->desc, CPUIDLE_DESC_LEN, "%s", + cpu_level->name); st->flags = 0; st->exit_latency = cpu_level->pwr.latency_us; st->power_usage = cpu_level->pwr.ss_power; diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 0f6fd42f55ca..48d4dddf4941 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -911,6 +911,21 @@ static int sdma_disable_channel(struct dma_chan *chan) return 0; } +static int sdma_disable_channel_with_delay(struct dma_chan *chan) +{ + sdma_disable_channel(chan); + + /* + * According to NXP R&D team a delay of one BD SDMA cost time + * (maximum is 1ms) should be added after disable of 
the channel + * bit, to ensure SDMA core has really been stopped after SDMA + * clients call .device_terminate_all. + */ + mdelay(1); + + return 0; +} + static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac) { struct sdma_engine *sdma = sdmac->sdma; @@ -1707,17 +1722,24 @@ static int sdma_probe(struct platform_device *pdev) if (IS_ERR(sdma->clk_ahb)) return PTR_ERR(sdma->clk_ahb); - clk_prepare(sdma->clk_ipg); - clk_prepare(sdma->clk_ahb); + ret = clk_prepare(sdma->clk_ipg); + if (ret) + return ret; + + ret = clk_prepare(sdma->clk_ahb); + if (ret) + goto err_clk; ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma", sdma); if (ret) - return ret; + goto err_irq; sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); - if (!sdma->script_addrs) - return -ENOMEM; + if (!sdma->script_addrs) { + ret = -ENOMEM; + goto err_irq; + } /* initially no scripts available */ saddr_arr = (s32 *)sdma->script_addrs; @@ -1793,7 +1815,7 @@ static int sdma_probe(struct platform_device *pdev) sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; sdma->dma_device.device_config = sdma_config; - sdma->dma_device.device_terminate_all = sdma_disable_channel; + sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay; sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); @@ -1832,6 +1854,10 @@ err_register: dma_async_device_unregister(&sdma->dma_device); err_init: kfree(sdma->script_addrs); +err_irq: + clk_unprepare(sdma->clk_ahb); +err_clk: + clk_unprepare(sdma->clk_ipg); return ret; } @@ -1842,6 +1868,8 @@ static int sdma_remove(struct platform_device *pdev) dma_async_device_unregister(&sdma->dma_device); kfree(sdma->script_addrs); + clk_unprepare(sdma->clk_ahb); + clk_unprepare(sdma->clk_ipg); /* Kill the tasklet */ for (i = 0; i < MAX_DMA_CHANNELS; i++) { struct sdma_channel *sdmac = &sdma->channel[i]; diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 8100ede095d5..c7bd1c5315f4 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -51,7 +51,15 @@ struct ti_am335x_xbar_map { static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val) { - writeb_relaxed(val, iomem + event); + /* + * TPCC_EVT_MUX_60_63 register layout is different than the + * rest, in the sense, that event 63 is mapped to lowest byte + * and event 60 is mapped to highest, handle it separately. + */ + if (event >= 60 && event <= 63) + writeb_relaxed(val, iomem + (63 - event % 4)); + else + writeb_relaxed(val, iomem + event); } static void ti_am335x_xbar_free(struct device *dev, void *route_data) diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c index 0574e1bbe45c..3ce5609b4611 100644 --- a/drivers/edac/mv64x60_edac.c +++ b/drivers/edac/mv64x60_edac.c @@ -763,7 +763,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev) /* Non-ECC RAM? */ printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__); res = -ENODEV; - goto err2; + goto err; } edac_dbg(3, "init mci\n"); diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c index d071e89d3124..99fd598b5069 100644 --- a/drivers/esoc/esoc-mdm-4x.c +++ b/drivers/esoc/esoc-mdm-4x.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved. 
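Aside on the ti_am335x_xbar_write() change above: TPCC_EVT_MUX_60_63 stores its four events in reverse byte order, so event 63 lands in the lowest byte and event 60 in the highest. The offset arithmetic is easy to sanity-check in isolation; the standalone sketch below mirrors only that formula (the file name and main() harness are illustrative, not part of the driver).

/* xbar_offset_check.c - standalone sanity check of the mapping used above */
#include <assert.h>
#include <stdio.h>

/* Mirrors the offset computation in ti_am335x_xbar_write(): events 0..59 map
 * straight to their byte offset, events 60..63 are byte-swapped within the
 * TPCC_EVT_MUX_60_63 register. */
static unsigned int xbar_byte_offset(unsigned int event)
{
        if (event >= 60 && event <= 63)
                return 63 - event % 4;
        return event;
}

int main(void)
{
        assert(xbar_byte_offset(59) == 59);     /* regular registers unchanged */
        assert(xbar_byte_offset(60) == 63);     /* event 60 -> highest byte */
        assert(xbar_byte_offset(61) == 62);
        assert(xbar_byte_offset(62) == 61);
        assert(xbar_byte_offset(63) == 60);     /* event 63 -> lowest byte */
        printf("TPCC_EVT_MUX_60_63 mapping checks out\n");
        return 0;
}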
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -230,10 +230,15 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc) } msleep(100); } - if (status_down) + if (status_down) { dev_dbg(dev, "shutdown successful\n"); - else + esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc); + } else { dev_err(mdm->dev, "graceful poff ipc fail\n"); + graceful_shutdown = false; + goto force_poff; + } + break; force_poff: case ESOC_FORCE_PWR_OFF: if (!graceful_shutdown) { diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c index 4be66a16a3a1..0288082cea00 100644 --- a/drivers/esoc/esoc-mdm-pon.c +++ b/drivers/esoc/esoc-mdm-pon.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -55,7 +55,7 @@ static int mdm9x55_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic) if (!atomic) usleep_range(reset_time_us, reset_time_us + 100000); else - mdelay(mdm->reset_time_ms); + mdelay(DEF_MDM9X55_RESET_TIME); gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction_de_assert); return 0; diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 06d345b087f8..759a39906a52 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -2145,7 +2145,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, return desc; } - status = gpiod_request(desc, con_id); + /* If a connection label was passed use that, else use the device name as label */ + status = gpiod_request(desc, con_id ? con_id : dev_name(dev)); if (status < 0) return ERR_PTR(status); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index a142d5ae148d..5c40d6d710af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -585,6 +585,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, size_t size; u32 retry = 3; + if (amdgpu_acpi_pcie_notify_device_ready(adev)) + return -EINVAL; + /* Get the device handle */ handle = ACPI_HANDLE(&adev->pdev->dev); if (!handle) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 930083336968..1f0e6ede120c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector) /* don't do anything if sink is not display port, i.e., * passive dp->(dvi|hdmi) adaptor */ - if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { - int saved_dpms = connector->dpms; - /* Only turn off the display if it's physically disconnected */ - if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { - /* Don't try to start link training before we - * have the dpcd */ - if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) - return; - - /* set it to OFF so that drm_helper_connector_dpms() - * won't return immediately since the current state - * is ON at this point. 
- */ - connector->dpms = DRM_MODE_DPMS_OFF; - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - } - connector->dpms = saved_dpms; + if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && + amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) && + amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { + /* Don't start link training before we have the DPCD */ + if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) + return; + + /* Turn the connector off and back on immediately, which + * will trigger link training + */ + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } } } @@ -739,9 +732,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (encoder) { struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); @@ -760,8 +755,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) /* check acpi lid status ??? */ amdgpu_connector_update_scratch_regs(connector, ret); - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } + return ret; } @@ -862,9 +861,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } encoder = amdgpu_connector_best_single_encoder(connector); if (!encoder) @@ -918,8 +919,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -981,9 +984,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; bool dret = false, broken_edid = false; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { ret = connector->status; @@ -1108,8 +1113,10 @@ out: amdgpu_connector_update_scratch_regs(connector, ret); exit: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -1351,9 +1358,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) struct drm_encoder *encoder = 
amdgpu_connector_best_single_encoder(connector); int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { ret = connector->status; @@ -1421,8 +1430,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 82903ca78529..c555781685ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -560,6 +560,12 @@ amdgpu_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOENT); } + /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ + if (obj->import_attach) { + DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n"); + return ERR_PTR(-EINVAL); + } + amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); if (amdgpu_fb == NULL) { drm_gem_object_unreference_unlocked(obj); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index b57fffc2d4af..0a91261b6f5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2104,34 +2104,8 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) case CHIP_KAVERI: adev->gfx.config.max_shader_engines = 1; adev->gfx.config.max_tile_pipes = 4; - if ((adev->pdev->device == 0x1304) || - (adev->pdev->device == 0x1305) || - (adev->pdev->device == 0x130C) || - (adev->pdev->device == 0x130F) || - (adev->pdev->device == 0x1310) || - (adev->pdev->device == 0x1311) || - (adev->pdev->device == 0x131C)) { - adev->gfx.config.max_cu_per_sh = 8; - adev->gfx.config.max_backends_per_se = 2; - } else if ((adev->pdev->device == 0x1309) || - (adev->pdev->device == 0x130A) || - (adev->pdev->device == 0x130D) || - (adev->pdev->device == 0x1313) || - (adev->pdev->device == 0x131D)) { - adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; - } else if ((adev->pdev->device == 0x1306) || - (adev->pdev->device == 0x1307) || - (adev->pdev->device == 0x130B) || - (adev->pdev->device == 0x130E) || - (adev->pdev->device == 0x1315) || - (adev->pdev->device == 0x131B)) { - adev->gfx.config.max_cu_per_sh = 4; - adev->gfx.config.max_backends_per_se = 1; - } else { - adev->gfx.config.max_cu_per_sh = 3; - adev->gfx.config.max_backends_per_se = 1; - } + adev->gfx.config.max_cu_per_sh = 8; + adev->gfx.config.max_backends_per_se = 2; adev->gfx.config.max_sh_per_se = 1; adev->gfx.config.max_texture_channel_caches = 4; adev->gfx.config.max_gprs = 256; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 74909e72a009..2acbd43f9a53 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -519,11 +519,17 @@ static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr, return ret; } +static void kfd_topology_kobj_release(struct kobject *kobj) +{ + kfree(kobj); +} + static const struct 
sysfs_ops sysprops_ops = { .show = sysprops_show, }; static struct kobj_type sysprops_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &sysprops_ops, }; @@ -559,6 +565,7 @@ static const struct sysfs_ops iolink_ops = { }; static struct kobj_type iolink_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &iolink_ops, }; @@ -586,6 +593,7 @@ static const struct sysfs_ops mem_ops = { }; static struct kobj_type mem_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &mem_ops, }; @@ -625,6 +633,7 @@ static const struct sysfs_ops cache_ops = { }; static struct kobj_type cache_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &cache_ops, }; @@ -747,6 +756,7 @@ static const struct sysfs_ops node_ops = { }; static struct kobj_type node_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &node_ops, }; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 65d4c5d8d94b..fbd717324328 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3766,8 +3766,7 @@ monitor_name(struct detailed_timing *t, void *data) * @edid: EDID to parse * * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The - * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to - * fill in. + * HDCP and Port_ID ELD fields are left for the graphics driver to fill in. */ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) { @@ -3843,6 +3842,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) } eld[5] |= sad_count << 4; + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_eDP) + eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_DP; + else + eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_HDMI; + eld[DRM_ELD_BASELINE_ELD_LEN] = DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 8090989185b2..4ddbc49125cd 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -1271,9 +1271,9 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe) if (atomic_dec_and_test(&vblank->refcount)) { if (drm_vblank_offdelay == 0) return; - else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0) + else if (drm_vblank_offdelay < 0) vblank_disable_fn((unsigned long)vblank); - else + else if (!dev->vblank_disable_immediate) mod_timer(&vblank->disable_timer, jiffies + ((drm_vblank_offdelay * HZ)/1000)); } @@ -1902,6 +1902,16 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe) wake_up(&vblank->queue); drm_handle_vblank_events(dev, pipe); + /* With instant-off, we defer disabling the interrupt until after + * we finish processing the following vblank. The disable has to + * be last (after drm_handle_vblank_events) so that the timestamp + * is always accurate. + */ + if (dev->vblank_disable_immediate && + drm_vblank_offdelay > 0 && + !atomic_read(&vblank->refcount)) + vblank_disable_fn((unsigned long)vblank); + spin_unlock_irqrestore(&dev->event_lock, irqflags); return true; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index f8b5fcfa91a2..1fe4b8e6596b 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -412,6 +412,26 @@ out: } /** + * drm_kms_helper_is_poll_worker - is %current task an output poll worker? + * + * Determine if %current task is an output poll worker. 
This can be used + * to select distinct code paths for output polling versus other contexts. + * + * One use case is to avoid a deadlock between the output poll worker and + * the autosuspend worker wherein the latter waits for polling to finish + * upon calling drm_kms_helper_poll_disable(), while the former waits for + * runtime suspend to finish upon calling pm_runtime_get_sync() in a + * connector ->detect hook. + */ +bool drm_kms_helper_is_poll_worker(void) +{ + struct work_struct *work = current_work(); + + return work && work->func == output_poll_execute; +} +EXPORT_SYMBOL(drm_kms_helper_is_poll_worker); + +/** * drm_kms_helper_poll_disable - disable output polling * @dev: drm_device * diff --git a/drivers/gpu/drm/msm/dba_bridge.c b/drivers/gpu/drm/msm/dba_bridge.c index 9144dfdf30c9..7887bda23df0 100644 --- a/drivers/gpu/drm/msm/dba_bridge.c +++ b/drivers/gpu/drm/msm/dba_bridge.c @@ -51,6 +51,7 @@ struct dba_bridge { u32 num_of_input_lanes; bool pluggable; u32 panel_count; + bool cont_splash_enabled; }; #define to_dba_bridge(x) container_of((x), struct dba_bridge, base) @@ -324,6 +325,7 @@ struct drm_bridge *dba_bridge_init(struct drm_device *dev, bridge->panel_count = data->panel_count; bridge->base.funcs = &_dba_bridge_ops; bridge->base.encoder = encoder; + bridge->cont_splash_enabled = data->cont_splash_enabled; rc = drm_bridge_attach(dev, &bridge->base); if (rc) { @@ -339,7 +341,10 @@ struct drm_bridge *dba_bridge_init(struct drm_device *dev, encoder->bridge = &bridge->base; } - if (!bridge->pluggable) { + /* If early splash has enabled bridge chip in bootloader, + * below call should be skipped. + */ + if (!bridge->pluggable && !bridge->cont_splash_enabled) { if (bridge->ops.power_on) bridge->ops.power_on(bridge->dba_ctx, true, 0); if (bridge->ops.check_hpd) diff --git a/drivers/gpu/drm/msm/dba_bridge.h b/drivers/gpu/drm/msm/dba_bridge.h index 5562d2b2aef9..edc130f92257 100644 --- a/drivers/gpu/drm/msm/dba_bridge.h +++ b/drivers/gpu/drm/msm/dba_bridge.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -44,6 +44,7 @@ struct dba_bridge_init { struct drm_bridge *precede_bridge; bool pluggable; u32 panel_count; + bool cont_splash_enabled; }; /** diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c index 6015cf35e030..7a90c7be4e5c 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -1598,7 +1598,7 @@ exit: * * Return: error code. 
*/ -int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl) +int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool cont_splash_enabled) { int rc = 0; @@ -1615,37 +1615,40 @@ int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl) goto error; } - dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw, + if (!cont_splash_enabled) { + dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw, &dsi_ctrl->host_config.lane_map); - dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw, + dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw, &dsi_ctrl->host_config.common_config); - if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) { - dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw, + if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) { + dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw, &dsi_ctrl->host_config.common_config, &dsi_ctrl->host_config.u.cmd_engine); - dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw, + dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw, dsi_ctrl->host_config.video_timing.h_active, dsi_ctrl->host_config.video_timing.h_active * 3, dsi_ctrl->host_config.video_timing.v_active, 0x0); - } else { - dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw, + } else { + dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw, &dsi_ctrl->host_config.common_config, &dsi_ctrl->host_config.u.video_engine); - dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw, + dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw, &dsi_ctrl->host_config.video_timing); + } } - - dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0); dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0); - /* Perform a soft reset before enabling dsi controller */ - dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw); + /* Perform a soft reset before enabling dsi controller + * But skip the reset if dsi is enabled in bootloader. + */ + if (!cont_splash_enabled) + dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw); pr_debug("[DSI_%d]Host initialization complete\n", dsi_ctrl->index); dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1); error: @@ -1967,6 +1970,12 @@ error: return rc; } +void dsi_ctrl_update_power_state(struct dsi_ctrl *dsi_ctrl, + enum dsi_power_state state) +{ + dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_POWER_STATE_CHANGE, state); +} + /** * dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller * @dsi_ctrl: DSI controller handle. diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h index 993a35cbf84a..c0ba532011b5 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2016, 2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -331,6 +331,7 @@ int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl); /** * dsi_ctrl_host_init() - Initialize DSI host hardware. * @dsi_ctrl: DSI controller handle. + * @cont_splash_enabled: Flag for DSI splash enabled in bootloader. * * Initializes DSI controller hardware with host configuration provided by * dsi_ctrl_update_host_config(). Initialization can be performed only during @@ -339,7 +340,7 @@ int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl); * * Return: error code. */ -int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl); +int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool cont_splash_enabled); /** * dsi_ctrl_host_deinit() - De-Initialize DSI host hardware. 
@@ -404,6 +405,16 @@ int dsi_ctrl_set_power_state(struct dsi_ctrl *dsi_ctrl, enum dsi_power_state state); /** + * dsi_ctrl_update_power_state() - update power state for dsi controller + * @dsi_ctrl: DSI controller handle. + * @state: Power state. + * + * Update power state for DSI controller. + * + */ +void dsi_ctrl_update_power_state(struct dsi_ctrl *dsi_ctrl, + enum dsi_power_state state); +/** * dsi_ctrl_set_cmd_engine_state() - set command engine state * @dsi_ctrl: DSI Controller handle. * @state: Engine state. diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c index 09ab14cc4746..c468a6f5caa2 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c @@ -174,6 +174,11 @@ static int dsi_display_ctrl_power_on(struct dsi_display *display) int i; struct dsi_display_ctrl *ctrl; + if (display->cont_splash_enabled) { + pr_debug("skip ctrl power on\n"); + return rc; + } + /* Sequence does not matter for split dsi usecases */ for (i = 0; i < display->ctrl_count; i++) { @@ -460,7 +465,8 @@ static int dsi_display_ctrl_init(struct dsi_display *display) for (i = 0 ; i < display->ctrl_count; i++) { ctrl = &display->ctrl[i]; - rc = dsi_ctrl_host_init(ctrl->ctrl); + rc = dsi_ctrl_host_init(ctrl->ctrl, + display->cont_splash_enabled); if (rc) { pr_err("[%s] failed to init host_%d, rc=%d\n", display->name, i, rc); @@ -720,7 +726,7 @@ static int dsi_display_phy_enable(struct dsi_display *display) rc = dsi_phy_enable(m_ctrl->phy, &display->config, m_src, - true); + true, display->cont_splash_enabled); if (rc) { pr_err("[%s] failed to enable DSI PHY, rc=%d\n", display->name, rc); @@ -735,7 +741,7 @@ static int dsi_display_phy_enable(struct dsi_display *display) rc = dsi_phy_enable(ctrl->phy, &display->config, DSI_PLL_SOURCE_NON_NATIVE, - true); + true, display->cont_splash_enabled); if (rc) { pr_err("[%s] failed to enable DSI PHY, rc=%d\n", display->name, rc); @@ -848,6 +854,11 @@ static int dsi_display_phy_sw_reset(struct dsi_display *display) int i; struct dsi_display_ctrl *m_ctrl, *ctrl; + if (display->cont_splash_enabled) { + pr_debug("skip phy sw reset\n"); + return 0; + } + m_ctrl = &display->ctrl[display->cmd_master_idx]; rc = dsi_ctrl_phy_sw_reset(m_ctrl->ctrl); @@ -1748,6 +1759,45 @@ static int _dsi_display_dev_deinit(struct dsi_display *display) return rc; } +/* + * _dsi_display_config_ctrl_for_splash + * + * Config ctrl engine for DSI display. 
+ * @display: Handle to the display + * Returns: Zero on success + */ +static int _dsi_display_config_ctrl_for_splash(struct dsi_display *display) +{ + int rc = 0; + + if (!display) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + if (display->config.panel_mode == DSI_OP_VIDEO_MODE) { + rc = dsi_display_vid_engine_enable(display); + if (rc) { + pr_err("[%s]failed to enable video engine, rc=%d\n", + display->name, rc); + goto error_out; + } + } else if (display->config.panel_mode == DSI_OP_CMD_MODE) { + rc = dsi_display_cmd_engine_enable(display); + if (rc) { + pr_err("[%s]failed to enable cmd engine, rc=%d\n", + display->name, rc); + goto error_out; + } + } else { + pr_err("[%s] Invalid configuration\n", display->name); + rc = -EINVAL; + } + +error_out: + return rc; +} + /** * dsi_display_bind - bind dsi device with controlling device * @dev: Pointer to base of platform device @@ -2141,6 +2191,8 @@ int dsi_display_drm_bridge_init(struct dsi_display *display, init_data.num_of_input_lanes = num_of_lanes; init_data.precede_bridge = precede_bridge; init_data.panel_count = display->panel_count; + init_data.cont_splash_enabled = + display->cont_splash_enabled; dba_bridge = dba_bridge_init(display->drm_dev, enc, &init_data); if (IS_ERR_OR_NULL(dba_bridge)) { @@ -2451,26 +2503,28 @@ int dsi_display_prepare(struct dsi_display *display) mutex_lock(&display->display_lock); - for (i = 0; i < display->panel_count; i++) { - rc = dsi_panel_pre_prepare(display->panel[i]); - if (rc) { - SDE_ERROR("[%s] panel pre-prepare failed, rc=%d\n", - display->name, rc); - goto error_panel_post_unprep; + if (!display->cont_splash_enabled) { + for (i = 0; i < display->panel_count; i++) { + rc = dsi_panel_pre_prepare(display->panel[i]); + if (rc) { + SDE_ERROR("[%s]pre-prepare failed, rc=%d\n", + display->name, rc); + goto error_panel_post_unprep; + } } } rc = dsi_display_ctrl_power_on(display); if (rc) { pr_err("[%s] failed to power on dsi controllers, rc=%d\n", - display->name, rc); + display->name, rc); goto error_panel_post_unprep; } rc = dsi_display_phy_power_on(display); if (rc) { pr_err("[%s] failed to power on dsi phy, rc = %d\n", - display->name, rc); + display->name, rc); goto error_ctrl_pwr_off; } @@ -2497,21 +2551,21 @@ int dsi_display_prepare(struct dsi_display *display) rc = dsi_display_ctrl_init(display); if (rc) { pr_err("[%s] failed to setup DSI controller, rc=%d\n", - display->name, rc); + display->name, rc); goto error_phy_disable; } rc = dsi_display_ctrl_link_clk_on(display); if (rc) { pr_err("[%s] failed to enable DSI link clocks, rc=%d\n", - display->name, rc); + display->name, rc); goto error_ctrl_deinit; } rc = dsi_display_ctrl_host_enable(display); if (rc) { pr_err("[%s] failed to enable DSI host, rc=%d\n", - display->name, rc); + display->name, rc); goto error_ctrl_link_off; } @@ -2519,11 +2573,10 @@ int dsi_display_prepare(struct dsi_display *display) rc = dsi_panel_prepare(display->panel[j]); if (rc) { SDE_ERROR("[%s] panel prepare failed, rc=%d\n", - display->name, rc); + display->name, rc); goto error_panel_unprep; } } - goto error; error_panel_unprep: @@ -2559,6 +2612,12 @@ int dsi_display_enable(struct dsi_display *display) return -EINVAL; } + if (display->cont_splash_enabled) { + _dsi_display_config_ctrl_for_splash(display); + display->cont_splash_enabled = false; + return 0; + } + mutex_lock(&display->display_lock); for (i = 0; i < display->panel_count; i++) { @@ -2755,6 +2814,46 @@ int dsi_display_unprepare(struct dsi_display *display) return rc; } +int 
dsi_dsiplay_setup_splash_resource(struct dsi_display *display) +{ + int ret = 0, i = 0; + struct dsi_display_ctrl *ctrl; + + if (!display) + return -EINVAL; + + for (i = 0; i < display->ctrl_count; i++) { + ctrl = &display->ctrl[i]; + if (!ctrl) + return -EINVAL; + + dsi_pwr_enable_regulator(&ctrl->ctrl->pwr_info.host_pwr, true); + dsi_pwr_enable_regulator(&ctrl->ctrl->pwr_info.digital, true); + dsi_pwr_enable_regulator(&ctrl->phy->pwr_info.phy_pwr, true); + + ret = dsi_clk_enable_core_clks(&ctrl->ctrl->clk_info.core_clks, + true); + if (ret) { + SDE_ERROR("failed to set core clk for dsi, ret = %d\n", + ret); + return -EINVAL; + } + + ret = dsi_clk_enable_link_clks(&ctrl->ctrl->clk_info.link_clks, + true); + if (ret) { + SDE_ERROR("failed to set link clk for dsi, ret = %d\n", + ret); + return -EINVAL; + } + + dsi_ctrl_update_power_state(ctrl->ctrl, + DSI_CTRL_POWER_LINK_CLK_ON); + } + + return ret; +} + static int __init dsi_display_register(void) { dsi_phy_drv_register(); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h index 210b8d00850b..3723f19fd0e7 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -158,6 +158,8 @@ struct dsi_display { /* DEBUG FS */ struct dentry *root; + + bool cont_splash_enabled; }; int dsi_display_dev_probe(struct platform_device *pdev); @@ -338,4 +340,15 @@ int dsi_display_clock_gate(struct dsi_display *display, bool enable); int dsi_dispaly_static_frame(struct dsi_display *display, bool enable); int dsi_display_set_backlight(void *display, u32 bl_lvl); + +/** + * dsi_dsiplay_setup_splash_resource + * @display: Handle to display. + * + * Setup DSI splash resource to avoid reset and glitch if DSI is enabled + * in bootloder. + * + * Return: error code. + */ +int dsi_dsiplay_setup_splash_resource(struct dsi_display *display); #endif /* _DSI_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c index 309401eb3093..35000d7eb12a 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -439,7 +439,7 @@ int dsi_connector_get_modes(struct drm_connector *connector, rc = dsi_display_get_modes(display, NULL, &count); if (rc) { pr_err("failed to get num of modes, rc=%d\n", rc); - goto error; + goto end; } size = count * sizeof(*modes); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c index 1ccbbe7df573..da3b3b548e5f 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -721,9 +721,10 @@ error: * Return: error code. */ int dsi_phy_enable(struct msm_dsi_phy *phy, - struct dsi_host_config *config, - enum dsi_phy_pll_source pll_source, - bool skip_validation) + struct dsi_host_config *config, + enum dsi_phy_pll_source pll_source, + bool skip_validation, + bool cont_splash_enabled) { int rc = 0; @@ -758,7 +759,8 @@ int dsi_phy_enable(struct msm_dsi_phy *phy, goto error_disable_clks; } - dsi_phy_enable_hw(phy); + if (!cont_splash_enabled) + dsi_phy_enable_hw(phy); error_disable_clks: rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, false); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h index 6c31bfa3ea00..aa21d0b347e8 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -157,9 +157,10 @@ int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable); * Return: error code. */ int dsi_phy_enable(struct msm_dsi_phy *dsi_phy, - struct dsi_host_config *config, - enum dsi_phy_pll_source pll_source, - bool skip_validation); + struct dsi_host_config *config, + enum dsi_phy_pll_source pll_source, + bool skip_validation, + bool cont_splash_enabled); /** * dsi_phy_disable() - disable DSI PHY hardware. diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c index 21b89663a9c3..852ac80410f0 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c @@ -1311,7 +1311,7 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi) } } - if (!sde_kms->splash_info.handoff) { + if (!sde_hdmi->cont_splash_enabled) { sde_hdmi_set_mode(hdmi, false); _sde_hdmi_phy_reset(hdmi); sde_hdmi_set_mode(hdmi, true); @@ -3186,7 +3186,6 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc) struct msm_drm_private *priv = NULL; struct hdmi *hdmi; struct platform_device *pdev; - struct sde_kms *sde_kms; DBG(""); if (!display || !display->drm_dev || !enc) { @@ -3252,8 +3251,7 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc) * clocks. This can skip the clock disabling operation in * clock_late_init when finding clk.count == 1. 
*/ - sde_kms = to_sde_kms(priv->kms); - if (sde_kms->splash_info.handoff) { + if (display->cont_splash_enabled) { sde_hdmi_bridge_power_on(hdmi->bridge); hdmi->power_on = true; } else { diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h index 607d2cf3c7b7..9cf807e829c7 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h @@ -196,6 +196,8 @@ struct sde_hdmi { struct dss_io_data io[HDMI_TX_MAX_IO]; /* DEBUG FS */ struct dentry *root; + + bool cont_splash_enabled; }; /** diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 01b6425c6e19..d751625bbfd7 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -193,7 +193,8 @@ static void mdp5_plane_reset(struct drm_plane *plane) kfree(to_mdp5_plane_state(plane->state)); mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); - + if (!mdp5_state) + return; /* assign default blend parameters */ mdp5_state->alpha = 255; mdp5_state->premultiplied = 0; @@ -686,14 +687,21 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, bool vflip, hflip; unsigned long flags; int ret; + const struct msm_format *msm_fmt; + msm_fmt = msm_framebuffer_format(fb); nplanes = drm_format_num_planes(fb->pixel_format); /* bad formats should already be rejected: */ if (WARN_ON(nplanes > pipe2nclients(pipe))) return -EINVAL; - format = to_mdp_format(msm_framebuffer_format(fb)); + if (!msm_fmt) { + pr_err("invalid format"); + return -EINVAL; + } + + format = to_mdp_format(msm_fmt); pix_format = format->base.pixel_format; /* src values are in Q16 fixed point, convert to integer: */ diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 2e528b112e1f..af36b95beadb 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -347,6 +347,10 @@ static int submit_reloc(struct msm_gpu *gpu, * to do it page-by-page, w/ kmap() if not vmap()d.. */ ptr = msm_gem_vaddr(&obj->base); + if (!ptr) { + DRM_ERROR("Invalid format"); + return -EINVAL; + } if (IS_ERR(ptr)) { ret = PTR_ERR(ptr); diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index b52c4752c5fe..4586b62401fb 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -237,7 +237,8 @@ static struct device *find_context_bank(const char *name) /* Get the parent device */ parent = of_find_device_by_node(node->parent); - + if (!parent) + return ERR_PTR(-ENODEV); /* Populate the sub nodes */ of_platform_populate(parent->dev.of_node, NULL, NULL, &parent->dev); diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index ea3f138ee461..cd00ab4b4a81 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -39,10 +39,10 @@ #include "sde_trace.h" /* default input fence timeout, in ms */ -#define SDE_CRTC_INPUT_FENCE_TIMEOUT 2000 +#define SDE_CRTC_INPUT_FENCE_TIMEOUT 10000 /* - * The default input fence timeout is 2 seconds while max allowed + * The default input fence timeout is 10 seconds while max allowed * range is 10 seconds. Any value above 10 seconds adds glitches beyond * tolerance limit. 
*/ @@ -1625,8 +1625,18 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion); sde_kms_info_add_keyint(info, "max_linewidth", catalog->max_mixer_width); - sde_kms_info_add_keyint(info, "max_blendstages", - catalog->max_mixer_blendstages); + + /* till now, we can't know which display early RVC will run on. + * Not to impact early RVC's layer, we decrease all lm's blend stage. + * This should be restored after handoff is done. + */ + if (sde_kms->splash_info.handoff) + sde_kms_info_add_keyint(info, "max_blendstages", + catalog->max_mixer_blendstages - 1); + else + sde_kms_info_add_keyint(info, "max_blendstages", + catalog->max_mixer_blendstages); + if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED2) sde_kms_info_add_keystr(info, "qseed_type", "qseed2"); if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3) diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index a94de553c855..b95157b28855 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -639,6 +639,15 @@ static int _sde_kms_setup_displays(struct drm_device *dev, continue; } + rc = sde_splash_setup_display_resource(&sde_kms->splash_info, + display, DRM_MODE_CONNECTOR_DSI); + if (rc) { + SDE_ERROR("dsi %d splash resource setup failed %d\n", + i, rc); + sde_encoder_destroy(encoder); + continue; + } + rc = dsi_display_drm_bridge_init(display, encoder); if (rc) { SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc); @@ -731,6 +740,15 @@ static int _sde_kms_setup_displays(struct drm_device *dev, continue; } + rc = sde_splash_setup_display_resource(&sde_kms->splash_info, + display, DRM_MODE_CONNECTOR_HDMIA); + if (rc) { + SDE_ERROR("hdmi %d splash resource setup failed %d\n", + i, rc); + sde_encoder_destroy(encoder); + continue; + } + rc = sde_hdmi_drm_init(display, encoder); if (rc) { SDE_ERROR("hdmi drm %d init failed, %d\n", i, rc); @@ -812,6 +830,7 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms) struct msm_drm_private *priv; struct sde_mdss_cfg *catalog; + struct sde_splash_info *sinfo; int primary_planes_idx, i, ret; int max_crtc_count, max_plane_count; @@ -824,6 +843,7 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms) dev = sde_kms->dev; priv = dev->dev_private; catalog = sde_kms->catalog; + sinfo = &sde_kms->splash_info; ret = sde_core_irq_domain_add(sde_kms); if (ret) @@ -851,7 +871,7 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms) primary = false; plane = sde_plane_init(dev, catalog->vp[i].id, - primary, 1UL << crtc_id, true); + primary, 1UL << crtc_id, true, false); if (IS_ERR(plane)) { SDE_ERROR("sde_plane_init failed\n"); ret = PTR_ERR(plane); @@ -869,14 +889,22 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms) for (i = 0; i < max_plane_count; i++) { bool primary = true; + bool resv_plane = false; if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR) || primary_planes_idx >= max_crtc_count) primary = false; + if (sde_splash_query_plane_is_reserved(sinfo, + catalog->sspp[i].id)) { + resv_plane = true; + DRM_INFO("pipe%d is reserved\n", + catalog->sspp[i].id); + } + plane = sde_plane_init(dev, catalog->sspp[i].id, primary, (1UL << max_crtc_count) - 1, - false); + false, resv_plane); if (IS_ERR(plane)) { SDE_ERROR("sde_plane_init failed\n"); ret = PTR_ERR(plane); @@ -1337,12 +1365,17 @@ static int sde_kms_hw_init(struct msm_kms *kms) */ sinfo = &sde_kms->splash_info; if (sinfo->handoff) { - rc = sde_splash_parse_dt(dev); + rc = 
sde_splash_parse_memory_dt(dev); if (rc) { - SDE_ERROR("parse dt for splash info failed: %d\n", rc); + SDE_ERROR("parse memory dt failed: %d\n", rc); goto power_error; } + rc = sde_splash_parse_reserved_plane_dt(sinfo, + sde_kms->catalog); + if (rc) + SDE_ERROR("parse reserved plane dt failed: %d\n", rc); + sde_splash_init(&priv->phandle, kms); } diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index acd5687f6d11..ceac5a931e7e 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -1798,7 +1798,7 @@ static void sde_plane_atomic_update(struct drm_plane *plane, /* helper to install properties which are common to planes and crtcs */ static void _sde_plane_install_properties(struct drm_plane *plane, - struct sde_mdss_cfg *catalog) + struct sde_mdss_cfg *catalog, bool plane_reserved) { static const struct drm_prop_enum_list e_blend_op[] = { {SDE_DRM_BLEND_OP_NOT_DEFINED, "not_defined"}, @@ -1994,6 +1994,16 @@ static void _sde_plane_install_properties(struct drm_plane *plane, sde_kms_info_add_keyint(info, "max_downscale", maxdwnscale); sde_kms_info_add_keyint(info, "max_horizontal_deci", maxhdeciexp); sde_kms_info_add_keyint(info, "max_vertical_deci", maxvdeciexp); + + /* When early RVC is enabled in bootloader and doesn't exit, + * user app should not touch the pipe which RVC is on. + * So mark the plane_unavailibility to the special pipe's property, + * user can parse this property of this pipe and stop this pipe's + * allocation after parsing. + * plane_reserved is 1, means the pipe is occupied in bootloader. + * plane_reserved is 0, means it's not used in bootloader. + */ + sde_kms_info_add_keyint(info, "plane_unavailability", plane_reserved); msm_property_set_blob(&psde->property_info, &psde->blob_info, info->data, info->len, PLANE_PROP_INFO); @@ -2731,7 +2741,8 @@ end: /* initialize plane */ struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe, bool primary_plane, - unsigned long possible_crtcs, bool vp_enabled) + unsigned long possible_crtcs, + bool vp_enabled, bool plane_reserved) { struct drm_plane *plane = NULL; struct sde_plane *psde; @@ -2856,7 +2867,7 @@ struct drm_plane *sde_plane_init(struct drm_device *dev, PLANE_PROP_COUNT, PLANE_PROP_BLOBCOUNT, sizeof(struct sde_plane_state)); - _sde_plane_install_properties(plane, kms->catalog); + _sde_plane_install_properties(plane, kms->catalog, plane_reserved); /* save user friendly pipe name for later */ snprintf(psde->pipe_name, SDE_NAME_SIZE, "plane%u", plane->base.id); diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h index 7b91822d4cde..8ac582643926 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.h +++ b/drivers/gpu/drm/msm/sde/sde_plane.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
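The "plane_unavailability" key added above is published through the plane's SDE info blob so userspace can avoid allocating a pipe that the bootloader's early RVC path still owns. Below is a minimal, hypothetical libdrm sketch of how a compositor could test for it; it assumes the info blob is newline-separated "key=value" text (as built by sde_kms_info_add_keyint()) and that a reserved pipe reports "plane_unavailability=1". The helper name and the text-matching approach are illustration only, not part of the driver interface.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Hypothetical helper: returns 1 if any blob property of the plane
 * contains "plane_unavailability=1" (assumed key=value text format),
 * meaning the pipe is held by the bootloader; 0 otherwise. */
static int plane_is_reserved(int fd, uint32_t plane_id)
{
	drmModeObjectProperties *props;
	int reserved = 0;
	uint32_t i;

	props = drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
	if (!props)
		return 0;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);
		if (!prop)
			continue;
		if (prop->flags & DRM_MODE_PROP_BLOB) {
			drmModePropertyBlobRes *blob =
				drmModeGetPropertyBlob(fd, props->prop_values[i]);
			if (blob && blob->data && blob->length) {
				/* blob data is not guaranteed to be NUL terminated */
				char *txt = calloc(1, blob->length + 1);
				if (txt) {
					memcpy(txt, blob->data, blob->length);
					if (strstr(txt, "plane_unavailability=1"))
						reserved = 1;
					free(txt);
				}
			}
			if (blob)
				drmModeFreePropertyBlob(blob);
		}
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
	return reserved;
}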
* Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -77,10 +77,12 @@ void sde_plane_flush(struct drm_plane *plane); * @primary_plane: true if this pipe is primary plane for crtc * @possible_crtcs: bitmask of crtc that can be attached to the given pipe * @vp_enabled: Flag indicating if virtual planes enabled + * @plane_reserved: Flag indicating the plane is occupied in bootloader */ struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe, bool primary_plane, - unsigned long possible_crtcs, bool vp_enabled); + unsigned long possible_crtcs, + bool vp_enabled, bool plane_reserved); /** * sde_plane_wait_input_fence - wait for input fence object diff --git a/drivers/gpu/drm/msm/sde/sde_splash.c b/drivers/gpu/drm/msm/sde/sde_splash.c index 2789ae053663..f6bd7b040dcb 100644 --- a/drivers/gpu/drm/msm/sde/sde_splash.c +++ b/drivers/gpu/drm/msm/sde/sde_splash.c @@ -23,6 +23,7 @@ #include "sde_hw_intf.h" #include "sde_hw_catalog.h" #include "dsi_display.h" +#include "sde_hdmi.h" #define MDP_SSPP_TOP0_OFF 0x1000 #define DISP_INTF_SEL 0x004 @@ -39,6 +40,10 @@ #define SDE_LK_EXIT_MAX_LOOP 20 +#define INTF_HDMI_SEL (BIT(25) | BIT(24)) +#define INTF_DSI0_SEL BIT(8) +#define INTF_DSI1_SEL BIT(16) + static DEFINE_MUTEX(sde_splash_lock); /* @@ -283,6 +288,44 @@ static void _sde_splash_destroy_splash_node(struct sde_splash_info *sinfo) sinfo->splash_mem_size = NULL; } +static void _sde_splash_sent_pipe_update_uevent(struct sde_kms *sde_kms) +{ + char *event_string; + char *envp[2]; + struct drm_device *dev; + struct device *kdev; + int i = 0; + + if (!sde_kms || !sde_kms->dev) { + DRM_ERROR("invalid input\n"); + return; + } + + dev = sde_kms->dev; + kdev = dev->primary->kdev; + + event_string = kzalloc(SZ_4K, GFP_KERNEL); + if (!event_string) { + SDE_ERROR("failed to allocate event string\n"); + return; + } + + for (i = 0; i < MAX_BLOCKS; i++) { + if (sde_kms->splash_info.reserved_pipe_info[i] != 0xFFFFFFFF) + snprintf(event_string, SZ_4K, "pipe%d avialable", + sde_kms->splash_info.reserved_pipe_info[i]); + } + + DRM_INFO("generating pipe update event[%s]", event_string); + + envp[0] = event_string; + envp[1] = NULL; + + kobject_uevent_env(&kdev->kobj, KOBJ_CHANGE, envp); + + kfree(event_string); +} + static void _sde_splash_get_connector_ref_cnt(struct sde_splash_info *sinfo, u32 *hdmi_cnt, u32 *dsi_cnt) { @@ -372,12 +415,12 @@ void sde_splash_destroy(struct sde_splash_info *sinfo, } /* - * sde_splash_parse_dt. + * sde_splash_parse_memory_dt. * In the function, it will parse and reserve two kinds of memory node. * First is to get the reserved memory for display buffers. - * Second is to get the memory node LK's code stack is running on. + * Second is to get the memory node which LK's heap memory is running on. 
*/ -int sde_splash_parse_dt(struct drm_device *dev) +int sde_splash_parse_memory_dt(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; struct sde_kms *sde_kms; @@ -404,6 +447,79 @@ int sde_splash_parse_dt(struct drm_device *dev) return 0; } +static inline u32 _sde_splash_parse_sspp_id(struct sde_mdss_cfg *cfg, + const char *name) +{ + int i; + + for (i = 0; i < cfg->sspp_count; i++) { + if (!strcmp(cfg->sspp[i].name, name)) + return cfg->sspp[i].id; + } + + return 0; +} + +int sde_splash_parse_reserved_plane_dt(struct sde_splash_info *splash_info, + struct sde_mdss_cfg *cfg) +{ + struct device_node *parent, *node; + struct property *prop; + const char *cname; + int ret = 0, i = 0; + + if (!splash_info || !cfg) + return -EINVAL; + + parent = of_find_node_by_path("/qcom,sde-reserved-plane"); + if (!parent) + return -EINVAL; + + for (i = 0; i < MAX_BLOCKS; i++) + splash_info->reserved_pipe_info[i] = 0xFFFFFFFF; + + i = 0; + for_each_child_of_node(parent, node) { + if (i >= MAX_BLOCKS) { + SDE_ERROR("num of nodes(%d) is bigger than max(%d)\n", + i, MAX_BLOCKS); + ret = -EINVAL; + goto parent_node_err; + } + + of_property_for_each_string(node, "qcom,plane-name", + prop, cname) + splash_info->reserved_pipe_info[i] = + _sde_splash_parse_sspp_id(cfg, cname); + i++; + } + +parent_node_err: + of_node_put(parent); + + return ret; +} + +bool sde_splash_query_plane_is_reserved(struct sde_splash_info *sinfo, + uint32_t pipe) +{ + int i = 0; + + if (!sinfo) + return false; + + /* early return if no splash is enabled */ + if (!sinfo->handoff) + return false; + + for (i = 0; i < MAX_BLOCKS; i++) { + if (sinfo->reserved_pipe_info[i] == pipe) + return true; + } + + return false; +} + int sde_splash_get_handoff_status(struct msm_kms *kms) { uint32_t intf_sel = 0; @@ -448,14 +564,17 @@ int sde_splash_get_handoff_status(struct msm_kms *kms) * considered as single display. So decrement * 'num_of_display_on' by 1 */ - if (split_display) + if (split_display) { num_of_display_on--; + sinfo->split_is_enabled = true; + } } if (num_of_display_on) { sinfo->handoff = true; sinfo->program_scratch_regs = true; sinfo->lk_is_exited = false; + sinfo->intf_sel_status = intf_sel; } else { sinfo->handoff = false; sinfo->program_scratch_regs = false; @@ -504,6 +623,71 @@ int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu, return ret ? 
0 : -ENOMEM; } +static bool _sde_splash_get_panel_intf_status(struct sde_splash_info *sinfo, + const char *display_name, int connector_type) +{ + bool ret = false; + int intf_status = 0; + + if (sinfo && sinfo->handoff) { + if (connector_type == DRM_MODE_CONNECTOR_DSI) { + if (!strcmp(display_name, "dsi_adv_7533_1")) { + if (sinfo->intf_sel_status & INTF_DSI0_SEL) + ret = true; + } else if (!strcmp(display_name, "dsi_adv_7533_2")) { + if (sinfo->intf_sel_status & INTF_DSI1_SEL) + ret = true; + } else + DRM_INFO("wrong display name %s\n", + display_name); + } else if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { + intf_status = sinfo->intf_sel_status & INTF_HDMI_SEL; + ret = (intf_status == INTF_HDMI_SEL); + } + } + + return ret; +} + +int sde_splash_setup_display_resource(struct sde_splash_info *sinfo, + void *disp, int connector_type) +{ + if (!sinfo || !disp) + return -EINVAL; + + /* early return if splash is not enabled in bootloader */ + if (!sinfo->handoff) + return 0; + + if (connector_type == DRM_MODE_CONNECTOR_DSI) { + struct dsi_display *display = (struct dsi_display *)disp; + + display->cont_splash_enabled = + _sde_splash_get_panel_intf_status(sinfo, + display->name, + connector_type); + + DRM_INFO("DSI splash %s\n", + display->cont_splash_enabled ? "enabled" : "disabled"); + + if (display->cont_splash_enabled) { + if (dsi_dsiplay_setup_splash_resource(display)) + return -EINVAL; + } + } else if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { + struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)disp; + + sde_hdmi->cont_splash_enabled = + _sde_splash_get_panel_intf_status(sinfo, + NULL, connector_type); + + DRM_INFO("HDMI splash %s\n", + sde_hdmi->cont_splash_enabled ? "enabled" : "disabled"); + } + + return 0; +} + void sde_splash_setup_connector_count(struct sde_splash_info *sinfo, int connector_type) { @@ -578,6 +762,8 @@ int sde_splash_clean_up_free_resource(struct msm_kms *kms, sde_power_data_bus_bandwidth_ctrl(phandle, sde_kms->core_client, false); + _sde_splash_sent_pipe_update_uevent(sde_kms); + mutex_unlock(&sde_splash_lock); return 0; } diff --git a/drivers/gpu/drm/msm/sde/sde_splash.h b/drivers/gpu/drm/msm/sde/sde_splash.h index babf88335e49..9eddd87e5e26 100644 --- a/drivers/gpu/drm/msm/sde/sde_splash.h +++ b/drivers/gpu/drm/msm/sde/sde_splash.h @@ -1,5 +1,5 @@ /** - * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -49,11 +49,20 @@ struct sde_splash_info { /* memory size of lk pool */ size_t lk_pool_size; + /* enabled statue of displays*/ + uint32_t intf_sel_status; + + /* DSI split enabled flag */ + bool split_is_enabled; + /* registered hdmi connector count */ uint32_t hdmi_connector_cnt; /* registered dst connector count */ uint32_t dsi_connector_cnt; + + /* reserved pipe info for early RVC */ + uint32_t reserved_pipe_info[MAX_BLOCKS]; }; /* APIs for early splash handoff functions */ @@ -99,11 +108,27 @@ int sde_splash_clean_up_free_resource(struct msm_kms *kms, int connector_type, void *display); /** - * sde_splash_parse_dt. + * sde_splash_parse_memory_dt. * * Parse reserved memory block from DT for early splash. */ -int sde_splash_parse_dt(struct drm_device *dev); +int sde_splash_parse_memory_dt(struct drm_device *dev); + +/** + * sde_splash_parse_reserved_plane_dt + * + * Parse reserved plane information from DT for early RVC case. 
+ */ +int sde_splash_parse_reserved_plane_dt(struct sde_splash_info *splash_info, + struct sde_mdss_cfg *cfg); + +/* + * sde_splash_query_plane_is_reserved + * + * Query plane is reserved in dt. + */ +bool sde_splash_query_plane_is_reserved(struct sde_splash_info *sinfo, + uint32_t pipe); /** * sde_splash_smmu_map. @@ -129,4 +154,11 @@ void sde_splash_destroy(struct sde_splash_info *sinfo, */ bool sde_splash_get_lk_complete_status(struct sde_splash_info *sinfo); +/** + * sde_splash_setup_display_resource + * + * Setup display resource based on connector type. + */ +int sde_splash_setup_display_resource(struct sde_splash_info *sinfo, + void *disp, int connector_type); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 2a5ed7460354..ababdaabe870 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -253,9 +253,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) nv_connector->edid = NULL; } - ret = pm_runtime_get_sync(connector->dev->dev); - if (ret < 0 && ret != -EACCES) - return conn_status; + /* Outputs are only polled while runtime active, so acquiring a + * runtime PM ref here is unnecessary (and would deadlock upon + * runtime suspend because it waits for polling to finish). + */ + if (!drm_kms_helper_is_poll_worker()) { + ret = pm_runtime_get_sync(connector->dev->dev); + if (ret < 0 && ret != -EACCES) + return conn_status; + } nv_encoder = nouveau_connector_ddc_detect(connector); if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { @@ -323,8 +329,10 @@ detect_analog: out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return conn_status; } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 00de1bf81519..9dfc2471ea09 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -104,7 +104,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos, }; struct nouveau_display *disp = nouveau_display(crtc->dev); struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)]; - int ret, retry = 1; + int ret, retry = 20; do { ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args)); diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index f516b5891932..083db3f5181f 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -288,7 +288,12 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait) msecs_to_jiffies(100))) { dev_err(dmm->dev, "timed out waiting for done\n"); ret = -ETIMEDOUT; + goto cleanup; } + + /* Check the engine status before continue */ + ret = wait_status(engine, DMM_PATSTATUS_READY | + DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE); } cleanup: diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 7ed08fdc4c42..393e5335e33b 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -158,7 +158,7 @@ static void evict_entry(struct drm_gem_object *obj, size_t size = PAGE_SIZE * n; loff_t off = mmap_offset(obj) + (entry->obj_pgoff << PAGE_SHIFT); - const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); + const int m = DIV_ROUND_UP(omap_obj->width << fmt, 
PAGE_SIZE); if (m > 1) { int i; @@ -415,7 +415,7 @@ static int fault_2d(struct drm_gem_object *obj, * into account in some of the math, so figure out virtual stride * in pages */ - const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); + const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE); /* We don't use vmf->pgoff since that has the fake offset: */ pgoff = ((unsigned long)vmf->virtual_address - diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index c4a552637c93..3ff7689835dc 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -494,9 +494,11 @@ static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = { int qxl_fbdev_init(struct qxl_device *qdev) { + int ret = 0; + +#ifdef CONFIG_DRM_FBDEV_EMULATION struct qxl_fbdev *qfbdev; int bpp_sel = 32; /* TODO: parameter from somewhere? */ - int ret; qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL); if (!qfbdev) @@ -531,6 +533,8 @@ fini: drm_fb_helper_fini(&qfbdev->helper); free: kfree(qfbdev); +#endif + return ret; } @@ -546,6 +550,9 @@ void qxl_fbdev_fini(struct qxl_device *qdev) void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state) { + if (!qdev->mode_info.qfbdev) + return; + drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state); } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 134874cab4c7..80b6d6e4721a 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3599,35 +3599,8 @@ static void cik_gpu_init(struct radeon_device *rdev) case CHIP_KAVERI: rdev->config.cik.max_shader_engines = 1; rdev->config.cik.max_tile_pipes = 4; - if ((rdev->pdev->device == 0x1304) || - (rdev->pdev->device == 0x1305) || - (rdev->pdev->device == 0x130C) || - (rdev->pdev->device == 0x130F) || - (rdev->pdev->device == 0x1310) || - (rdev->pdev->device == 0x1311) || - (rdev->pdev->device == 0x131C)) { - rdev->config.cik.max_cu_per_sh = 8; - rdev->config.cik.max_backends_per_se = 2; - } else if ((rdev->pdev->device == 0x1309) || - (rdev->pdev->device == 0x130A) || - (rdev->pdev->device == 0x130D) || - (rdev->pdev->device == 0x1313) || - (rdev->pdev->device == 0x131D)) { - rdev->config.cik.max_cu_per_sh = 6; - rdev->config.cik.max_backends_per_se = 2; - } else if ((rdev->pdev->device == 0x1306) || - (rdev->pdev->device == 0x1307) || - (rdev->pdev->device == 0x130B) || - (rdev->pdev->device == 0x130E) || - (rdev->pdev->device == 0x1315) || - (rdev->pdev->device == 0x1318) || - (rdev->pdev->device == 0x131B)) { - rdev->config.cik.max_cu_per_sh = 4; - rdev->config.cik.max_backends_per_se = 1; - } else { - rdev->config.cik.max_cu_per_sh = 3; - rdev->config.cik.max_backends_per_se = 1; - } + rdev->config.cik.max_cu_per_sh = 8; + rdev->config.cik.max_backends_per_se = 2; rdev->config.cik.max_sh_per_se = 1; rdev->config.cik.max_texture_channel_caches = 4; rdev->config.cik.max_gprs = 256; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 30f00748ed37..1a2a7365d0b5 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -89,25 +89,18 @@ void radeon_connector_hotplug(struct drm_connector *connector) /* don't do anything if sink is not display port, i.e., * passive dp->(dvi|hdmi) adaptor */ - if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { - int saved_dpms = connector->dpms; - /* Only turn off the display if it's physically disconnected */ - if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { - 
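The omap_gem hunks above replace the open-coded "1 + ((width << fmt) / PAGE_SIZE)" with DIV_ROUND_UP(): the old expression over-counts by one page whenever the row size is an exact multiple of PAGE_SIZE. A small stand-alone sketch of the difference, assuming a 4096-byte page for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long rows[] = { 4095, 4096, 4097, 8192 };

	for (unsigned i = 0; i < sizeof(rows) / sizeof(rows[0]); i++) {
		unsigned long n = rows[i];
		unsigned long old = 1 + n / PAGE_SIZE;          /* previous formula */
		unsigned long new = DIV_ROUND_UP(n, PAGE_SIZE); /* fixed formula */

		/* 4096 bytes: old says 2 pages, new correctly says 1 */
		printf("%lu bytes -> old=%lu pages, new=%lu pages\n", n, old, new);
	}
	return 0;
}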
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - } else if (radeon_dp_needs_link_train(radeon_connector)) { - /* Don't try to start link training before we - * have the dpcd */ - if (!radeon_dp_getdpcd(radeon_connector)) - return; - - /* set it to OFF so that drm_helper_connector_dpms() - * won't return immediately since the current state - * is ON at this point. - */ - connector->dpms = DRM_MODE_DPMS_OFF; - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - } - connector->dpms = saved_dpms; + if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && + radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && + radeon_dp_needs_link_train(radeon_connector)) { + /* Don't start link training before we have the DPCD */ + if (!radeon_dp_getdpcd(radeon_connector)) + return; + + /* Turn the connector off and back on immediately, which + * will trigger link training + */ + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } } } @@ -891,9 +884,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); @@ -916,8 +911,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) /* check acpi lid status ??? */ radeon_connector_update_scratch_regs(connector, ret); - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } + return ret; } @@ -1020,9 +1019,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } encoder = radeon_best_single_encoder(connector); if (!encoder) @@ -1089,8 +1090,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force) radeon_connector_update_scratch_regs(connector, ret); out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -1153,9 +1156,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force) if (!radeon_connector->dac_load_detect) return ret; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } encoder = radeon_best_single_encoder(connector); if (!encoder) @@ -1167,8 +1172,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force) if (ret == connector_status_connected) ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); radeon_connector_update_scratch_regs(connector, ret); - 
pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } + return ret; } @@ -1230,9 +1239,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; bool dret = false, broken_edid = false; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (radeon_connector->detected_hpd_without_ddc) { force = true; @@ -1415,8 +1426,10 @@ out: } exit: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -1666,9 +1679,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (radeon_dig_connector->is_mst) return connector_status_disconnected; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (!force && radeon_check_hpd_status_unchanged(connector)) { ret = connector->status; @@ -1755,8 +1770,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) } out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 3645b223aa37..446d99062306 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1374,6 +1374,12 @@ radeon_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOENT); } + /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ + if (obj->import_attach) { + DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n"); + return ERR_PTR(-EINVAL); + } + radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); if (radeon_fb == NULL) { drm_gem_object_unreference_unlocked(obj); diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 73e41a8613da..29bd801f5dad 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -256,10 +256,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { unsigned long start = vma->vm_start; unsigned long size = vma->vm_end - vma->vm_start; - unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + unsigned long offset; unsigned long page, pos; - if (offset + size > info->fix.smem_len) + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) + return -EINVAL; + + offset = vma->vm_pgoff << PAGE_SHIFT; + + if (offset > info->fix.smem_len || size > info->fix.smem_len - offset) return -EINVAL; pos = (unsigned long)info->fix.smem_start + offset; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index b40ed6061f05..7f898cfdc746 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -118,7 
+118,7 @@ static const struct file_operations virtio_gpu_driver_fops = { static struct drm_driver driver = { - .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER, + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC, .set_busid = drm_virtio_set_busid, .load = virtio_gpu_driver_load, .unload = virtio_gpu_driver_unload, diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 4a74129c5708..7b6e5c5e7284 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -68,10 +68,17 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, struct virtio_gpu_object *bo; uint32_t handle; - if (plane->fb) { - vgfb = to_virtio_gpu_framebuffer(plane->fb); + if (plane->state->fb) { + vgfb = to_virtio_gpu_framebuffer(plane->state->fb); bo = gem_to_virtio_gpu_obj(vgfb->obj); handle = bo->hw_res_handle; + if (bo->dumb) { + virtio_gpu_cmd_transfer_to_host_2d + (vgdev, handle, 0, + cpu_to_le32(plane->state->crtc_w), + cpu_to_le32(plane->state->crtc_h), + plane->state->crtc_x, plane->state->crtc_y, NULL); + } } else { handle = 0; } @@ -84,6 +91,11 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, plane->state->crtc_h, plane->state->crtc_x, plane->state->crtc_y); + virtio_gpu_cmd_resource_flush(vgdev, handle, + plane->state->crtc_x, + plane->state->crtc_y, + plane->state->crtc_w, + plane->state->crtc_h); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index d2d93959b119..aec6e9eef489 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -433,7 +433,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par, set.y = 0; set.mode = NULL; set.fb = NULL; - set.num_connectors = 1; + set.num_connectors = 0; set.connectors = &par->con; ret = drm_mode_set_config_internal(&set); if (ret) { @@ -821,7 +821,9 @@ int vmw_fb_off(struct vmw_private *vmw_priv) flush_delayed_work(&par->local_work); mutex_lock(&par->bo_mutex); + drm_modeset_lock_all(vmw_priv->dev); (void) vmw_fb_kms_detach(par, true, false); + drm_modeset_unlock_all(vmw_priv->dev); mutex_unlock(&par->bo_mutex); return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 060e5c6f4446..098e562bd579 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -27,7 +27,6 @@ #include "vmwgfx_kms.h" - /* Might need a hrtimer here? */ #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) @@ -1910,9 +1909,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, * Helper to be used if an error forces the caller to undo the actions of * vmw_kms_helper_resource_prepare. */ -void vmw_kms_helper_resource_revert(struct vmw_resource *res) +void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx) { - vmw_kms_helper_buffer_revert(res->backup); + struct vmw_resource *res = ctx->res; + + vmw_kms_helper_buffer_revert(ctx->buf); + vmw_dmabuf_unreference(&ctx->buf); vmw_resource_unreserve(res, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); } @@ -1929,10 +1931,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res) * interrupted by a signal. 
*/ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, - bool interruptible) + bool interruptible, + struct vmw_validation_ctx *ctx) { int ret = 0; + ctx->buf = NULL; + ctx->res = res; + if (interruptible) ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex); else @@ -1951,6 +1957,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, res->dev_priv->has_mob); if (ret) goto out_unreserve; + + ctx->buf = vmw_dmabuf_reference(res->backup); } ret = vmw_resource_validate(res); if (ret) @@ -1958,7 +1966,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, return 0; out_revert: - vmw_kms_helper_buffer_revert(res->backup); + vmw_kms_helper_buffer_revert(ctx->buf); out_unreserve: vmw_resource_unreserve(res, false, NULL, 0); out_unlock: @@ -1974,11 +1982,13 @@ out_unlock: * @out_fence: Optional pointer to a fence pointer. If non-NULL, a * ref-counted fence pointer is returned here. */ -void vmw_kms_helper_resource_finish(struct vmw_resource *res, - struct vmw_fence_obj **out_fence) +void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, + struct vmw_fence_obj **out_fence) { - if (res->backup || out_fence) - vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, + struct vmw_resource *res = ctx->res; + + if (ctx->buf || out_fence) + vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, out_fence, NULL); vmw_resource_unreserve(res, false, NULL, 0); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index edd81503516d..63b05d5ee50a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -180,6 +180,11 @@ struct vmw_display_unit { bool is_implicit; }; +struct vmw_validation_ctx { + struct vmw_resource *res; + struct vmw_dma_buffer *buf; +}; + #define vmw_crtc_to_du(x) \ container_of(x, struct vmw_display_unit, crtc) #define vmw_connector_to_du(x) \ @@ -230,9 +235,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, struct drm_vmw_fence_rep __user * user_fence_rep); int vmw_kms_helper_resource_prepare(struct vmw_resource *res, - bool interruptible); -void vmw_kms_helper_resource_revert(struct vmw_resource *res); -void vmw_kms_helper_resource_finish(struct vmw_resource *res, + bool interruptible, + struct vmw_validation_ctx *ctx); +void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx); +void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, struct vmw_fence_obj **out_fence); int vmw_kms_readback(struct vmw_private *dev_priv, struct drm_file *file_priv, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 13926ff192e3..f50fcd213413 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -841,12 +841,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer_surface *vfbs = container_of(framebuffer, typeof(*vfbs), base); struct vmw_kms_sou_surface_dirty sdirty; + struct vmw_validation_ctx ctx; int ret; if (!srf) srf = &vfbs->surface->res; - ret = vmw_kms_helper_resource_prepare(srf, true); + ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); if (ret) return ret; @@ -865,7 +866,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, dest_x, dest_y, num_clips, inc, &sdirty.base); - vmw_kms_helper_resource_finish(srf, out_fence); + vmw_kms_helper_resource_finish(&ctx, out_fence); return ret; } diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index f823fc3efed7..3184a9ae22c1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1003,12 +1003,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer_surface *vfbs = container_of(framebuffer, typeof(*vfbs), base); struct vmw_stdu_dirty sdirty; + struct vmw_validation_ctx ctx; int ret; if (!srf) srf = &vfbs->surface->res; - ret = vmw_kms_helper_resource_prepare(srf, true); + ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); if (ret) return ret; @@ -1031,7 +1032,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, dest_x, dest_y, num_clips, inc, &sdirty.base); out_finish: - vmw_kms_helper_resource_finish(srf, out_fence); + vmw_kms_helper_resource_finish(&ctx, out_fence); return ret; } diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 119741f7ce47..4daf1fad6ee1 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -1381,13 +1381,13 @@ static int _execute_reg_sequence(struct adreno_device *adreno_dev, /* todo double check the reg writes */ while ((cur - opcode) < length) { - if (cur[0] == 1 && ((cur + 4) - opcode) <= length) { + if (cur[0] == 1 && (length - (cur - opcode) >= 4)) { /* Write a 32 bit value to a 64 bit reg */ reg = cur[2]; reg = (reg << 32) | cur[1]; kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, cur[3]); cur += 4; - } else if (cur[0] == 2 && ((cur + 5) - opcode) <= length) { + } else if (cur[0] == 2 && (length - (cur - opcode) >= 5)) { /* Write a 64 bit value to a 64 bit reg */ reg = cur[2]; reg = (reg << 32) | cur[1]; @@ -1395,7 +1395,7 @@ static int _execute_reg_sequence(struct adreno_device *adreno_dev, val = (val << 32) | cur[3]; kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, val); cur += 5; - } else if (cur[0] == 3 && ((cur + 2) - opcode) <= length) { + } else if (cur[0] == 3 && (length - (cur - opcode) >= 2)) { /* Delay for X usec */ udelay(cur[1]); cur += 2; diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index 0cd4f7216239..5eea6fe0d7bd 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -42,6 +42,12 @@ static int elo_input_configured(struct hid_device *hdev, { struct input_dev *input = hidinput->input; + /* + * ELO devices have one Button usage in GenDesk field, which makes + * hid-input map it to BTN_LEFT; that confuses userspace, which then + * considers the device to be a mouse/touchpad instead of touchscreen. + */ + clear_bit(BTN_LEFT, input->keybit); set_bit(BTN_TOUCH, input->keybit); set_bit(ABS_PRESSURE, input->absbit); input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0); diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 2ba6bf69b7d0..53e54855c366 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1128,18 +1128,26 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct /* * Ignore out-of-range values as per HID specification, - * section 5.10 and 6.2.25. + * section 5.10 and 6.2.25, when NULL state bit is present. + * When it's not, clamp the value to match Microsoft's input + * driver as mentioned in "Required HID usages for digitizers": + * https://msdn.microsoft.com/en-us/library/windows/hardware/dn672278(v=vs.85).asp * * The logical_minimum < logical_maximum check is done so that we * don't unintentionally discard values sent by devices which * don't specify logical min and max. 
*/ if ((field->flags & HID_MAIN_ITEM_VARIABLE) && - (field->logical_minimum < field->logical_maximum) && - (value < field->logical_minimum || - value > field->logical_maximum)) { - dbg_hid("Ignoring out-of-range value %x\n", value); - return; + (field->logical_minimum < field->logical_maximum)) { + if (field->flags & HID_MAIN_ITEM_NULL_STATE && + (value < field->logical_minimum || + value > field->logical_maximum)) { + dbg_hid("Ignoring out-of-range value %x\n", value); + return; + } + value = clamp(value, + field->logical_minimum, + field->logical_maximum); } /* diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index a38af68cf326..0a0628d11c0b 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c @@ -976,7 +976,7 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; /* Pad to 32-bits - FIXME: Revisit*/ if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3))) - goto drop; + goto inc_dropped; /* * Modem sends Phonet messages over SSI with its own endianess... @@ -1028,8 +1028,9 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) drop2: hsi_free_msg(msg); drop: - dev->stats.tx_dropped++; dev_kfree_skb(skb); +inc_dropped: + dev->stats.tx_dropped++; return 0; } diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index b24f1d3045f0..a629f7c130f0 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -94,18 +94,20 @@ enum ina2xx_ids { ina219, ina226 }; struct ina2xx_config { u16 config_default; - int calibration_factor; + int calibration_value; int registers; int shunt_div; int bus_voltage_shift; int bus_voltage_lsb; /* uV */ - int power_lsb; /* uW */ + int power_lsb_factor; }; struct ina2xx_data { const struct ina2xx_config *config; long rshunt; + long current_lsb_uA; + long power_lsb_uW; struct mutex config_lock; struct regmap *regmap; @@ -115,21 +117,21 @@ struct ina2xx_data { static const struct ina2xx_config ina2xx_config[] = { [ina219] = { .config_default = INA219_CONFIG_DEFAULT, - .calibration_factor = 40960000, + .calibration_value = 4096, .registers = INA219_REGISTERS, .shunt_div = 100, .bus_voltage_shift = 3, .bus_voltage_lsb = 4000, - .power_lsb = 20000, + .power_lsb_factor = 20, }, [ina226] = { .config_default = INA226_CONFIG_DEFAULT, - .calibration_factor = 5120000, + .calibration_value = 2048, .registers = INA226_REGISTERS, .shunt_div = 400, .bus_voltage_shift = 0, .bus_voltage_lsb = 1250, - .power_lsb = 25000, + .power_lsb_factor = 25, }, }; @@ -168,12 +170,16 @@ static u16 ina226_interval_to_reg(int interval) return INA226_SHIFT_AVG(avg_bits); } +/* + * Calibration register is set to the best value, which eliminates + * truncation errors on calculating current register in hardware. + * According to datasheet (eq. 3) the best values are 2048 for + * ina226 and 4096 for ina219. They are hardcoded as calibration_value. + */ static int ina2xx_calibrate(struct ina2xx_data *data) { - u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor, - data->rshunt); - - return regmap_write(data->regmap, INA2XX_CALIBRATION, val); + return regmap_write(data->regmap, INA2XX_CALIBRATION, + data->config->calibration_value); } /* @@ -186,10 +192,6 @@ static int ina2xx_init(struct ina2xx_data *data) if (ret < 0) return ret; - /* - * Set current LSB to 1mA, shunt is in uOhms - * (equation 13 in datasheet). 
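The hid-input change above keeps the old "ignore out-of-range values" behaviour only when the field declares a NULL state, and otherwise clamps the value into [logical_minimum, logical_maximum], matching Microsoft's digitizer guidance. A minimal stand-alone sketch of that decision, using placeholder types rather than the real struct hid_field and omitting the HID_MAIN_ITEM_VARIABLE check for brevity:

#include <stdbool.h>
#include <stdio.h>

#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

/* Placeholder for the relevant bits of a HID field. */
struct field {
	bool has_null_state;   /* HID_MAIN_ITEM_NULL_STATE */
	int logical_minimum;
	int logical_maximum;
};

/* Returns true if the value should be processed, writing the possibly
 * clamped value back through *value; false means "ignore this report". */
static bool filter_value(const struct field *f, int *value)
{
	if (f->logical_minimum >= f->logical_maximum)
		return true;  /* no usable range declared, pass through */

	if (f->has_null_state &&
	    (*value < f->logical_minimum || *value > f->logical_maximum))
		return false; /* NULL-state field, out of range: drop it */

	*value = clamp(*value, f->logical_minimum, f->logical_maximum);
	return true;
}

int main(void)
{
	struct field f = { .has_null_state = false,
			   .logical_minimum = 0, .logical_maximum = 1023 };
	int v = 2000;

	if (filter_value(&f, &v))
		printf("clamped to %d\n", v);  /* prints 1023 */
	return 0;
}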
- */ return ina2xx_calibrate(data); } @@ -267,15 +269,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg, val = DIV_ROUND_CLOSEST(val, 1000); break; case INA2XX_POWER: - val = regval * data->config->power_lsb; + val = regval * data->power_lsb_uW; break; case INA2XX_CURRENT: - /* signed register, LSB=1mA (selected), in mA */ - val = (s16)regval; + /* signed register, result in mA */ + val = regval * data->current_lsb_uA; + val = DIV_ROUND_CLOSEST(val, 1000); break; case INA2XX_CALIBRATION: - val = DIV_ROUND_CLOSEST(data->config->calibration_factor, - regval); + val = regval; break; default: /* programmer goofed */ @@ -303,9 +305,32 @@ static ssize_t ina2xx_show_value(struct device *dev, ina2xx_get_value(data, attr->index, regval)); } -static ssize_t ina2xx_set_shunt(struct device *dev, - struct device_attribute *da, - const char *buf, size_t count) +/* + * In order to keep calibration register value fixed, the product + * of current_lsb and shunt_resistor should also be fixed and equal + * to shunt_voltage_lsb = 1 / shunt_div multiplied by 10^9 in order + * to keep the scale. + */ +static int ina2xx_set_shunt(struct ina2xx_data *data, long val) +{ + unsigned int dividend = DIV_ROUND_CLOSEST(1000000000, + data->config->shunt_div); + if (val <= 0 || val > dividend) + return -EINVAL; + + mutex_lock(&data->config_lock); + data->rshunt = val; + data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val); + data->power_lsb_uW = data->config->power_lsb_factor * + data->current_lsb_uA; + mutex_unlock(&data->config_lock); + + return 0; +} + +static ssize_t ina2xx_store_shunt(struct device *dev, + struct device_attribute *da, + const char *buf, size_t count) { unsigned long val; int status; @@ -315,18 +340,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev, if (status < 0) return status; - if (val == 0 || - /* Values greater than the calibration factor make no sense. 
*/ - val > data->config->calibration_factor) - return -EINVAL; - - mutex_lock(&data->config_lock); - data->rshunt = val; - status = ina2xx_calibrate(data); - mutex_unlock(&data->config_lock); + status = ina2xx_set_shunt(data, val); if (status < 0) return status; - return count; } @@ -386,7 +402,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, /* shunt resistance */ static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, - ina2xx_show_value, ina2xx_set_shunt, + ina2xx_show_value, ina2xx_store_shunt, INA2XX_CALIBRATION); /* update interval (ina226 only) */ @@ -441,10 +457,7 @@ static int ina2xx_probe(struct i2c_client *client, val = INA2XX_RSHUNT_DEFAULT; } - if (val <= 0 || val > data->config->calibration_factor) - return -ENODEV; - - data->rshunt = val; + ina2xx_set_shunt(data, val); ina2xx_regmap_config.max_register = data->config->registers; diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c index 188af4c89f40..18477dd1e243 100644 --- a/drivers/hwmon/pmbus/adm1275.c +++ b/drivers/hwmon/pmbus/adm1275.c @@ -95,8 +95,8 @@ static const struct coefficients adm1075_coefficients[] = { [0] = { 27169, 0, -1 }, /* voltage */ [1] = { 806, 20475, -1 }, /* current, irange25 */ [2] = { 404, 20475, -1 }, /* current, irange50 */ - [3] = { 0, -1, 8549 }, /* power, irange25 */ - [4] = { 0, -1, 4279 }, /* power, irange50 */ + [3] = { 8549, 0, -1 }, /* power, irange25 */ + [4] = { 4279, 0, -1 }, /* power, irange50 */ }; static const struct coefficients adm1275_coefficients[] = { diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 3fd080b94069..0da9adc49574 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -45,8 +45,11 @@ #define TPIU_ITATBCTR0 0xef8 /** register definition **/ +/* FFSR - 0x300 */ +#define FFSR_FT_STOPPED BIT(1) /* FFCR - 0x304 */ #define FFCR_FON_MAN BIT(6) +#define FFCR_STOP_FI BIT(12) /** * @base: memory mapped base address for this component. @@ -85,10 +88,14 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata) { CS_UNLOCK(drvdata->base); - /* Clear formatter controle reg. 
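With the ina2xx rework above, the calibration register is pinned to the datasheet value (2048 for ina226, 4096 for ina219) and the scaling moves into software: current_lsb_uA is derived from the shunt resistance and power_lsb_uW from that. A small stand-alone sketch of the arithmetic for an ina226; the 10 mOhm shunt and the register readings are made up for illustration, and the simplified DIV_ROUND_CLOSEST macro assumes non-negative operands:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	/* ina226 constants from the config table above */
	const long shunt_div = 400;        /* shunt voltage LSB = 2.5 uV */
	const long power_lsb_factor = 25;
	const long rshunt_uohm = 10000;    /* 10 mOhm shunt, example value */

	/* As in ina2xx_set_shunt(): keep current_lsb * rshunt constant */
	long dividend = DIV_ROUND_CLOSEST(1000000000L, shunt_div);
	long current_lsb_uA = DIV_ROUND_CLOSEST(dividend, rshunt_uohm);
	long power_lsb_uW = power_lsb_factor * current_lsb_uA;

	/* Made-up register readings, converted as in ina2xx_get_value() */
	short current_reg = 1234;
	unsigned short power_reg = 567;

	long current_mA = DIV_ROUND_CLOSEST((long)current_reg * current_lsb_uA, 1000L);
	long power_uW = (long)power_reg * power_lsb_uW;

	printf("current LSB = %ld uA, power LSB = %ld uW\n",
	       current_lsb_uA, power_lsb_uW);
	printf("current = %ld mA, power = %ld uW\n", current_mA, power_uW);
	return 0;
}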
*/ - writel_relaxed(0x0, drvdata->base + TPIU_FFCR); + /* Clear formatter and stop on flush */ + writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR); /* Generate manual flush */ - writel_relaxed(FFCR_FON_MAN, drvdata->base + TPIU_FFCR); + writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR); + /* Wait for flush to complete */ + coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0); + /* Wait for formatter to stop */ + coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1); CS_LOCK(drvdata->base); } diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c index 061ddadd1122..1b8199f1b25f 100644 --- a/drivers/hwtracing/coresight/of_coresight.c +++ b/drivers/hwtracing/coresight/of_coresight.c @@ -152,7 +152,7 @@ struct coresight_platform_data *of_get_coresight_platform_data( continue; /* The local out port number */ - pdata->outports[i] = endpoint.id; + pdata->outports[i] = endpoint.port; /* * Get a handle on the remote port and parent diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c index c0d962212720..67261bc10e80 100644 --- a/drivers/i2c/busses/i2c-msm-v2.c +++ b/drivers/i2c/busses/i2c-msm-v2.c @@ -2848,8 +2848,8 @@ static void i2c_msm_pm_rt_init(struct device *dev) {} static const struct dev_pm_ops i2c_msm_pm_ops = { #ifdef CONFIG_PM_SLEEP - .suspend_noirq = i2c_msm_pm_sys_suspend_noirq, - .resume_noirq = i2c_msm_pm_sys_resume_noirq, + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(i2c_msm_pm_sys_suspend_noirq, + i2c_msm_pm_sys_resume_noirq) #endif SET_RUNTIME_PM_OPS(i2c_msm_pm_rt_suspend, i2c_msm_pm_rt_resume, diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c index dfc98df7b1b6..7aa7b9cb6203 100644 --- a/drivers/i2c/busses/i2c-scmi.c +++ b/drivers/i2c/busses/i2c-scmi.c @@ -18,6 +18,9 @@ #define ACPI_SMBUS_HC_CLASS "smbus" #define ACPI_SMBUS_HC_DEVICE_NAME "cmi" +/* SMBUS HID definition as supported by Microsoft Windows */ +#define ACPI_SMBUS_MS_HID "SMB0001" + ACPI_MODULE_NAME("smbus_cmi"); struct smbus_methods_t { @@ -51,6 +54,7 @@ static const struct smbus_methods_t ibm_smbus_methods = { static const struct acpi_device_id acpi_smbus_cmi_ids[] = { {"SMBUS01", (kernel_ulong_t)&smbus_methods}, {ACPI_SMBUS_IBM_HID, (kernel_ulong_t)&ibm_smbus_methods}, + {ACPI_SMBUS_MS_HID, (kernel_ulong_t)&smbus_methods}, {"", 0} }; MODULE_DEVICE_TABLE(acpi, acpi_smbus_cmi_ids); diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 197a08b4e2f3..b4136d3bf6b7 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -628,6 +628,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = { int st_accel_common_probe(struct iio_dev *indio_dev) { struct st_sensor_data *adata = iio_priv(indio_dev); + struct st_sensors_platform_data *pdata = + (struct st_sensors_platform_data *)adata->dev->platform_data; int irq = adata->get_irq_data_ready(indio_dev); int err; @@ -652,11 +654,10 @@ int st_accel_common_probe(struct iio_dev *indio_dev) &adata->sensor_settings->fs.fs_avl[0]; adata->odr = adata->sensor_settings->odr.odr_avl[0].hz; - if (!adata->dev->platform_data) - adata->dev->platform_data = - (struct st_sensors_platform_data *)&default_accel_pdata; + if (!pdata) + pdata = (struct st_sensors_platform_data *)&default_accel_pdata; - err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data); + err = st_sensors_init_sensor(indio_dev, pdata); if (err < 0) return err; diff --git a/drivers/iio/adc/hi8435.c 
b/drivers/iio/adc/hi8435.c index c73c6c62a6ac..7401f102dff4 100644 --- a/drivers/iio/adc/hi8435.c +++ b/drivers/iio/adc/hi8435.c @@ -121,10 +121,21 @@ static int hi8435_write_event_config(struct iio_dev *idev, enum iio_event_direction dir, int state) { struct hi8435_priv *priv = iio_priv(idev); + int ret; + u32 tmp; + + if (state) { + ret = hi8435_readl(priv, HI8435_SO31_0_REG, &tmp); + if (ret < 0) + return ret; + if (tmp & BIT(chan->channel)) + priv->event_prev_val |= BIT(chan->channel); + else + priv->event_prev_val &= ~BIT(chan->channel); - priv->event_scan_mask &= ~BIT(chan->channel); - if (state) priv->event_scan_mask |= BIT(chan->channel); + } else + priv->event_scan_mask &= ~BIT(chan->channel); return 0; } @@ -442,13 +453,15 @@ static int hi8435_probe(struct spi_device *spi) priv->spi = spi; reset_gpio = devm_gpiod_get(&spi->dev, NULL, GPIOD_OUT_LOW); - if (IS_ERR(reset_gpio)) { - /* chip s/w reset if h/w reset failed */ + if (!IS_ERR(reset_gpio)) { + /* need >=100ns low pulse to reset chip */ + gpiod_set_raw_value_cansleep(reset_gpio, 0); + udelay(1); + gpiod_set_raw_value_cansleep(reset_gpio, 1); + } else { + /* s/w reset chip if h/w reset is not available */ hi8435_writeb(priv, HI8435_CTRL_REG, HI8435_CTRL_SRST); hi8435_writeb(priv, HI8435_CTRL_REG, 0); - } else { - udelay(5); - gpiod_set_value(reset_gpio, 1); } spi_set_drvdata(spi, idev); diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c index 6325e7dc8e03..f3cb4dc05391 100644 --- a/drivers/iio/magnetometer/st_magn_spi.c +++ b/drivers/iio/magnetometer/st_magn_spi.c @@ -48,8 +48,6 @@ static int st_magn_spi_remove(struct spi_device *spi) } static const struct spi_device_id st_magn_id_table[] = { - { LSM303DLHC_MAGN_DEV_NAME }, - { LSM303DLM_MAGN_DEV_NAME }, { LIS3MDL_MAGN_DEV_NAME }, { LSM303AGR_MAGN_DEV_NAME }, {}, diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 5056bd68573f..ba282ff3892d 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -436,6 +436,8 @@ static const struct iio_trigger_ops st_press_trigger_ops = { int st_press_common_probe(struct iio_dev *indio_dev) { struct st_sensor_data *press_data = iio_priv(indio_dev); + struct st_sensors_platform_data *pdata = + (struct st_sensors_platform_data *)press_data->dev->platform_data; int irq = press_data->get_irq_data_ready(indio_dev); int err; @@ -464,12 +466,10 @@ int st_press_common_probe(struct iio_dev *indio_dev) press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz; /* Some devices don't support a data ready pin. */ - if (!press_data->dev->platform_data && - press_data->sensor_settings->drdy_irq.addr) - press_data->dev->platform_data = - (struct st_sensors_platform_data *)&default_press_pdata; + if (!pdata && press_data->sensor_settings->drdy_irq.addr) + pdata = (struct st_sensors_platform_data *)&default_press_pdata; - err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data); + err = st_sensors_init_sensor(indio_dev, pdata); if (err < 0) return err; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 6a8024d9d742..864a7c8d82d3 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -86,6 +86,22 @@ int rdma_addr_size(struct sockaddr *addr) } EXPORT_SYMBOL(rdma_addr_size); +int rdma_addr_size_in6(struct sockaddr_in6 *addr) +{ + int ret = rdma_addr_size((struct sockaddr *) addr); + + return ret <= sizeof(*addr) ? 
ret : 0; +} +EXPORT_SYMBOL(rdma_addr_size_in6); + +int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr) +{ + int ret = rdma_addr_size((struct sockaddr *) addr); + + return ret <= sizeof(*addr) ? ret : 0; +} +EXPORT_SYMBOL(rdma_addr_size_kss); + static struct rdma_addr_client self; void rdma_addr_register_client(struct rdma_addr_client *client) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index b6c9a370a38b..d57a78ec7425 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -3743,6 +3743,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, struct cma_multicast *mc; int ret; + if (!id->device) + return -EINVAL; + id_priv = container_of(id, struct rdma_id_private, id); if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) @@ -4007,7 +4010,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) goto out; if (ibnl_put_attr(skb, nlh, - rdma_addr_size(cma_src_addr(id_priv)), + rdma_addr_size(cma_dst_addr(id_priv)), cma_dst_addr(id_priv), RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) goto out; diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index fb43a242847b..8d7d110d0721 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -663,6 +663,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid) } skb_num++; spin_lock_irqsave(&iwpm_mapinfo_lock, flags); + ret = -EINVAL; for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { hlist_for_each_entry(map_info, &iwpm_hash_bucket[i], hlist_node) { diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 886f61ea6cc7..960fcb613198 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -131,7 +131,7 @@ static inline struct ucma_context *_ucma_find_context(int id, ctx = idr_find(&ctx_idr, id); if (!ctx) ctx = ERR_PTR(-ENOENT); - else if (ctx->file != file) + else if (ctx->file != file || !ctx->cm_id) ctx = ERR_PTR(-EINVAL); return ctx; } @@ -453,6 +453,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, struct rdma_ucm_create_id cmd; struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; + struct rdma_cm_id *cm_id; enum ib_qp_type qp_type; int ret; @@ -473,10 +474,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, return -ENOMEM; ctx->uid = cmd.uid; - ctx->cm_id = rdma_create_id(current->nsproxy->net_ns, - ucma_event_handler, ctx, cmd.ps, qp_type); - if (IS_ERR(ctx->cm_id)) { - ret = PTR_ERR(ctx->cm_id); + cm_id = rdma_create_id(current->nsproxy->net_ns, + ucma_event_handler, ctx, cmd.ps, qp_type); + if (IS_ERR(cm_id)) { + ret = PTR_ERR(cm_id); goto err1; } @@ -486,14 +487,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, ret = -EFAULT; goto err2; } + + ctx->cm_id = cm_id; return 0; err2: - rdma_destroy_id(ctx->cm_id); + rdma_destroy_id(cm_id); err1: mutex_lock(&mut); idr_remove(&ctx_idr, ctx->id); mutex_unlock(&mut); + mutex_lock(&file->mut); + list_del(&ctx->list); + mutex_unlock(&file->mut); kfree(ctx); return ret; } @@ -623,6 +629,9 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (!rdma_addr_size_in6(&cmd.addr)) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -636,22 +645,21 @@ static ssize_t ucma_bind(struct 
ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_bind cmd; - struct sockaddr *addr; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - addr = (struct sockaddr *) &cmd.addr; - if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr))) + if (cmd.reserved || !cmd.addr_size || + cmd.addr_size != rdma_addr_size_kss(&cmd.addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); - ret = rdma_bind_addr(ctx->cm_id, addr); + ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); ucma_put_ctx(ctx); return ret; } @@ -667,13 +675,16 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (!rdma_addr_size_in6(&cmd.src_addr) || + !rdma_addr_size_in6(&cmd.dst_addr)) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, - (struct sockaddr *) &cmd.dst_addr, - cmd.timeout_ms); + (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } @@ -683,24 +694,23 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file, int in_len, int out_len) { struct rdma_ucm_resolve_addr cmd; - struct sockaddr *src, *dst; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - src = (struct sockaddr *) &cmd.src_addr; - dst = (struct sockaddr *) &cmd.dst_addr; - if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) || - !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst))) + if (cmd.reserved || + (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) || + !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr))) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); - ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); + ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, + (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } @@ -1138,10 +1148,18 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (cmd.qp_state > IB_QPS_ERR) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); + if (!ctx->cm_id->device) { + ret = -EINVAL; + goto out; + } + resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; @@ -1274,6 +1292,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + if (unlikely(cmd.optval > KMALLOC_MAX_SIZE)) + return -EINVAL; + optval = memdup_user((void __user *) (unsigned long) cmd.optval, cmd.optlen); if (IS_ERR(optval)) { @@ -1295,7 +1316,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, { struct rdma_ucm_notify cmd; struct ucma_context *ctx; - int ret; + int ret = -EINVAL; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; @@ -1304,7 +1325,9 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); - ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event); + if (ctx->cm_id->device) + ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event); + ucma_put_ctx(ctx); return ret; } @@ -1322,7 +1345,7 @@ static ssize_t ucma_process_join(struct ucma_file *file, return 
-ENOSPC; addr = (struct sockaddr *) &cmd->addr; - if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) + if (cmd->reserved || (cmd->addr_size != rdma_addr_size(addr))) return -EINVAL; ctx = ucma_get_ctx(file, cmd->id); @@ -1381,7 +1404,10 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file, join_cmd.response = cmd.response; join_cmd.uid = cmd.uid; join_cmd.id = cmd.id; - join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); + join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr); + if (!join_cmd.addr_size) + return -EINVAL; + join_cmd.reserved = 0; memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); @@ -1397,6 +1423,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (!rdma_addr_size_kss(&cmd.addr)) + return -EINVAL; + return ucma_process_join(file, &cmd, out_len); } diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 0ae337bec4f2..6790ebb366dd 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -354,7 +354,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, return -EINVAL; } - ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length, + ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length, offset + ib_umem_offset(umem)); if (ret < 0) diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index b7a73f1a8beb..3eb967521917 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2436,9 +2436,13 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, static void *alloc_wr(size_t wr_size, __u32 num_sge) { + if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) / + sizeof (struct ib_sge)) + return NULL; + return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) + num_sge * sizeof (struct ib_sge), GFP_KERNEL); -}; +} ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, struct ib_device *ib_dev, @@ -2665,6 +2669,13 @@ static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, goto err; } + if (user_wr->num_sge >= + (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) / + sizeof (struct ib_sge)) { + ret = -EINVAL; + goto err; + } + next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + user_wr->num_sge * sizeof (struct ib_sge), GFP_KERNEL); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 5a2a0b5db938..67c4c73343d4 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1041,7 +1041,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) /* need to protect from a race on closing the vma as part of * mlx4_ib_vma_close(). 
*/ - down_read(&owning_mm->mmap_sem); + down_write(&owning_mm->mmap_sem); for (i = 0; i < HW_BAR_COUNT; i++) { vma = context->hw_bar_info[i].vma; if (!vma) @@ -1055,11 +1055,13 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) BUG_ON(1); } + context->hw_bar_info[i].vma->vm_flags &= + ~(VM_SHARED | VM_MAYSHARE); /* context going to be destroyed, should not access ops any more */ context->hw_bar_info[i].vma->vm_ops = NULL; } - up_read(&owning_mm->mmap_sem); + up_write(&owning_mm->mmap_sem); mmput(owning_mm); put_task_struct(owning_process); } diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 02c8deab1fff..4a4ab433062f 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -972,7 +972,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, if (ucmd.reserved0 || ucmd.reserved1) return -EINVAL; - umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, + /* check multiplication overflow */ + if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) + return -EINVAL; + + umem = ib_umem_get(context, ucmd.buf_addr, + (size_t)ucmd.cqe_size * entries, IB_ACCESS_LOCAL_WRITE, 1); if (IS_ERR(umem)) { err = PTR_ERR(umem); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index 86c303a620c1..748b63b86cbc 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c @@ -834,7 +834,7 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev) dev->reset_stats.type = OCRDMA_RESET_STATS; dev->reset_stats.dev = dev; - if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir, + if (!debugfs_create_file("reset_stats", 0200, dev->dir, &dev->reset_stats, &ocrdma_dbg_ops)) goto err; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index d3f0a384faad..f6b06729f4ea 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -945,6 +945,19 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv) */ priv->dev->broadcast[8] = priv->pkey >> 8; priv->dev->broadcast[9] = priv->pkey & 0xff; + + /* + * Update the broadcast address in the priv->broadcast object, + * in case it already exists, otherwise no one will do that. + */ + if (priv->broadcast) { + spin_lock_irq(&priv->lock); + memcpy(priv->broadcast->mcmember.mgid.raw, + priv->dev->broadcast + 4, + sizeof(union ib_gid)); + spin_unlock_irq(&priv->lock); + } + return 0; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 5c653669e736..37b42447045d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -724,6 +724,22 @@ static void path_rec_completion(int status, spin_lock_irqsave(&priv->lock, flags); if (!IS_ERR_OR_NULL(ah)) { + /* + * pathrec.dgid is used as the database key from the LLADDR, + * it must remain unchanged even if the SA returns a different + * GID to use in the AH. 
+ */ + if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw, + sizeof(union ib_gid))) { + ipoib_dbg( + priv, + "%s got PathRec for gid %pI6 while asked for %pI6\n", + dev->name, pathrec->dgid.raw, + path->pathrec.dgid.raw); + memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw, + sizeof(union ib_gid)); + } + path->pathrec = *pathrec; old_ah = path->ah; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 8bf48165f32c..21e688d55da6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -473,6 +473,9 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) return -EINVAL; + init_completion(&mcast->done); + set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); rec.mgid = mcast->mcmember.mgid; @@ -631,8 +634,6 @@ void ipoib_mcast_join_task(struct work_struct *work) if (mcast->backoff == 1 || time_after_eq(jiffies, mcast->delay_until)) { /* Found the next unjoined group */ - init_completion(&mcast->done); - set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); if (ipoib_mcast_join(dev, mcast)) { spin_unlock_irq(&priv->lock); return; @@ -652,11 +653,9 @@ out: queue_delayed_work(priv->wq, &priv->mcast_task, delay_until - jiffies); } - if (mcast) { - init_completion(&mcast->done); - set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + if (mcast) ipoib_mcast_join(dev, mcast); - } + spin_unlock_irq(&priv->lock); } diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index a73874508c3a..cb3a8623ff54 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -2974,12 +2974,8 @@ static void srpt_queue_response(struct se_cmd *cmd) } spin_unlock_irqrestore(&ioctx->spinlock, flags); - if (unlikely(transport_check_aborted_status(&ioctx->cmd, false) - || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) { - atomic_inc(&ch->req_lim_delta); - srpt_abort_cmd(ioctx); + if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) return; - } dir = ioctx->cmd.data_direction; diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index ddd8148d51d7..75ff4c965573 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -525,6 +525,16 @@ config KEYBOARD_GOLDFISH_EVENTS To compile this driver as a module, choose M here: the module will be called goldfish-events. +config KEYBOARD_GOLDFISH_ROTARY + depends on GOLDFISH + tristate "Rotary encoder device for Goldfish" + help + Say Y here to get an input event device for the Goldfish virtual + device emulator that acts as a rotary encoder. + + To compile this driver as a module, choose M here: the + module will be called goldfish-rotary. 
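A minimal userspace sketch for sanity-checking the input device that the new KEYBOARD_GOLDFISH_ROTARY option above builds: once the goldfish-rotary module is loaded it registers an ordinary evdev node, so a plain read loop is enough to see events. The event node path and the assumption that the emulator reports relative (EV_REL) events are illustrative only and not taken from this patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/event3", O_RDONLY);	/* assumed node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Print every relative-axis event the virtual rotary encoder emits. */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
		if (ev.type == EV_REL)
			printf("axis %d delta %d\n", ev.code, ev.value);
	}
	close(fd);
	return 0;
}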
+ config KEYBOARD_STOWAWAY tristate "Stowaway keyboard" select SERIO diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index 1d416ddf84e4..a5d43fc8fab6 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o obj-$(CONFIG_KEYBOARD_GOLDFISH_EVENTS) += goldfish_events.o +obj-$(CONFIG_KEYBOARD_GOLDFISH_ROTARY) += goldfish_rotary.o obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o diff --git a/drivers/input/keyboard/goldfish_rotary.c b/drivers/input/keyboard/goldfish_rotary.c new file mode 100644 index 000000000000..485727d44684 --- /dev/null +++ b/drivers/input/keyboard/goldfish_rotary.c @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2007 Google, Inc. + * Copyright (C) 2012 Intel, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/types.h> +#include <linux/input.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/acpi.h> + +enum { + REG_READ = 0x00, + REG_SET_PAGE = 0x00, + REG_LEN = 0x04, + REG_DATA = 0x08, + + PAGE_NAME = 0x00000, + PAGE_EVBITS = 0x10000, + PAGE_ABSDATA = 0x20000 | EV_ABS, +}; + +struct event_dev { + struct input_dev *input; + int irq; + void __iomem *addr; + char name[0]; +}; + +static irqreturn_t rotary_interrupt(int irq, void *dev_id) +{ + struct event_dev *edev = dev_id; + unsigned type, code, value; + + type = __raw_readl(edev->addr + REG_READ); + code = __raw_readl(edev->addr + REG_READ); + value = __raw_readl(edev->addr + REG_READ); + + input_event(edev->input, type, code, value); + return IRQ_HANDLED; +} + +static void rotary_import_bits(struct event_dev *edev, + unsigned long bits[], unsigned type, size_t count) +{ + void __iomem *addr = edev->addr; + int i, j; + size_t size; + uint8_t val; + + __raw_writel(PAGE_EVBITS | type, addr + REG_SET_PAGE); + + size = __raw_readl(addr + REG_LEN) * 8; + if (size < count) + count = size; + + addr += REG_DATA; + for (i = 0; i < count; i += 8) { + val = __raw_readb(addr++); + for (j = 0; j < 8; j++) + if (val & 1 << j) + set_bit(i + j, bits); + } +} + +static void rotary_import_abs_params(struct event_dev *edev) +{ + struct input_dev *input_dev = edev->input; + void __iomem *addr = edev->addr; + u32 val[4]; + int count; + int i, j; + + __raw_writel(PAGE_ABSDATA, addr + REG_SET_PAGE); + + count = __raw_readl(addr + REG_LEN) / sizeof(val); + if (count > ABS_MAX) + count = ABS_MAX; + + for (i = 0; i < count; i++) { + if (!test_bit(i, input_dev->absbit)) + continue; + + for (j = 0; j < ARRAY_SIZE(val); j++) { + int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32); + val[j] = __raw_readl(edev->addr + REG_DATA + offset); + } + + input_set_abs_params(input_dev, i, + val[0], val[1], val[2], val[3]); + } +} + +static int 
rotary_probe(struct platform_device *pdev) +{ + struct input_dev *input_dev; + struct event_dev *edev; + struct resource *res; + unsigned keymapnamelen; + void __iomem *addr; + int irq; + int i; + int error; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + addr = devm_ioremap(&pdev->dev, res->start, 4096); + if (!addr) + return -ENOMEM; + + __raw_writel(PAGE_NAME, addr + REG_SET_PAGE); + keymapnamelen = __raw_readl(addr + REG_LEN); + + edev = devm_kzalloc(&pdev->dev, + sizeof(struct event_dev) + keymapnamelen + 1, + GFP_KERNEL); + if (!edev) + return -ENOMEM; + + input_dev = devm_input_allocate_device(&pdev->dev); + if (!input_dev) + return -ENOMEM; + + edev->input = input_dev; + edev->addr = addr; + edev->irq = irq; + + for (i = 0; i < keymapnamelen; i++) + edev->name[i] = __raw_readb(edev->addr + REG_DATA + i); + + pr_debug("rotary_probe() keymap=%s\n", edev->name); + + input_dev->name = edev->name; + input_dev->id.bustype = BUS_HOST; + rotary_import_bits(edev, input_dev->evbit, EV_SYN, EV_MAX); + rotary_import_bits(edev, input_dev->relbit, EV_REL, REL_MAX); + rotary_import_bits(edev, input_dev->absbit, EV_ABS, ABS_MAX); + + rotary_import_abs_params(edev); + + error = devm_request_irq(&pdev->dev, edev->irq, rotary_interrupt, 0, + "goldfish-rotary", edev); + if (error) + return error; + + error = input_register_device(input_dev); + if (error) + return error; + + return 0; +} + +static const struct of_device_id goldfish_rotary_of_match[] = { + { .compatible = "generic,goldfish-rotary", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_rotary_of_match); + +static const struct acpi_device_id goldfish_rotary_acpi_match[] = { + { "GFSH0008", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, goldfish_rotary_acpi_match); + +static struct platform_driver rotary_driver = { + .probe = rotary_probe, + .driver = { + .owner = THIS_MODULE, + .name = "goldfish_rotary", + .of_match_table = goldfish_rotary_of_match, + .acpi_match_table = ACPI_PTR(goldfish_rotary_acpi_match), + }, +}; + +module_platform_driver(rotary_driver); + +MODULE_AUTHOR("Nimrod Gileadi"); +MODULE_DESCRIPTION("Goldfish Rotary Encoder Device"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 7f12b6579f82..795fa353de7c 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -216,8 +216,10 @@ static void matrix_keypad_stop(struct input_dev *dev) { struct matrix_keypad *keypad = input_get_drvdata(dev); + spin_lock_irq(&keypad->lock); keypad->stopped = true; - mb(); + spin_unlock_irq(&keypad->lock); + flush_work(&keypad->work.work); /* * matrix_keypad_scan() will leave IRQs enabled; diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c index 5a5778729e37..76bb51309a78 100644 --- a/drivers/input/keyboard/qt1070.c +++ b/drivers/input/keyboard/qt1070.c @@ -274,9 +274,18 @@ static const struct i2c_device_id qt1070_id[] = { }; MODULE_DEVICE_TABLE(i2c, qt1070_id); +#ifdef CONFIG_OF +static const struct of_device_id qt1070_of_match[] = { + { .compatible = "qt1070", }, + { }, +}; +MODULE_DEVICE_TABLE(of, qt1070_of_match); +#endif + static struct i2c_driver qt1070_driver = { .driver = { .name = "qt1070", + .of_match_table = of_match_ptr(qt1070_of_match), .pm = &qt1070_pm_ops, }, .id_table = qt1070_id, diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c index 
3048ef3e3e16..a5e8998047fe 100644 --- a/drivers/input/keyboard/tca8418_keypad.c +++ b/drivers/input/keyboard/tca8418_keypad.c @@ -189,8 +189,6 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data) input_event(input, EV_MSC, MSC_SCAN, code); input_report_key(input, keymap[code], state); - /* Read for next loop */ - error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg); } while (1); input_sync(input); diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c index 603fc2fadf05..12b20840fb74 100644 --- a/drivers/input/misc/twl4030-pwrbutton.c +++ b/drivers/input/misc/twl4030-pwrbutton.c @@ -70,7 +70,7 @@ static int twl4030_pwrbutton_probe(struct platform_device *pdev) pwr->phys = "twl4030_pwrbutton/input0"; pwr->dev.parent = &pdev->dev; - err = devm_request_threaded_irq(&pwr->dev, irq, NULL, powerbutton_irq, + err = devm_request_threaded_irq(&pdev->dev, irq, NULL, powerbutton_irq, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT, "twl4030_pwrbutton", pwr); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index c9d491bc85e0..3851d5715772 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1082,6 +1082,13 @@ static int elan_probe(struct i2c_client *client, return error; } + /* Make sure there is something at this address */ + error = i2c_smbus_read_byte(client); + if (error < 0) { + dev_dbg(&client->dev, "nothing at this address: %d\n", error); + return -ENXIO; + } + /* Initialize the touchpad. */ error = elan_initialize(data); if (error) diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index a679e56c44cd..765879dcaf85 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -557,7 +557,14 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client, long ret; int error; int len; - u8 buffer[ETP_I2C_INF_LENGTH]; + u8 buffer[ETP_I2C_REPORT_LEN]; + + len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN); + if (len != ETP_I2C_REPORT_LEN) { + error = len < 0 ? len : -EIO; + dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n", + error, len); + } reinit_completion(completion); enable_irq(client->irq); diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 51b96e9bf793..06ea28e5d7b4 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1715,6 +1715,17 @@ int elantech_init(struct psmouse *psmouse) etd->samples[0], etd->samples[1], etd->samples[2]); } + if (etd->samples[1] == 0x74 && etd->hw_version == 0x03) { + /* + * This module has a bug which makes absolute mode + * unusable, so let's abort so we'll be using standard + * PS/2 protocol.
+ */ + psmouse_info(psmouse, + "absolute mode broken, forcing standard PS/2 protocol\n"); + goto init_fail; + } + if (elantech_set_absolute_mode(psmouse)) { psmouse_err(psmouse, "failed to put touchpad into absolute mode.\n"); diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index b604564dec5c..30328e57fdda 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -15,6 +15,7 @@ #define MOUSEDEV_MINORS 31 #define MOUSEDEV_MIX 63 +#include <linux/bitops.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> @@ -103,7 +104,7 @@ struct mousedev_client { spinlock_t packet_lock; int pos_x, pos_y; - signed char ps2[6]; + u8 ps2[6]; unsigned char ready, buffer, bufsiz; unsigned char imexseq, impsseq; enum mousedev_emul mode; @@ -291,11 +292,10 @@ static void mousedev_notify_readers(struct mousedev *mousedev, } client->pos_x += packet->dx; - client->pos_x = client->pos_x < 0 ? - 0 : (client->pos_x >= xres ? xres : client->pos_x); + client->pos_x = clamp_val(client->pos_x, 0, xres); + client->pos_y += packet->dy; - client->pos_y = client->pos_y < 0 ? - 0 : (client->pos_y >= yres ? yres : client->pos_y); + client->pos_y = clamp_val(client->pos_y, 0, yres); p->dx += packet->dx; p->dy += packet->dy; @@ -571,44 +571,50 @@ static int mousedev_open(struct inode *inode, struct file *file) return error; } -static inline int mousedev_limit_delta(int delta, int limit) -{ - return delta > limit ? limit : (delta < -limit ? -limit : delta); -} - -static void mousedev_packet(struct mousedev_client *client, - signed char *ps2_data) +static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data) { struct mousedev_motion *p = &client->packets[client->tail]; + s8 dx, dy, dz; + + dx = clamp_val(p->dx, -127, 127); + p->dx -= dx; + + dy = clamp_val(p->dy, -127, 127); + p->dy -= dy; - ps2_data[0] = 0x08 | - ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07); - ps2_data[1] = mousedev_limit_delta(p->dx, 127); - ps2_data[2] = mousedev_limit_delta(p->dy, 127); - p->dx -= ps2_data[1]; - p->dy -= ps2_data[2]; + ps2_data[0] = BIT(3); + ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2); + ps2_data[0] |= p->buttons & 0x07; + ps2_data[1] = dx; + ps2_data[2] = dy; switch (client->mode) { case MOUSEDEV_EMUL_EXPS: - ps2_data[3] = mousedev_limit_delta(p->dz, 7); - p->dz -= ps2_data[3]; - ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1); + dz = clamp_val(p->dz, -7, 7); + p->dz -= dz; + + ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1); client->bufsiz = 4; break; case MOUSEDEV_EMUL_IMPS: - ps2_data[0] |= - ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); - ps2_data[3] = mousedev_limit_delta(p->dz, 127); - p->dz -= ps2_data[3]; + dz = clamp_val(p->dz, -127, 127); + p->dz -= dz; + + ps2_data[0] |= ((p->buttons & 0x10) >> 3) | + ((p->buttons & 0x08) >> 1); + ps2_data[3] = dz; + client->bufsiz = 4; break; case MOUSEDEV_EMUL_PS2: default: - ps2_data[0] |= - ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); p->dz = 0; + + ps2_data[0] |= ((p->buttons & 0x10) >> 3) | + ((p->buttons & 0x08) >> 1); + client->bufsiz = 3; break; } @@ -714,7 +720,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer, { struct mousedev_client *client = file->private_data; struct mousedev *mousedev = client->mousedev; - signed char data[sizeof(client->ps2)]; + u8 data[sizeof(client->ps2)]; int retval = 0; if (!client->ready && !client->buffer && mousedev->exist && diff --git a/drivers/input/serio/i8042-x86ia64io.h 
b/drivers/input/serio/i8042-x86ia64io.h index d1051e3ce819..e484ea2dc787 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -530,6 +530,20 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { { } }; +static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = { + { + /* + * Sony Vaio VGN-CS series require MUX or the touch sensor + * buttons will disturb touchpad operation + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"), + }, + }, + { } +}; + /* * On some Asus laptops, just running self tests cause problems. */ @@ -693,6 +707,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { }, }, { + /* Lenovo ThinkPad L460 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"), + }, + }, + { /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), @@ -1223,6 +1244,9 @@ static int __init i8042_platform_init(void) if (dmi_check_system(i8042_dmi_nomux_table)) i8042_nomux = true; + if (dmi_check_system(i8042_dmi_forcemux_table)) + i8042_nomux = false; + if (dmi_check_system(i8042_dmi_notimeout_table)) i8042_notimeout = true; diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c index 71b5a634cf6d..e7bb155911d0 100644 --- a/drivers/input/touchscreen/ar1021_i2c.c +++ b/drivers/input/touchscreen/ar1021_i2c.c @@ -152,7 +152,7 @@ static int __maybe_unused ar1021_i2c_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ar1021_i2c_pm, ar1021_i2c_suspend, ar1021_i2c_resume); static const struct i2c_device_id ar1021_i2c_id[] = { - { "MICROCHIP_AR1021_I2C", 0 }, + { "ar1021", 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, ar1021_i2c_id); diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c index 5d0cd51c6f41..a4b7b4c3d27b 100644 --- a/drivers/input/touchscreen/tsc2007.c +++ b/drivers/input/touchscreen/tsc2007.c @@ -455,6 +455,14 @@ static int tsc2007_probe(struct i2c_client *client, tsc2007_stop(ts); + /* power down the chip (TSC2007_SETUP does not ACK on I2C) */ + err = tsc2007_xfer(ts, PWRDOWN); + if (err < 0) { + dev_err(&client->dev, + "Failed to setup chip: %d\n", err); + return err; /* usually, chip does not respond */ + } + err = input_register_device(input_dev); if (err) { dev_err(&client->dev, diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index f929879ecae6..a7d516f973dd 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -127,6 +127,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu) pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n", iommu->name); dmar_free_hwirq(irq); + iommu->pr_irq = 0; goto err; } dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); @@ -142,9 +143,11 @@ int intel_svm_finish_prq(struct intel_iommu *iommu) dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); - free_irq(iommu->pr_irq, iommu); - dmar_free_hwirq(iommu->pr_irq); - iommu->pr_irq = 0; + if (iommu->pr_irq) { + free_irq(iommu->pr_irq, iommu); + dmar_free_hwirq(iommu->pr_irq); + iommu->pr_irq = 0; + } free_pages((unsigned long)iommu->prq, PRQ_ORDER); iommu->prq = NULL; diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index fa0adef32bd6..62739766b60b 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -126,7 +126,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain 
*iovad, break; /* found a free slot */ } adjust_limit_pfn: - limit_pfn = curr_iova->pfn_lo - 1; + limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0; move_left: prev = curr; curr = rb_prev(curr); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 3dc5b65f3990..b98d38f95237 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1295,6 +1295,7 @@ static int __init omap_iommu_init(void) const unsigned long flags = SLAB_HWCACHE_ALIGN; size_t align = 1 << 10; /* L2 pagetable alignment */ struct device_node *np; + int ret; np = of_find_matching_node(NULL, omap_iommu_of_match); if (!np) @@ -1308,11 +1309,25 @@ static int __init omap_iommu_init(void) return -ENOMEM; iopte_cachep = p; - bus_set_iommu(&platform_bus_type, &omap_iommu_ops); - omap_iommu_debugfs_init(); - return platform_driver_register(&omap_iommu_driver); + ret = platform_driver_register(&omap_iommu_driver); + if (ret) { + pr_err("%s: failed to register driver\n", __func__); + goto fail_driver; + } + + ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops); + if (ret) + goto fail_bus; + + return 0; + +fail_bus: + platform_driver_unregister(&omap_iommu_driver); +fail_driver: + kmem_cache_destroy(iopte_cachep); + return ret; } subsys_initcall(omap_iommu_init); /* must be ready before omap3isp is probed */ diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index c5f1757ac61d..82e00e3ad0e0 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -663,7 +663,7 @@ static struct irq_chip its_irq_chip = { * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. */ #define IRQS_PER_CHUNK_SHIFT 5 -#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) +#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT) static unsigned long *lpi_bitmap; static u32 lpi_chunks; @@ -1168,11 +1168,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, dev = kzalloc(sizeof(*dev), GFP_KERNEL); /* - * At least one bit of EventID is being used, hence a minimum - * of two entries. No, the architecture doesn't let you - * express an ITT with a single entry. + * We allocate at least one chunk worth of LPIs per device, + * and thus that many ITEs. The device may require less though.
*/ - nr_ites = max(2UL, roundup_pow_of_two(nvecs)); + nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs)); sz = nr_ites * its->ite_size; sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; itt = kzalloc(sz, GFP_KERNEL); diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index 9cb4b621fbc3..b92a19a594a1 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c @@ -72,7 +72,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb) if (sk->sk_state != MISDN_BOUND) continue; if (!cskb) - cskb = skb_copy(skb, GFP_KERNEL); + cskb = skb_copy(skb, GFP_ATOMIC); if (!cskb) { printk(KERN_WARNING "%s no skb\n", __func__); break; diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index 92b6798ef5b3..c1c3af089634 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -149,7 +149,7 @@ void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { - led_stop_software_blink(led_cdev); + del_timer_sync(&led_cdev->blink_timer); led_cdev->flags &= ~LED_BLINK_ONESHOT; led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP; diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index e8b1120f486d..eef3e64ca0a8 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -88,21 +88,23 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr, down_read(&led_cdev->trigger_lock); if (!led_cdev->trigger) - len += sprintf(buf+len, "[none] "); + len += scnprintf(buf+len, PAGE_SIZE - len, "[none] "); else - len += sprintf(buf+len, "none "); + len += scnprintf(buf+len, PAGE_SIZE - len, "none "); list_for_each_entry(trig, &trigger_list, next_trig) { if (led_cdev->trigger && !strcmp(led_cdev->trigger->name, trig->name)) - len += sprintf(buf+len, "[%s] ", trig->name); + len += scnprintf(buf+len, PAGE_SIZE - len, "[%s] ", + trig->name); else - len += sprintf(buf+len, "%s ", trig->name); + len += scnprintf(buf+len, PAGE_SIZE - len, "%s ", + trig->name); } up_read(&led_cdev->trigger_lock); up_read(&triggers_list_lock); - len += sprintf(len+buf, "\n"); + len += scnprintf(len+buf, PAGE_SIZE - len, "\n"); return len; } EXPORT_SYMBOL_GPL(led_trigger_show); diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index b775e1efecd3..b9f71a87b7e1 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c @@ -281,7 +281,7 @@ static int pca955x_probe(struct i2c_client *client, "slave address 0x%02x\n", id->name, chip->bits, client->addr); - if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; if (pdata) { diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c index b4ec36fd3cdf..c90633b16fad 100644 --- a/drivers/leds/leds-qpnp-flash-v2.c +++ b/drivers/leds/leds-qpnp-flash-v2.c @@ -319,7 +319,7 @@ static inline int get_current_reg_code(int target_curr_ma, int ires_ua) if (!ires_ua || !target_curr_ma || (target_curr_ma < (ires_ua / 1000))) return 0; - return DIV_ROUND_UP(target_curr_ma * 1000, ires_ua) - 1; + return DIV_ROUND_CLOSEST(target_curr_ma * 1000, ires_ua) - 1; } static int qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data) @@ -391,7 +391,7 @@ led_brightness qpnp_flash_led_brightness_get(struct led_classdev *led_cdev) static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) { int rc, i, addr_offset; - u8 val = 0, mask; + u8 val = 0, mask, strobe_mask = 0, strobe_ctrl; for (i = 0; i < led->num_fnodes; i++) { 
addr_offset = led->fnode[i].id; @@ -402,6 +402,51 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) return rc; val |= 0x1 << led->fnode[i].id; + + if (led->fnode[i].strobe_sel == HW_STROBE) { + if (led->fnode[i].id == LED3) + strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT; + else + strobe_mask |= LED1N2_FLASH_ONCE_ONLY_BIT; + } + + if (led->fnode[i].id == LED3 && + led->fnode[i].strobe_sel == LPG_STROBE) + strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT; + /* + * As per the hardware recommendation, to use LED2/LED3 in HW + * strobe mode, LED1 should be set to HW strobe mode as well. + */ + if (led->fnode[i].strobe_sel == HW_STROBE && + (led->fnode[i].id == LED2 || led->fnode[i].id == LED3)) { + mask = FLASH_HW_STROBE_MASK; + addr_offset = led->fnode[LED1].id; + /* + * HW_STROBE: enable, TRIGGER: level, + * POLARITY: active high + */ + strobe_ctrl = BIT(2) | BIT(0); + rc = qpnp_flash_led_masked_write(led, + FLASH_LED_REG_STROBE_CTRL( + led->base + addr_offset), + mask, strobe_ctrl); + if (rc < 0) + return rc; + } + } + + rc = qpnp_flash_led_masked_write(led, + FLASH_LED_REG_MULTI_STROBE_CTRL(led->base), + strobe_mask, 0); + if (rc < 0) + return rc; + + if (led->fnode[LED3].strobe_sel == LPG_STROBE) { + rc = qpnp_flash_led_masked_write(led, + FLASH_LED_REG_LPG_INPUT_CTRL(led->base), + LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT); + if (rc < 0) + return rc; } rc = qpnp_flash_led_write(led, @@ -595,19 +640,6 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) return rc; } - if (led->fnode[LED3].strobe_sel == LPG_STROBE) { - rc = qpnp_flash_led_masked_write(led, - FLASH_LED_REG_MULTI_STROBE_CTRL(led->base), - LED3_FLASH_ONCE_ONLY_BIT, 0); - if (rc < 0) - return rc; - - rc = qpnp_flash_led_masked_write(led, - FLASH_LED_REG_LPG_INPUT_CTRL(led->base), - LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT); - if (rc < 0) - return rc; - } return 0; } diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 4d46f2ce606f..aa84fcfd59fc 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -514,15 +514,21 @@ struct open_bucket { /* * We keep multiple buckets open for writes, and try to segregate different - * write streams for better cache utilization: first we look for a bucket where - * the last write to it was sequential with the current write, and failing that - * we look for a bucket that was last used by the same task. + * write streams for better cache utilization: first we try to segregate flash + * only volume write streams from cached devices, secondly we look for a bucket + * where the last write to it was sequential with the current write, and + * failing that we look for a bucket that was last used by the same task. * * The ideas is if you've got multiple tasks pulling data into the cache at the * same time, you'll get better cache utilization if you try to segregate their * data and preserve locality. * - * For example, say you've starting Firefox at the same time you're copying a + * For example, dirty sectors of flash only volume is not reclaimable, if their + * dirty sectors mixed with dirty sectors of cached device, such buckets will + * be marked as dirty and won't be reclaimed, though the dirty data of cached + * device have been written back to backend device. + * + * And say you've starting Firefox at the same time you're copying a * bunch of files. 
Firefox will likely end up being fairly hot and stay in the * cache awhile, but the data you copied might not be; if you wrote all that * data to the same buckets it'd get invalidated at the same time. @@ -539,7 +545,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c, struct open_bucket *ret, *ret_task = NULL; list_for_each_entry_reverse(ret, &c->data_buckets, list) - if (!bkey_cmp(&ret->key, search)) + if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) != + UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)])) + continue; + else if (!bkey_cmp(&ret->key, search)) goto found; else if (ret->last_write_point == write_point) ret_task = ret; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index c2248b75f2da..b9a526271f02 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -890,6 +890,12 @@ static void cached_dev_detach_finish(struct work_struct *w) mutex_lock(&bch_register_lock); + cancel_delayed_work_sync(&dc->writeback_rate_update); + if (!IS_ERR_OR_NULL(dc->writeback_thread)) { + kthread_stop(dc->writeback_thread); + dc->writeback_thread = NULL; + } + memset(&dc->sb.set_uuid, 0, 16); SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); @@ -935,6 +941,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) uint32_t rtime = cpu_to_le32(get_seconds()); struct uuid_entry *u; char buf[BDEVNAME_SIZE]; + struct cached_dev *exist_dc, *t; bdevname(dc->bdev, buf); @@ -958,6 +965,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) return -EINVAL; } + /* Check whether already attached */ + list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { + if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { + pr_err("Tried to attach %s but duplicate UUID already attached", + buf); + + return -EINVAL; + } + } + u = uuid_find(c, dc->sb.uuid); if (u && diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 81c5e1a1f363..1b84d2890fbf 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -300,6 +300,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, else if (rw & REQ_WRITE_SAME) special_cmd_max_sectors = q->limits.max_write_same_sectors; if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) { + atomic_inc(&io->count); dec_count(io, region, -EOPNOTSUPP); return; } diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 7baeeafa059d..065d7cee0d21 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1773,12 +1773,12 @@ static int validate_params(uint cmd, struct dm_ioctl *param) cmd == DM_LIST_VERSIONS_CMD) return 0; - if ((cmd == DM_DEV_CREATE_CMD)) { + if (cmd == DM_DEV_CREATE_CMD) { if (!*param->name) { DMWARN("name not supplied when creating device"); return -EINVAL; } - } else if ((*param->uuid && *param->name)) { + } else if (*param->uuid && *param->name) { DMWARN("only supply one of name or uuid, cmd(%u)", cmd); return -EINVAL; } diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 494d01d0e92a..a7a561af05c9 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -945,8 +945,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev) cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); lock_comm(cinfo); ret = __sendmsg(cinfo, &cmsg); - if (ret) + if (ret) { + unlock_comm(cinfo); return ret; + } cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE; diff --git a/drivers/md/raid10.c 
b/drivers/md/raid10.c index a67e1a36733f..45e7a47e5f7b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2698,6 +2698,11 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) list_add(&r10_bio->retry_list, &conf->bio_end_io_list); conf->nr_queued++; spin_unlock_irq(&conf->device_lock); + /* + * In case freeze_array() is waiting for condition + * nr_pending == nr_queued + extra to be true. + */ + wake_up(&conf->wait_barrier); md_wakeup_thread(conf->mddev->thread); } else { if (test_bit(R10BIO_WriteError, @@ -3633,6 +3638,7 @@ static int run(struct mddev *mddev) if (blk_queue_discard(bdev_get_queue(rdev->bdev))) discard_supported = true; + first = 0; } if (mddev->queue) { @@ -4039,6 +4045,7 @@ static int raid10_start_reshape(struct mddev *mddev) diff = 0; if (first || diff < min_offset_diff) min_offset_diff = diff; + first = 0; } } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 77403228e098..9284acea4f7b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -110,8 +110,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) { int i; - local_irq_disable(); - spin_lock(conf->hash_locks); + spin_lock_irq(conf->hash_locks); for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); spin_lock(&conf->device_lock); @@ -121,9 +120,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) { int i; spin_unlock(&conf->device_lock); - for (i = NR_STRIPE_HASH_LOCKS; i; i--) - spin_unlock(conf->hash_locks + i - 1); - local_irq_enable(); + for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--) + spin_unlock(conf->hash_locks + i); + spin_unlock_irq(conf->hash_locks); } /* bio's attached to a stripe+device for I/O are linked together in bi_sector @@ -726,12 +725,11 @@ static bool is_full_stripe_write(struct stripe_head *sh) static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) { - local_irq_disable(); if (sh1 > sh2) { - spin_lock(&sh2->stripe_lock); + spin_lock_irq(&sh2->stripe_lock); spin_lock_nested(&sh1->stripe_lock, 1); } else { - spin_lock(&sh1->stripe_lock); + spin_lock_irq(&sh1->stripe_lock); spin_lock_nested(&sh2->stripe_lock, 1); } } @@ -739,8 +737,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) { spin_unlock(&sh1->stripe_lock); - spin_unlock(&sh2->stripe_lock); - local_irq_enable(); + spin_unlock_irq(&sh2->stripe_lock); } /* Only freshly new full stripe normal write stripe can be added to a batch list */ @@ -3372,9 +3369,20 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); BUG_ON(test_bit(R5_Wantread, &dev->flags)); BUG_ON(sh->batch_head); + + /* + * In the raid6 case if the only non-uptodate disk is P + * then we already trusted P to compute the other failed + * drives. It is safe to compute rather than re-read P. + * In other cases we only compute blocks from failed + * devices, otherwise check/repair might fail to detect + * a real inconsistency. 
+ */ + if ((s->uptodate == disks - 1) && + ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || (s->failed && (disk_idx == s->failed_num[0] || - disk_idx == s->failed_num[1]))) { + disk_idx == s->failed_num[1])))) { /* have disk failed, and we're requested to fetch it; * do compute it */ diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index fb66184dc9b6..77cf211e842e 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -750,6 +750,29 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * b goto exit; } + /* + * It may need some time for the CAM to settle down, or there might + * be a race condition between the CAM, writing HC and our last + * check for DA. This happens, if the CAM asserts DA, just after + * checking DA before we are setting HC. In this case it might be + * a bug in the CAM to keep the FR bit, the lower layer/HW + * communication requires a longer timeout or the CAM needs more + * time internally. But this happens in reality! + * We need to read the status from the HW again and do the same + * we did for the previous check for DA + */ + status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); + if (status < 0) + goto exit; + + if (status & (STATUSREG_DA | STATUSREG_RE)) { + if (status & STATUSREG_DA) + dvb_ca_en50221_thread_wakeup(ca); + + status = -EAGAIN; + goto exit; + } + /* send the amount of data */ if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH, bytes_write >> 8)) != 0) goto exit; diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index feeeb70d841e..d14d075ab1d6 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@ -1281,11 +1281,12 @@ static int m88ds3103_select(struct i2c_adapter *adap, void *mux_priv, u32 chan) * New users must use I2C client binding directly! */ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg, - struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter) + struct i2c_adapter *i2c, + struct i2c_adapter **tuner_i2c_adapter) { struct i2c_client *client; struct i2c_board_info board_info; - struct m88ds3103_platform_data pdata; + struct m88ds3103_platform_data pdata = {}; pdata.clk = cfg->clock; pdata.i2c_wr_max = cfg->i2c_wr_max; @@ -1428,6 +1429,8 @@ static int m88ds3103_probe(struct i2c_client *client, case M88DS3103_CHIP_ID: break; default: + ret = -ENODEV; + dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id); goto err_kfree; } diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c index 821a8f481507..9d6270591858 100644 --- a/drivers/media/dvb-frontends/si2168.c +++ b/drivers/media/dvb-frontends/si2168.c @@ -14,6 +14,8 @@ * GNU General Public License for more details. 
*/ +#include <linux/delay.h> + #include "si2168_priv.h" static const struct dvb_frontend_ops si2168_ops; @@ -420,6 +422,7 @@ static int si2168_init(struct dvb_frontend *fe) if (ret) goto err; + udelay(100); memcpy(cmd.args, "\x85", 1); cmd.wlen = 1; cmd.rlen = 1; diff --git a/drivers/media/i2c/adv7481.c b/drivers/media/i2c/adv7481.c index 9c8159cc737a..43a5f3da5ac4 100644 --- a/drivers/media/i2c/adv7481.c +++ b/drivers/media/i2c/adv7481.c @@ -55,6 +55,8 @@ #define LOCK_MAX_SLEEP 6000 #define LOCK_NUM_TRIES 200 +#define MAX_DEFAULT_WIDTH 1280 +#define MAX_DEFAULT_HEIGHT 720 #define MAX_DEFAULT_FRAME_RATE 60 #define MAX_DEFAULT_PIX_CLK_HZ 74240000 @@ -1705,7 +1707,8 @@ static int adv7481_get_hdmi_timings(struct adv7481_state *state, } else { pr_err("%s(%d): PLL not locked return EBUSY\n", __func__, __LINE__); - return -EBUSY; + ret = -EBUSY; + goto set_default; } /* Check Timing Lock */ @@ -1825,6 +1828,17 @@ static int adv7481_get_hdmi_timings(struct adv7481_state *state, (hdmi_params->pix_rep + 1)); } +set_default: + if (ret) { + pr_debug("%s(%d), error %d resort to default fmt\n", + __func__, __LINE__, ret); + vid_params->act_pix = MAX_DEFAULT_WIDTH; + vid_params->act_lines = MAX_DEFAULT_HEIGHT; + vid_params->fr_rate = MAX_DEFAULT_FRAME_RATE; + vid_params->pix_clk = MAX_DEFAULT_PIX_CLK_HZ; + vid_params->intrlcd = 0; + ret = 0; + } pr_debug("%s(%d), adv7481 TMDS Resolution: %d x %d @ %d fps\n", __func__, __LINE__, diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c index fe6eb78b6914..a47ab1947cc4 100644 --- a/drivers/media/i2c/cx25840/cx25840-core.c +++ b/drivers/media/i2c/cx25840/cx25840-core.c @@ -420,11 +420,13 @@ static void cx25840_initialize(struct i2c_client *client) INIT_WORK(&state->fw_work, cx25840_work_handler); init_waitqueue_head(&state->fw_wait); q = create_singlethread_workqueue("cx25840_fw"); - prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); - queue_work(q, &state->fw_work); - schedule(); - finish_wait(&state->fw_wait, &wait); - destroy_workqueue(q); + if (q) { + prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); + queue_work(q, &state->fw_work); + schedule(); + finish_wait(&state->fw_wait, &wait); + destroy_workqueue(q); + } /* 6. */ cx25840_write(client, 0x115, 0x8c); @@ -631,11 +633,13 @@ static void cx23885_initialize(struct i2c_client *client) INIT_WORK(&state->fw_work, cx25840_work_handler); init_waitqueue_head(&state->fw_wait); q = create_singlethread_workqueue("cx25840_fw"); - prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); - queue_work(q, &state->fw_work); - schedule(); - finish_wait(&state->fw_wait, &wait); - destroy_workqueue(q); + if (q) { + prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); + queue_work(q, &state->fw_work); + schedule(); + finish_wait(&state->fw_wait, &wait); + destroy_workqueue(q); + } /* Call the cx23888 specific std setup func, we no longer rely on * the generic cx24840 func. 
@@ -746,11 +750,13 @@ static void cx231xx_initialize(struct i2c_client *client) INIT_WORK(&state->fw_work, cx25840_work_handler); init_waitqueue_head(&state->fw_wait); q = create_singlethread_workqueue("cx25840_fw"); - prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); - queue_work(q, &state->fw_work); - schedule(); - finish_wait(&state->fw_wait, &wait); - destroy_workqueue(q); + if (q) { + prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); + queue_work(q, &state->fw_work); + schedule(); + finish_wait(&state->fw_wait, &wait); + destroy_workqueue(q); + } cx25840_std_setup(client); diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c index 1f8af1ee8352..1e4783b51a35 100644 --- a/drivers/media/i2c/soc_camera/ov6650.c +++ b/drivers/media/i2c/soc_camera/ov6650.c @@ -1033,7 +1033,7 @@ static int ov6650_probe(struct i2c_client *client, priv->code = MEDIA_BUS_FMT_YUYV8_2X8; priv->colorspace = V4L2_COLORSPACE_JPEG; - priv->clk = v4l2_clk_get(&client->dev, "mclk"); + priv->clk = v4l2_clk_get(&client->dev, NULL); if (IS_ERR(priv->clk)) { ret = PTR_ERR(priv->clk); goto eclkget; diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index ea2777e1ee10..bc630a719776 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -226,7 +226,7 @@ static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val) static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg, u8 mask, u8 val) { - i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2); + i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1); } static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg) diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c index 8aa726651630..90fcccc05b56 100644 --- a/drivers/media/pci/bt8xx/bt878.c +++ b/drivers/media/pci/bt8xx/bt878.c @@ -422,8 +422,7 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) bt878_num); if (bt878_num >= BT878_MAX) { printk(KERN_ERR "bt878: Too many devices inserted\n"); - result = -ENOMEM; - goto fail0; + return -ENOMEM; } if (pci_enable_device(dev)) return -EIO; diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c index f7ce493b1fee..a0b61e88c838 100644 --- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c +++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c @@ -342,6 +342,17 @@ static void solo_stop_streaming(struct vb2_queue *q) struct solo_dev *solo_dev = vb2_get_drv_priv(q); solo_stop_thread(solo_dev); + + spin_lock(&solo_dev->slock); + while (!list_empty(&solo_dev->vidq_active)) { + struct solo_vb2_buf *buf = list_entry( + solo_dev->vidq_active.next, + struct solo_vb2_buf, list); + + list_del(&buf->list); + vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); + } + spin_unlock(&solo_dev->slock); INIT_LIST_HEAD(&solo_dev->vidq_active); } diff --git a/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c index 76fe7dfa68cb..61200d379a1d 100644 --- a/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c +++ b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -754,9 +754,12 @@ static int msm_jpegdma_s_fmt_vid_out(struct file *file, static int msm_jpegdma_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *req) { + int ret = 0; struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh); - - return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, req); + mutex_lock(&ctx->lock); + ret = v4l2_m2m_reqbufs(file, ctx->m2m_ctx, req); + mutex_unlock(&ctx->lock); + return ret; } /* @@ -833,11 +836,11 @@ static int msm_jpegdma_streamoff(struct file *file, { struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh); int ret; - + mutex_lock(&ctx->lock); ret = v4l2_m2m_streamoff(file, ctx->m2m_ctx, buf_type); if (ret < 0) dev_err(ctx->jdma_device->dev, "Stream off fails\n"); - + mutex_unlock(&ctx->lock); return ret; } diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h index 392d902d3e0c..0486c8aa96d0 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -56,7 +56,12 @@ static struct csid_reg_parms_t csid_v3_5 = { 0xC, 0x84, 0xA4, - 0x7f010800, + /* + * Default IRQ enabled: + * FIFO overflow, Unbounded frame, Stream underflow, + * Error ECC, Error CRC, Reset done + */ + 0x73000800, 20, 17, 16, diff --git a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c index 2b3eefa65606..6d26dff7525d 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c +++ b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,12 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/irqreturn.h> +#include <media/v4l2-subdev.h> +#include <media/v4l2-dev.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-device.h> +#include <media/v4l2-fh.h> +#include <media/v4l2-event.h> #include "msm_csid.h" #include "msm_sd.h" #include "msm_camera_io_util.h" @@ -51,12 +57,13 @@ #define CSID_VERSION_V40 0x40000000 #define MSM_CSID_DRV_NAME "msm_csid" -#define DBG_CSID 0 +#define DBG_CSID 1 #define SHORT_PKT_CAPTURE 0 #define SHORT_PKT_OFFSET 0x200 #define ENABLE_3P_BIT 1 #define SOF_DEBUG_ENABLE 1 #define SOF_DEBUG_DISABLE 0 +#define MAX_CSID_V4l2_EVENTS 100 #define TRUE 1 #define FALSE 0 @@ -156,6 +163,25 @@ static int msm_csid_stop(struct csid_device *csid_dev, uint32_t cid_mask) return 0; } +static int msm_csid_send_event(struct csid_device *csid_dev, + uint32_t event_type, struct msm_csid_event_data *event_data) +{ + struct v4l2_event csid_event; + struct msm_csid_event_data *d_event_data; + + memset(&csid_event, 0, sizeof(struct v4l2_event)); + csid_event.id = 0; + csid_event.type = event_type; + + d_event_data = + (struct msm_csid_event_data *)(&csid_event.u.data[0]); + d_event_data->csid_id = event_data->csid_id; + d_event_data->error_status = event_data->error_status; + + v4l2_event_queue(csid_dev->msm_sd.sd.devnode, &csid_event); + return 0; +} + #if (DBG_CSID) static void msm_csid_set_debug_reg(struct csid_device *csid_dev, struct msm_camera_csid_params *csid_params) @@ -490,8 +516,9 @@ static irqreturn_t msm_csid_irq(int irq_num, void *data) #else static irqreturn_t msm_csid_irq(int irq_num, void *data) { - uint32_t irq; + uint32_t irq, error_irq, rst_done_irq_mask; struct csid_device *csid_dev = data; + struct msm_csid_event_data csid_event; if (!csid_dev) { pr_err("%s:%d csid_dev NULL\n", __func__, __LINE__); @@ -509,11 +536,26 @@ static irqreturn_t msm_csid_irq(int irq_num, void *data) irq = msm_camera_io_r(csid_dev->base + csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr); + irq &= msm_camera_io_r(csid_dev->base + + csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr); pr_err_ratelimited("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n", __func__, csid_dev->pdev->id, irq); - if (irq & (0x1 << - csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift)) + error_irq = irq; + rst_done_irq_mask = + 0x1 << csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift; + + if (irq & rst_done_irq_mask) { complete(&csid_dev->reset_complete); + error_irq &= ~rst_done_irq_mask; + } + + if (error_irq) { + csid_event.csid_id = csid_dev->pdev->id; + csid_event.error_status = error_irq; + msm_csid_send_event(csid_dev, CSID_EVENT_SIGNAL_ERROR, + &csid_event); + } + msm_camera_io_w(irq, csid_dev->base + csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr); return IRQ_HANDLED; @@ -890,7 +932,6 @@ static long msm_csid_subdev_ioctl(struct v4l2_subdev *sd, return rc; } - #ifdef CONFIG_COMPAT static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void *arg) { @@ -1061,27 +1102,135 @@ static long msm_csid_subdev_ioctl32(struct v4l2_subdev *sd, mutex_unlock(&csid_dev->mutex); return rc; } +#endif -static long msm_csid_subdev_do_ioctl32( +static long msm_csid_subdev_do_ioctl( struct file *file, unsigned int cmd, void *arg) { + int rc = -ENOIOCTLCMD; struct video_device *vdev = video_devdata(file); struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *vfh = 
file->private_data; + + switch (cmd) { + case VIDIOC_DQEVENT: + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) + return -ENOIOCTLCMD; + return v4l2_event_dequeue(vfh, arg, + file->f_flags & O_NONBLOCK); + case VIDIOC_SUBSCRIBE_EVENT: + return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg); + + case VIDIOC_UNSUBSCRIBE_EVENT: + return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg); + + case VIDIOC_MSM_CSID_IO_CFG32: +#ifdef CONFIG_COMPAT + rc = msm_csid_subdev_ioctl32(sd, cmd, arg); +#endif + break; - return msm_csid_subdev_ioctl32(sd, cmd, arg); + default: + rc = msm_csid_subdev_ioctl(sd, cmd, arg); + break; + } + + return rc; } -static long msm_csid_subdev_fops_ioctl32(struct file *file, unsigned int cmd, +static long msm_csid_subdev_fops_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - return video_usercopy(file, cmd, arg, msm_csid_subdev_do_ioctl32); + return video_usercopy(file, cmd, arg, msm_csid_subdev_do_ioctl); } -#endif + +static u32 msm_csid_evt_mask_to_csid_event(u32 evt_mask) +{ + u32 evt_id = CSID_EVENT_SUBS_MASK_NONE; + + switch (evt_mask) { + case CSID_EVENT_MASK_INDEX_SIGNAL_ERROR: + evt_id = CSID_EVENT_SIGNAL_ERROR; + break; + default: + evt_id = CSID_EVENT_SUBS_MASK_NONE; + break; + } + + return evt_id; +} + +static int msm_csid_subscribe_event_mask(struct v4l2_fh *fh, + struct v4l2_event_subscription *sub, int evt_mask_index, + u32 evt_id, bool subscribe_flag) +{ + int rc = 0; + + sub->type = evt_id; + + if (subscribe_flag) + rc = v4l2_event_subscribe(fh, sub, + MAX_CSID_V4l2_EVENTS, NULL); + else + rc = v4l2_event_unsubscribe(fh, sub); + if (rc != 0) { + pr_err("%s: Subs event_type =0x%x failed\n", + __func__, sub->type); + return rc; + } + return rc; +} + +static int msm_csid_process_event_subscription(struct v4l2_fh *fh, + struct v4l2_event_subscription *sub, bool subscribe_flag) +{ + int rc = 0, evt_mask_index = 0; + u32 evt_mask = sub->type; + u32 evt_id = 0; + + if (evt_mask == CSID_EVENT_SUBS_MASK_NONE) { + pr_err("%s: Subs event_type is None=0x%x\n", + __func__, evt_mask); + return 0; + } + + evt_mask_index = CSID_EVENT_MASK_INDEX_SIGNAL_ERROR; + if (evt_mask & (1<<evt_mask_index)) { + evt_id = + msm_csid_evt_mask_to_csid_event( + evt_mask_index); + rc = msm_csid_subscribe_event_mask(fh, sub, + evt_mask_index, evt_id, subscribe_flag); + if (rc != 0) { + pr_err("%s: Subs event index:%d failed\n", + __func__, evt_mask_index); + return rc; + } + } + + return rc; +} +static int msm_csid_subscribe_event(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + return msm_csid_process_event_subscription(fh, sub, true); +} + +static int msm_csid_unsubscribe_event(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + return msm_csid_process_event_subscription(fh, sub, false); +} + static const struct v4l2_subdev_internal_ops msm_csid_internal_ops; static struct v4l2_subdev_core_ops msm_csid_subdev_core_ops = { .ioctl = &msm_csid_subdev_ioctl, .interrupt_service_routine = msm_csid_irq_routine, + .subscribe_event = msm_csid_subscribe_event, + .unsubscribe_event = msm_csid_unsubscribe_event, }; static const struct v4l2_subdev_ops msm_csid_subdev_ops = { @@ -1175,6 +1324,7 @@ static int csid_probe(struct platform_device *pdev) new_csid_dev->pdev = pdev; new_csid_dev->msm_sd.sd.internal_ops = &msm_csid_internal_ops; new_csid_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + new_csid_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS; snprintf(new_csid_dev->msm_sd.sd.name, 
ARRAY_SIZE(new_csid_dev->msm_sd.sd.name), "msm_csid"); media_entity_init(&new_csid_dev->msm_sd.sd.entity, 0, NULL, 0); @@ -1183,11 +1333,12 @@ static int csid_probe(struct platform_device *pdev) new_csid_dev->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x5; msm_sd_register(&new_csid_dev->msm_sd); -#ifdef CONFIG_COMPAT msm_cam_copy_v4l2_subdev_fops(&msm_csid_v4l2_subdev_fops); - msm_csid_v4l2_subdev_fops.compat_ioctl32 = msm_csid_subdev_fops_ioctl32; - new_csid_dev->msm_sd.sd.devnode->fops = &msm_csid_v4l2_subdev_fops; + msm_csid_v4l2_subdev_fops.unlocked_ioctl = msm_csid_subdev_fops_ioctl; +#ifdef CONFIG_COMPAT + msm_csid_v4l2_subdev_fops.compat_ioctl32 = msm_csid_subdev_fops_ioctl; #endif + new_csid_dev->msm_sd.sd.devnode->fops = &msm_csid_v4l2_subdev_fops; rc = msm_camera_register_irq(pdev, new_csid_dev->irq, msm_csid_irq, IRQF_TRIGGER_RISING, "csid", new_csid_dev); diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c index 5e34016d199c..03ae276d1a6f 100644 --- a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c +++ b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -639,7 +639,7 @@ static irqreturn_t bridge_irq(int irq, void *dev) { struct msm_sensor_ctrl_t *s_ctrl = dev; - pr_err("msm_sensor_driver: received bridge interrupt:0x%x", + pr_debug("msm_sensor_driver: received bridge interrupt:0x%x\n", s_ctrl->sensordata->slave_info->sensor_slave_addr); schedule_delayed_work(&s_ctrl->irq_delayed_work, msecs_to_jiffies(0)); @@ -682,7 +682,7 @@ static void bridge_irq_delay_work(struct work_struct *work) &sensor_event); mutex_unlock(s_ctrl->msm_sensor_mutex); exit_queue: - pr_err("Work IRQ exit"); + pr_debug("Work IRQ exit\n"); } /* static function definition */ @@ -947,8 +947,6 @@ CSID_TG: goto free_camera_info; } - pr_err("%s probe succeeded", slave_info->sensor_name); - /* * Update the subdevice id of flash-src based on availability in kernel. */ @@ -1009,8 +1007,6 @@ CSID_TG: pr_err("%s: Failed gpio_direction irq %d", __func__, rc); goto cancel_work; - } else { - pr_err("sensor probe IRQ direction succeeded"); } } @@ -1035,7 +1031,7 @@ CSID_TG: } /* Keep irq enabled */ - pr_err("msm_sensor_driver.c irq number = %d", s_ctrl->irq); + pr_debug("msm_sensor_driver.c irq number = %d\n", s_ctrl->irq); } /* diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c index eab56b70e646..20a38925aa10 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -26,6 +26,7 @@ #include <media/v4l2-ioctl.h> #include <media/v4l2-device.h> #include <media/videobuf2-core.h> +#include <media/msmb_generic_buf_mgr.h> #include "msm.h" #include "msm_buf_mgr.h" diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c index 4dc471b9c1c6..6db3c3c527a3 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c @@ -3095,12 +3095,18 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, return -EINVAL; msm_isp_get_timestamp(×tamp, vfe_dev_ioctl); - + mutex_lock(&vfe_dev_ioctl->buf_mgr->lock); for (i = 0; i < stream_cfg_cmd->num_streams; i++) { if (stream_cfg_cmd->stream_handle[i] == 0) continue; stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl, HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])); + + if (!stream_info) { + pr_err("%s: stream_info is NULL", __func__); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); + return -EINVAL; + } if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX) src_state = axi_data->src_info[ SRC_TO_INTF(stream_info->stream_src)].active; @@ -3108,6 +3114,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, else { ISP_DBG("%s: invalid src info index\n", __func__); rc = -EINVAL; + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); goto error; } spin_lock_irqsave(&stream_info->lock, flags); @@ -3119,6 +3126,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, } if (rc) { spin_unlock_irqrestore(&stream_info->lock, flags); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); goto error; } @@ -3141,6 +3149,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, HANDLE_TO_IDX( stream_cfg_cmd->stream_handle[i])); spin_unlock_irqrestore(&stream_info->lock, flags); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); goto error; } for (k = 0; k < stream_info->num_isp; k++) { @@ -3199,6 +3208,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, spin_unlock_irqrestore(&stream_info->lock, flags); streams[num_streams++] = stream_info; } + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); for (i = 0; i < MAX_VFE; i++) { vfe_dev = update_vfes[i]; diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c index 6c1d9ddc4232..3e8220005f77 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c @@ -1102,6 +1102,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl, struct vfe_device *vfe_dev; msm_isp_get_timestamp(×tamp, vfe_dev_ioctl); + mutex_lock(&vfe_dev_ioctl->buf_mgr->lock); num_stats_comp_mask = vfe_dev_ioctl->hw_info->stats_hw_info->num_stats_comp_mask; @@ -1120,6 +1121,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl, } if (rc) { spin_unlock_irqrestore(&stream_info->lock, flags); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); goto error; } rc = msm_isp_init_stats_ping_pong_reg( @@ -1127,6 +1129,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl, if (rc < 0) { spin_unlock_irqrestore(&stream_info->lock, flags); pr_err("%s: No buffer for stream%d\n", __func__, idx); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); return rc; } 
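The hunks above push the buf_mgr->lock acquisition down into the ISP stream-start paths and pair it with a mutex_unlock() on every early return. A minimal sketch of the same locking pattern, written with the single-unlock-point variant; prepare_stream() and the function name are hypothetical stand-ins, not part of the patch:

#include <linux/mutex.h>

static int prepare_stream(struct vfe_device *vfe_dev, int idx); /* hypothetical helper */

/*
 * Illustrative only: any path taken while buf_mgr->lock is held must
 * drop the lock before returning, here funnelled through one exit.
 */
static int start_streams_locked(struct vfe_device *vfe_dev, int num_streams)
{
	int i, rc = 0;

	mutex_lock(&vfe_dev->buf_mgr->lock);
	for (i = 0; i < num_streams; i++) {
		rc = prepare_stream(vfe_dev, i);
		if (rc)
			break;		/* still unlocked below */
	}
	mutex_unlock(&vfe_dev->buf_mgr->lock);
	return rc;
}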
init_completion(&stream_info->active_comp); @@ -1161,6 +1164,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl, stats_data->num_active_stream); streams[num_stream++] = stream_info; } + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); for (k = 0; k < MAX_VFE; k++) { if (!update_vfes[k] || num_active_streams[k]) diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c index c132947b3790..684b331d9ac4 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c @@ -897,9 +897,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd, case VIDIOC_MSM_ISP_CFG_STREAM: mutex_lock(&vfe_dev->core_mutex); MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev); - mutex_lock(&vfe_dev->buf_mgr->lock); rc = msm_isp_cfg_axi_stream(vfe_dev, arg); - mutex_unlock(&vfe_dev->buf_mgr->lock); MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev); mutex_unlock(&vfe_dev->core_mutex); break; @@ -1016,9 +1014,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd, case VIDIOC_MSM_ISP_CFG_STATS_STREAM: mutex_lock(&vfe_dev->core_mutex); MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev); - mutex_lock(&vfe_dev->buf_mgr->lock); rc = msm_isp_cfg_stats_stream(vfe_dev, arg); - mutex_unlock(&vfe_dev->buf_mgr->lock); MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev); mutex_unlock(&vfe_dev->core_mutex); break; diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c index f732f5180e81..58bfdb77a492 100644 --- a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c +++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -166,6 +166,33 @@ static int32_t msm_buf_mngr_buf_done(struct msm_buf_mngr_device *buf_mngr_dev, return ret; } +static int32_t msm_buf_mngr_buf_error(struct msm_buf_mngr_device *buf_mngr_dev, + struct msm_buf_mngr_info *buf_info) +{ + unsigned long flags; + struct msm_get_bufs *bufs, *save; + int32_t ret = -EINVAL; + + spin_lock_irqsave(&buf_mngr_dev->buf_q_spinlock, flags); + list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) { + if ((bufs->session_id == buf_info->session_id) && + (bufs->stream_id == buf_info->stream_id) && + (bufs->index == buf_info->index)) { + ret = buf_mngr_dev->vb2_ops.buf_error + (bufs->vb2_v4l2_buf, + buf_info->session_id, + buf_info->stream_id, + buf_info->frame_id, + &buf_info->timestamp, + buf_info->reserved); + list_del_init(&bufs->entry); + kfree(bufs); + break; + } + } + spin_unlock_irqrestore(&buf_mngr_dev->buf_q_spinlock, flags); + return ret; +} static int32_t msm_buf_mngr_put_buf(struct msm_buf_mngr_device *buf_mngr_dev, struct msm_buf_mngr_info *buf_info) @@ -473,6 +500,9 @@ int msm_cam_buf_mgr_ops(unsigned int cmd, void *argp) case VIDIOC_MSM_BUF_MNGR_BUF_DONE: rc = msm_buf_mngr_buf_done(msm_buf_mngr_dev, argp); break; + case VIDIOC_MSM_BUF_MNGR_BUF_ERROR: + rc = msm_buf_mngr_buf_error(msm_buf_mngr_dev, argp); + break; case VIDIOC_MSM_BUF_MNGR_PUT_BUF: rc = msm_buf_mngr_put_buf(msm_buf_mngr_dev, argp); break; @@ -571,6 +601,7 @@ static long msm_buf_mngr_subdev_ioctl(struct v4l2_subdev *sd, case VIDIOC_MSM_BUF_MNGR_GET_BUF: case VIDIOC_MSM_BUF_MNGR_BUF_DONE: case VIDIOC_MSM_BUF_MNGR_PUT_BUF: + case VIDIOC_MSM_BUF_MNGR_BUF_ERROR: rc = msm_cam_buf_mgr_ops(cmd, argp); break; case VIDIOC_MSM_BUF_MNGR_INIT: @@ -719,6 +750,9 @@ static long msm_bmgr_subdev_fops_compat_ioctl(struct file *file, case VIDIOC_MSM_BUF_MNGR_BUF_DONE32: cmd = VIDIOC_MSM_BUF_MNGR_BUF_DONE; break; + case VIDIOC_MSM_BUF_MNGR_BUF_ERROR32: + cmd = VIDIOC_MSM_BUF_MNGR_BUF_ERROR; + break; case VIDIOC_MSM_BUF_MNGR_PUT_BUF32: cmd = VIDIOC_MSM_BUF_MNGR_PUT_BUF; break; @@ -737,6 +771,7 @@ static long msm_bmgr_subdev_fops_compat_ioctl(struct file *file, switch (cmd) { case VIDIOC_MSM_BUF_MNGR_GET_BUF: case VIDIOC_MSM_BUF_MNGR_BUF_DONE: + case VIDIOC_MSM_BUF_MNGR_BUF_ERROR: case VIDIOC_MSM_BUF_MNGR_FLUSH: case VIDIOC_MSM_BUF_MNGR_PUT_BUF: { struct msm_buf_mngr_info32_t buf_info32; diff --git a/drivers/media/platform/msm/camera_v2/msm_sd.h b/drivers/media/platform/msm/camera_v2/msm_sd.h index d893d9fc07e3..3d5d3e03632e 100644 --- a/drivers/media/platform/msm/camera_v2/msm_sd.h +++ b/drivers/media/platform/msm/camera_v2/msm_sd.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2016, 2018 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -81,6 +81,9 @@ struct msm_sd_req_vb2_q { unsigned int stream_id, uint32_t sequence, struct timeval *ts, uint32_t reserved); int (*flush_buf)(int session_id, unsigned int stream_id); + int (*buf_error)(struct vb2_v4l2_buffer *vb2_v4l2_buf, int session_id, + unsigned int stream_id, uint32_t sequence, struct timeval *ts, + uint32_t reserved); }; #define MSM_SD_NOTIFY_GET_SD 0x00000001 diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c index e271c7fcd1b6..f2b048e37319 100644 --- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c +++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -457,6 +457,67 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id, return rc; } +static int msm_vb2_buf_error(struct vb2_v4l2_buffer *vb, int session_id, + unsigned int stream_id, uint32_t sequence, + struct timeval *ts, uint32_t buf_type) +{ + unsigned long flags, rl_flags; + struct msm_vb2_buffer *msm_vb2; + struct msm_stream *stream; + struct msm_session *session; + struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; + int rc = 0; + + session = msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) + return -EINVAL; + + read_lock_irqsave(&session->stream_rwlock, rl_flags); + + stream = msm_get_stream(session, stream_id); + if (IS_ERR_OR_NULL(stream)) { + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); + return -EINVAL; + } + + spin_lock_irqsave(&stream->stream_lock, flags); + if (vb) { + list_for_each_entry(msm_vb2, &(stream->queued_list), list) { + vb2_v4l2_buf = &(msm_vb2->vb2_v4l2_buf); + if (vb2_v4l2_buf == vb) + break; + } + if (vb2_v4l2_buf != vb) { + pr_err("VB buffer is INVALID ses_id=%d, str_id=%d, vb=%pK\n", + session_id, stream_id, vb); + spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, + rl_flags); + return -EINVAL; + } + msm_vb2 = + container_of(vb2_v4l2_buf, struct msm_vb2_buffer, + vb2_v4l2_buf); + /* put buf before buf done */ + if (msm_vb2->in_freeq) { + vb2_v4l2_buf->sequence = sequence; + vb2_v4l2_buf->timestamp = *ts; + vb2_buffer_done(&vb2_v4l2_buf->vb2_buf, + VB2_BUF_STATE_ERROR); + msm_vb2->in_freeq = 0; + rc = 0; + } else + rc = -EINVAL; + } else { + pr_err(" VB buffer is NULL for ses_id=%d, str_id=%d\n", + session_id, stream_id); + rc = -EINVAL; + } + spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); + return rc; +} + long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id, uint32_t index) { @@ -555,6 +616,7 @@ int msm_vb2_request_cb(struct msm_sd_req_vb2_q *req) req->put_buf = msm_vb2_put_buf; req->buf_done = msm_vb2_buf_done; req->flush_buf = msm_vb2_flush_buf; + req->buf_error = msm_vb2_buf_error; return 0; } diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c index 25fc34b26bc1..53a01aff4bdd 100644 --- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c +++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c 
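The msm_vb2 and buf-manager hunks above add a buf_error callback alongside buf_done, so a failed frame can be handed back to videobuf2 in the error state instead of being reported as completed (the CPP hunk that follows switches its failure path to VIDIOC_MSM_BUF_MNGR_BUF_ERROR for the same reason). A rough sketch of how a caller would choose between the two callbacks; report_frame() and its arguments are hypothetical:

/*
 * Illustrative only: complete a frame via buf_done on success and via
 * the new buf_error callback on failure, so userspace observes
 * VB2_BUF_STATE_ERROR rather than a seemingly good buffer.
 */
static int report_frame(struct msm_sd_req_vb2_q *vb2_ops,
			struct vb2_v4l2_buffer *buf, int session_id,
			unsigned int stream_id, uint32_t frame_id,
			struct timeval *ts, bool failed)
{
	if (failed && vb2_ops->buf_error)
		return vb2_ops->buf_error(buf, session_id, stream_id,
					  frame_id, ts, 0);
	return vb2_ops->buf_done(buf, session_id, stream_id,
				 frame_id, ts, 0);
}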
@@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1624,6 +1624,7 @@ static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev, case VIDIOC_MSM_BUF_MNGR_PUT_BUF: case VIDIOC_MSM_BUF_MNGR_BUF_DONE: case VIDIOC_MSM_BUF_MNGR_GET_BUF: + case VIDIOC_MSM_BUF_MNGR_BUF_ERROR: default: { struct msm_buf_mngr_info *buff_mgr_info = (struct msm_buf_mngr_info *)arg; @@ -3617,7 +3618,7 @@ STREAM_BUFF_END: break; } buff_mgr_info.frame_id = frame_info.frame_id; - rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_BUF_DONE, + rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_BUF_ERROR, 0x0, &buff_mgr_info); if (rc < 0) { pr_err("error in buf done\n"); diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h index 6ed5c5c7dbce..323f99f25147 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h +++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h @@ -14,7 +14,6 @@ #define MSM_CSIPHY_3_5_HWREG_H #define ULPM_WAKE_UP_TIMER_MODE 2 -#define GLITCH_ELIMINATION_NUM 0x12 /* bit [6:4] */ #include <sensor/csiphy/msm_csiphy.h> @@ -47,7 +46,7 @@ struct csiphy_reg_3ph_parms_t csiphy_v3_5_3ph = { {0x138, 0x0}, {0x13C, 0x10}, {0x140, 0x1}, - {0x144, GLITCH_ELIMINATION_NUM}, + {0x144, 0x32}, {0x148, 0xFE}, {0x14C, 0x1}, {0x154, 0x0}, diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c index a8d7c1f8b489..4f7a62716810 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c +++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -52,6 +52,7 @@ #define MAX_DPHY_DATA_LN 4 #define CLOCK_OFFSET 0x700 #define CSIPHY_SOF_DEBUG_COUNT 2 +#define GBPS 1000000000 #undef CDBG #define CDBG(fmt, args...) pr_debug(fmt, ##args) @@ -134,8 +135,10 @@ static int msm_csiphy_3phase_lane_config( uint8_t i = 0; uint16_t lane_mask = 0, lane_enable = 0, temp; void __iomem *csiphybase; + uint64_t two_gbps = 0; csiphybase = csiphy_dev->base; + two_gbps = 2 * (uint64_t)csiphy_params->lane_cnt * GBPS; lane_mask = csiphy_params->lane_mask & 0x7; while (lane_mask != 0) { temp = (i << 1)+1; @@ -281,11 +284,20 @@ static int msm_csiphy_3phase_lane_config( csiphy_3ph_reg.mipi_csiphy_3ph_lnn_ctrl51.addr + 0x200*i); } - msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_3ph_lnn_ctrl25.data, - csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i); + if ((csiphy_dev->hw_version == CSIPHY_VERSION_V35) && + (csiphy_params->data_rate > two_gbps)) { + msm_camera_io_w(0x40, + csiphybase + + csiphy_dev->ctrl_reg->csiphy_3ph_reg. + mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i); + } else { + msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. + mipi_csiphy_3ph_lnn_ctrl25.data, + csiphybase + + csiphy_dev->ctrl_reg->csiphy_3ph_reg. 
+ mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i); + } lane_mask >>= 1; i++; } @@ -797,10 +809,10 @@ static int msm_csiphy_lane_config(struct csiphy_device *csiphy_dev, ratio = csiphy_dev->csiphy_max_clk/clk_rate; csiphy_params->settle_cnt = csiphy_params->settle_cnt/ratio; } - CDBG("%s csiphy_params, mask = 0x%x cnt = %d\n", + CDBG("%s csiphy_params, mask = 0x%x cnt = %d, data rate = %llu\n", __func__, csiphy_params->lane_mask, - csiphy_params->lane_cnt); + csiphy_params->lane_cnt, csiphy_params->data_rate); CDBG("%s csiphy_params, settle cnt = 0x%x csid %d\n", __func__, csiphy_params->settle_cnt, csiphy_params->csid_core); diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c index 8490a65ae1c6..a43404cad3e3 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c @@ -83,7 +83,7 @@ static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei) static void channel_swdemux_tsklet(unsigned long data) { struct channel_info *channel = (struct channel_info *)data; - struct c8sectpfei *fei = channel->fei; + struct c8sectpfei *fei; unsigned long wp, rp; int pos, num_packets, n, size; u8 *buf; @@ -91,6 +91,8 @@ static void channel_swdemux_tsklet(unsigned long data) if (unlikely(!channel || !channel->irec)) return; + fei = channel->fei; + wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0)); rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0)); diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index f838d9c7ed12..0fba4a2c1602 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -1370,8 +1370,13 @@ static int mceusb_dev_probe(struct usb_interface *intf, goto rc_dev_fail; /* wire up inbound data handler */ - usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp, - mceusb_dev_recv, ir, ep_in->bInterval); + if (usb_endpoint_xfer_int(ep_in)) + usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp, + mceusb_dev_recv, ir, ep_in->bInterval); + else + usb_fill_bulk_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp, + mceusb_dev_recv, ir); + ir->urb_in->transfer_dma = ir->dma_in; ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c index 9caea8344547..d793c630f1dd 100644 --- a/drivers/media/usb/cpia2/cpia2_v4l.c +++ b/drivers/media/usb/cpia2/cpia2_v4l.c @@ -812,7 +812,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf) struct camera_data *cam = video_drvdata(file); if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || - buf->index > cam->num_frames) + buf->index >= cam->num_frames) return -EINVAL; buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; @@ -863,7 +863,7 @@ static int cpia2_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || buf->memory != V4L2_MEMORY_MMAP || - buf->index > cam->num_frames) + buf->index >= cam->num_frames) return -EINVAL; DBG("QBUF #%d\n", buf->index); diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c index 3bbc77aa6a33..483457d4904f 100644 --- a/drivers/media/usb/usbtv/usbtv-core.c +++ b/drivers/media/usb/usbtv/usbtv-core.c @@ -95,6 +95,8 @@ static int usbtv_probe(struct usb_interface *intf, return 0; usbtv_audio_fail: + /* we must not free at this point */ + usb_get_dev(usbtv->udev); usbtv_video_free(usbtv); usbtv_video_fail: diff --git a/drivers/media/v4l2-core/videobuf2-core.c 
b/drivers/media/v4l2-core/videobuf2-core.c index 3dc9ed2e0774..bb1e19f7ed5a 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -205,6 +205,10 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, struct vb2_buffer *vb; int ret; + /* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */ + num_buffers = min_t(unsigned int, num_buffers, + VB2_MAX_FRAME - q->num_buffers); + for (buffer = 0; buffer < num_buffers; ++buffer) { /* Allocate videobuf buffer structures */ vb = kzalloc(q->buf_struct_size, GFP_KERNEL); diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c index bf23234d957e..b44d0e755675 100644 --- a/drivers/media/v4l2-core/videobuf2-v4l2.c +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c @@ -133,23 +133,6 @@ static int __set_timestamp(struct vb2_buffer *vb, const void *pb) return 0; }; -static void vb2_warn_zero_bytesused(struct vb2_buffer *vb) -{ - static bool check_once; - - if (check_once) - return; - - check_once = true; - WARN_ON(1); - - pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n"); - if (vb->vb2_queue->allow_zero_bytesused) - pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n"); - else - pr_warn("use the actual size instead.\n"); -} - static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b, const char *opname) { @@ -357,9 +340,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *pdst = &planes[plane]; struct v4l2_plane *psrc = &b->m.planes[plane]; - if (psrc->bytesused == 0) - vb2_warn_zero_bytesused(vb); - if (vb->vb2_queue->allow_zero_bytesused) pdst->bytesused = psrc->bytesused; else @@ -394,9 +374,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, } if (V4L2_TYPE_IS_OUTPUT(b->type)) { - if (b->bytesused == 0) - vb2_warn_zero_bytesused(vb); - if (vb->vb2_queue->allow_zero_bytesused) planes[0].bytesused = b->bytesused; else diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c index 8f8bacb67a15..a6b5259ffbdd 100644 --- a/drivers/mfd/palmas.c +++ b/drivers/mfd/palmas.c @@ -430,6 +430,20 @@ static void palmas_power_off(void) { unsigned int addr; int ret, slave; + struct device_node *np = palmas_dev->dev->of_node; + + if (of_property_read_bool(np, "ti,palmas-override-powerhold")) { + addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE, + PALMAS_PRIMARY_SECONDARY_PAD2); + slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE); + + ret = regmap_update_bits(palmas_dev->regmap[slave], addr, + PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0); + if (ret) + dev_err(palmas_dev->dev, + "Unable to write PRIMARY_SECONDARY_PAD2 %d\n", + ret); + } if (!palmas_dev) return; diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index cc91f7b3d90c..eb29113e0bac 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -148,7 +148,7 @@ enclosure_register(struct device *dev, const char *name, int components, for (i = 0; i < components; i++) { edev->component[i].number = -1; edev->component[i].slot = -1; - edev->component[i].power_status = 1; + edev->component[i].power_status = -1; } mutex_lock(&container_list_lock); @@ -600,6 +600,11 @@ static ssize_t get_component_power_status(struct device *cdev, if (edev->cb->get_power_status) edev->cb->get_power_status(edev, ecomp); + + /* If still uninitialized, the callback failed or does not exist. */ + if (ecomp->power_status == -1) + return (edev->cb->get_power_status) ? 
-EIO : -ENOTTY; + return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off"); } diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 4ef189a7a2fb..8c04e342e30a 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -571,7 +571,6 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) break; default: - dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd); rets = -ENOIOCTLCMD; } diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c index 0e6ab4e7c686..c52c8ccc90b7 100644 --- a/drivers/misc/uid_sys_stats.c +++ b/drivers/misc/uid_sys_stats.c @@ -14,6 +14,7 @@ */ #include <linux/atomic.h> +#include <linux/cpufreq_times.h> #include <linux/err.h> #include <linux/hashtable.h> #include <linux/init.h> @@ -344,13 +345,13 @@ static int uid_cputime_show(struct seq_file *m, void *v) uid_entry->active_utime = 0; } - read_lock(&tasklist_lock); + rcu_read_lock(); do_each_thread(temp, task) { uid = from_kuid_munged(user_ns, task_uid(task)); if (!uid_entry || uid_entry->uid != uid) uid_entry = find_or_register_uid(uid); if (!uid_entry) { - read_unlock(&tasklist_lock); + rcu_read_unlock(); rt_mutex_unlock(&uid_lock); pr_err("%s: failed to find the uid_entry for uid %d\n", __func__, uid); @@ -360,7 +361,7 @@ static int uid_cputime_show(struct seq_file *m, void *v) uid_entry->active_utime += utime; uid_entry->active_stime += stime; } while_each_thread(temp, task); - read_unlock(&tasklist_lock); + rcu_read_unlock(); hash_for_each(hash_table, bkt, uid_entry, hash) { cputime_t total_utime = uid_entry->utime + @@ -421,6 +422,10 @@ static ssize_t uid_remove_write(struct file *file, kstrtol(end_uid, 10, &uid_end) != 0) { return -EINVAL; } + + /* Also remove uids from /proc/uid_time_in_state */ + cpufreq_task_times_remove_uids(uid_start, uid_end); + rt_mutex_lock(&uid_lock); for (; uid_start <= uid_end; uid_start++) { diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index f42d9c4e4561..cc277f7849b0 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -298,8 +298,11 @@ static void *qp_alloc_queue(u64 size, u32 flags) size_t pas_size; size_t vas_size; size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if); - const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; + u64 num_pages; + if (size > SIZE_MAX - PAGE_SIZE) + return NULL; + num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; if (num_pages > (SIZE_MAX - queue_size) / (sizeof(*queue->kernel_if->u.g.pas) + @@ -624,9 +627,12 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size) { struct vmci_queue *queue; size_t queue_page_size; - const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; + u64 num_pages; const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); + if (size > SIZE_MAX - PAGE_SIZE) + return NULL; + num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; if (num_pages > (SIZE_MAX - queue_size) / sizeof(*queue->kernel_if->u.h.page)) return NULL; diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 7d2ceda7f80e..de7def1f4f1c 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1048,6 +1048,12 @@ static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev, goto idata_free; } + /* + * Ensure rpmb_req_pending flag is synchronized between multiple + * entities which may use rpmb ioclts with a lock. 
+ */ + mutex_lock(&card->host->rpmb_req_mutex); + atomic_set(&card->host->rpmb_req_pending, 1); mmc_get_card(card); if (mmc_card_doing_bkops(card)) { @@ -1163,6 +1169,9 @@ static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev, cmd_rel_host: mmc_put_card(card); + atomic_set(&card->host->rpmb_req_pending, 0); + mutex_unlock(&card->host->rpmb_req_mutex); + idata_free: for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) { @@ -3173,11 +3182,11 @@ static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep( static void mmc_blk_cmdq_requeue_rw_rq(struct mmc_queue *mq, struct request *req) { - struct mmc_card *card = mq->card; - struct mmc_host *host = card->host; + struct request_queue *q = req->q; - blk_requeue_request(req->q, req); - mmc_put_card(host->card); + spin_lock_irq(q->queue_lock); + blk_requeue_request(q, req); + spin_unlock_irq(q->queue_lock); } static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req) @@ -4065,9 +4074,16 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req) * If issuing of the request fails with eitehr EBUSY or * EAGAIN error, re-queue the request. * This case would occur with ICE calls. + * For request which gets completed successfully or + * errored out, we release host lock in completion or + * error handling softirq context. But here the request + * is neither completed nor erred-out, so release the + * host lock explicitly. */ - if (ret == -EBUSY || ret == -EAGAIN) + if (ret == -EBUSY || ret == -EAGAIN) { mmc_blk_cmdq_requeue_rw_rq(mq, req); + mmc_put_card(host->card); + } } } diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index ccf22eb5bdc0..397bbd09034d 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -95,7 +95,9 @@ static inline void mmc_cmdq_ready_wait(struct mmc_host *host, * be any other direct command active. * 3. cmdq state should be unhalted. * 4. cmdq state shouldn't be in error state. - * 5. free tag available to process the new request. + * 5. There is no outstanding RPMB request pending. + * 6. free tag available to process the new request. + * (This must be the last condtion to check) */ wait_event(ctx->wait, kthread_should_stop() || (mmc_peek_request(mq) && @@ -106,6 +108,7 @@ static inline void mmc_cmdq_ready_wait(struct mmc_host *host, && !(!host->card->part_curr && mmc_host_cq_disable(host) && !mmc_card_suspended(host->card)) && !test_bit(CMDQ_STATE_ERR, &ctx->curr_state) + && !atomic_read(&host->rpmb_req_pending) && !mmc_check_blk_queue_start_tag(q, mq->cmdq_req_peeked))); } diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 548a9e8b72ae..0b527a708bd7 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -373,12 +373,13 @@ int mmc_add_card(struct mmc_card *card) mmc_card_ddr52(card) ? "DDR " : "", type); } else { - pr_info("%s: new %s%s%s%s%s card at address %04x\n", + pr_info("%s: new %s%s%s%s%s%s card at address %04x\n", mmc_hostname(card->host), mmc_card_uhs(card) ? "ultra high speed " : (mmc_card_hs(card) ? "high speed " : ""), mmc_card_hs400(card) ? "HS400 " : (mmc_card_hs200(card) ? "HS200 " : ""), + mmc_card_hs400es(card) ? "Enhanced strobe " : "", mmc_card_ddr52(card) ? 
"DDR " : "", uhs_bus_speed_mode, type, card->rca); } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index d1d045f04368..2af0e819d0cb 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1170,6 +1170,46 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) return 0; } +static int mmc_cmdq_check_retune(struct mmc_host *host) +{ + bool cmdq_mode; + int err = 0; + + if (!host->need_retune || host->doing_retune || !host->card || + mmc_card_hs400es(host->card) || + (host->ios.clock <= MMC_HIGH_DDR_MAX_DTR)) + return 0; + + cmdq_mode = mmc_card_cmdq(host->card); + if (cmdq_mode) { + err = mmc_cmdq_halt(host, true); + if (err) { + pr_err("%s: %s: failed halting queue (%d)\n", + mmc_hostname(host), __func__, err); + host->cmdq_ops->dumpstate(host); + goto halt_failed; + } + } + + mmc_retune_hold(host); + err = mmc_retune(host); + mmc_retune_release(host); + + if (cmdq_mode) { + if (mmc_cmdq_halt(host, false)) { + pr_err("%s: %s: cmdq unhalt failed\n", + mmc_hostname(host), __func__); + host->cmdq_ops->dumpstate(host); + } + } + +halt_failed: + pr_debug("%s: %s: Retuning done err: %d\n", + mmc_hostname(host), __func__, err); + + return err; +} + static int mmc_start_cmdq_request(struct mmc_host *host, struct mmc_request *mrq) { @@ -1196,6 +1236,7 @@ static int mmc_start_cmdq_request(struct mmc_host *host, } mmc_host_clk_hold(host); + mmc_cmdq_check_retune(host); if (likely(host->cmdq_ops->request)) { ret = host->cmdq_ops->request(host, mrq); } else { @@ -1558,7 +1599,8 @@ static void mmc_wait_for_req_done(struct mmc_host *host, mmc_card_removed(host->card)) { if (cmd->error && !cmd->retries && cmd->opcode != MMC_SEND_STATUS && - cmd->opcode != MMC_SEND_TUNING_BLOCK) + cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) mmc_recovery_fallback_lower_speed(host); break; } @@ -4493,6 +4535,14 @@ int mmc_pm_notify(struct notifier_block *notify_block, if (!err) break; + if (!mmc_card_is_removable(host)) { + dev_warn(mmc_dev(host), + "pre_suspend failed for non-removable host: " + "%d\n", err); + /* Avoid removing non-removable hosts */ + break; + } + /* Calling bus_ops->remove() with a claimed host can deadlock */ host->bus_ops->remove(host); mmc_claim_host(host); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 88699f852aa2..b3b9d78f789a 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -388,7 +388,8 @@ int mmc_retune(struct mmc_host *host) else return 0; - if (!host->need_retune || host->doing_retune || !host->card) + if (!host->need_retune || host->doing_retune || !host->card || + mmc_card_hs400es(host->card)) return 0; host->need_retune = 0; @@ -635,6 +636,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) #endif setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host); + mutex_init(&host->rpmb_req_mutex); + /* * By default, hosts do not support SGIO or large requests. * They have to set these according to their abilities. 
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 6f4f81a370d8..c8f85b31e2ac 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1275,6 +1275,8 @@ static int mmc_select_hs400(struct mmc_card *card) if (card->ext_csd.strobe_support && host->ops->enhanced_strobe) { mmc_host_clk_hold(host); err = host->ops->enhanced_strobe(host); + if (!err) + host->ios.enhanced_strobe = true; mmc_host_clk_release(host); } else if ((host->caps2 & MMC_CAP2_HS400_POST_TUNING) && host->ops->execute_tuning) { diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index fb204ee6ff89..581f5d0271f4 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -619,6 +619,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) (sizeof(struct idmac_desc_64addr) * (i + 1))) >> 32; /* Initialize reserved and buffer size fields to "0" */ + p->des0 = 0; p->des1 = 0; p->des2 = 0; p->des3 = 0; @@ -640,6 +641,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) i++, p++) { p->des3 = cpu_to_le32(host->sg_dma + (sizeof(struct idmac_desc) * (i + 1))); + p->des0 = 0; p->des1 = 0; } @@ -2807,8 +2809,8 @@ static bool dw_mci_reset(struct dw_mci *host) } if (host->use_dma == TRANS_MODE_IDMAC) - /* It is also recommended that we reset and reprogram idmac */ - dw_mci_idmac_reset(host); + /* It is also required that we reinit idmac */ + dw_mci_idmac_init(host); ret = true; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 7fb0753abe30..6b814d7d6560 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1776,8 +1776,8 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host) */ if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) { struct pinctrl *p = devm_pinctrl_get(host->dev); - if (!p) { - ret = -ENODEV; + if (IS_ERR(p)) { + ret = PTR_ERR(p); goto err_free_irq; } if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) { diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 83b1226471c1..ac66c61d9433 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -418,6 +418,20 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) if (esdhc->vendor_ver < VENDOR_V_23) pre_div = 2; + /* + * Limit SD clock to 167MHz for ls1046a according to its datasheet + */ + if (clock > 167000000 && + of_find_compatible_node(NULL, NULL, "fsl,ls1046a-esdhc")) + clock = 167000000; + + /* + * Limit SD clock to 125MHz for ls1012a according to its datasheet + */ + if (clock > 125000000 && + of_find_compatible_node(NULL, NULL, "fsl,ls1012a-esdhc")) + clock = 125000000; + /* Workaround to reduce the clock frequency for p1010 esdhc */ if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { if (clock > 20000000) diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c index 7c0b27d132b1..b479bd81120b 100644 --- a/drivers/mtd/chips/jedec_probe.c +++ b/drivers/mtd/chips/jedec_probe.c @@ -1889,6 +1889,8 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base, do { uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi); mask = (1 << (cfi->device_type * 8)) - 1; + if (ofs >= map->size) + return 0; result = map_read(map, base + ofs); bank++; } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION); diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index 7f4ac8c19001..5e3fa5861039 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ 
b/drivers/mtd/nand/fsl_ifc_nand.c @@ -726,6 +726,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) struct fsl_ifc_ctrl *ctrl = priv->ctrl; struct fsl_ifc_regs __iomem *ifc = ctrl->regs; u32 nand_fsr; + int status; /* Use READ_STATUS command, but wait for the device to be ready */ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | @@ -740,12 +741,12 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) fsl_ifc_run_command(mtd); nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); - + status = nand_fsr >> 24; /* * The chip always seems to report that it is * write-protected, even when it is not. */ - return nand_fsr | NAND_STATUS_WP; + return status | NAND_STATUS_WP; } static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip, diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 27864c0863ef..8406f346b0be 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -626,7 +626,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, chip->cmd_ctrl(mtd, readcmd, ctrl); ctrl &= ~NAND_CTRL_CHANGE; } - chip->cmd_ctrl(mtd, command, ctrl); + if (command != NAND_CMD_NONE) + chip->cmd_ctrl(mtd, command, ctrl); /* Address cycle, when necessary */ ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; @@ -655,6 +656,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, */ switch (command) { + case NAND_CMD_NONE: case NAND_CMD_PAGEPROG: case NAND_CMD_ERASE1: case NAND_CMD_ERASE2: @@ -717,7 +719,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, } /* Command latch cycle */ - chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); + if (command != NAND_CMD_NONE) + chip->cmd_ctrl(mtd, command, + NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); if (column != -1 || page_addr != -1) { int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE; @@ -750,6 +754,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, */ switch (command) { + case NAND_CMD_NONE: case NAND_CMD_CACHEDPROG: case NAND_CMD_PAGEPROG: case NAND_CMD_ERASE1: diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 3ea4c022cbb9..ccdb3dd74421 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -265,6 +265,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) vol->last_eb_bytes = vol->usable_leb_size; } + /* Make volume "available" before it becomes accessible via sysfs */ + spin_lock(&ubi->volumes_lock); + ubi->volumes[vol_id] = vol; + ubi->vol_count += 1; + spin_unlock(&ubi->volumes_lock); + /* Register character device for the volume */ cdev_init(&vol->cdev, &ubi_vol_cdev_operations); vol->cdev.owner = THIS_MODULE; @@ -304,11 +310,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) if (err) goto out_sysfs; - spin_lock(&ubi->volumes_lock); - ubi->volumes[vol_id] = vol; - ubi->vol_count += 1; - spin_unlock(&ubi->volumes_lock); - ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED); self_check_volumes(ubi); return err; @@ -328,6 +329,10 @@ out_sysfs: out_cdev: cdev_del(&vol->cdev); out_mapping: + spin_lock(&ubi->volumes_lock); + ubi->volumes[vol_id] = NULL; + ubi->vol_count -= 1; + spin_unlock(&ubi->volumes_lock); if (do_free) kfree(vol->eba_tbl); out_acc: diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2cb34b0f3856..278d12888cab 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1490,39 +1490,6 @@ int bond_enslave(struct net_device 
*bond_dev, struct net_device *slave_dev) goto err_close; } - /* If the mode uses primary, then the following is handled by - * bond_change_active_slave(). - */ - if (!bond_uses_primary(bond)) { - /* set promiscuity level to new slave */ - if (bond_dev->flags & IFF_PROMISC) { - res = dev_set_promiscuity(slave_dev, 1); - if (res) - goto err_close; - } - - /* set allmulti level to new slave */ - if (bond_dev->flags & IFF_ALLMULTI) { - res = dev_set_allmulti(slave_dev, 1); - if (res) - goto err_close; - } - - netif_addr_lock_bh(bond_dev); - - dev_mc_sync_multiple(slave_dev, bond_dev); - dev_uc_sync_multiple(slave_dev, bond_dev); - - netif_addr_unlock_bh(bond_dev); - } - - if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* add lacpdu mc addr to mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - - dev_mc_add(slave_dev, lacpdu_multicast); - } - res = vlan_vids_add_by_dev(slave_dev, bond_dev); if (res) { netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n", @@ -1679,6 +1646,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) goto err_upper_unlink; } + /* If the mode uses primary, then the following is handled by + * bond_change_active_slave(). + */ + if (!bond_uses_primary(bond)) { + /* set promiscuity level to new slave */ + if (bond_dev->flags & IFF_PROMISC) { + res = dev_set_promiscuity(slave_dev, 1); + if (res) + goto err_sysfs_del; + } + + /* set allmulti level to new slave */ + if (bond_dev->flags & IFF_ALLMULTI) { + res = dev_set_allmulti(slave_dev, 1); + if (res) { + if (bond_dev->flags & IFF_PROMISC) + dev_set_promiscuity(slave_dev, -1); + goto err_sysfs_del; + } + } + + netif_addr_lock_bh(bond_dev); + dev_mc_sync_multiple(slave_dev, bond_dev); + dev_uc_sync_multiple(slave_dev, bond_dev); + netif_addr_unlock_bh(bond_dev); + + if (BOND_MODE(bond) == BOND_MODE_8023AD) { + /* add lacpdu mc addr to mc list */ + u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; + + dev_mc_add(slave_dev, lacpdu_multicast); + } + } + bond->slave_cnt++; bond_compute_features(bond); bond_set_carrier(bond); @@ -1702,6 +1703,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) return 0; /* Undo stages on error */ +err_sysfs_del: + bond_sysfs_slave_del(new_slave); + err_upper_unlink: bond_upper_dev_unlink(bond_dev, slave_dev); @@ -1709,9 +1713,6 @@ err_unregister: netdev_rx_handler_unregister(slave_dev); err_detach: - if (!bond_uses_primary(bond)) - bond_hw_addr_flush(bond_dev, slave_dev); - vlan_vids_del_by_dev(slave_dev, bond_dev); if (rcu_access_pointer(bond->primary_slave) == new_slave) RCU_INIT_POINTER(bond->primary_slave, NULL); @@ -2555,11 +2556,13 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) bond_for_each_slave_rcu(bond, slave, iter) { unsigned long trans_start = dev_trans_start(slave->dev); + slave->new_link = BOND_LINK_NOCHANGE; + if (slave->link != BOND_LINK_UP) { if (bond_time_in_interval(bond, trans_start, 1) && bond_time_in_interval(bond, slave->last_rx, 1)) { - slave->link = BOND_LINK_UP; + slave->new_link = BOND_LINK_UP; slave_state_changed = 1; /* primary_slave has no meaning in round-robin @@ -2586,7 +2589,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) if (!bond_time_in_interval(bond, trans_start, 2) || !bond_time_in_interval(bond, slave->last_rx, 2)) { - slave->link = BOND_LINK_DOWN; + slave->new_link = BOND_LINK_DOWN; slave_state_changed = 1; if (slave->link_failure_count < UINT_MAX) @@ -2617,6 +2620,11 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) if 
(!rtnl_trylock()) goto re_arm; + bond_for_each_slave(bond, slave, iter) { + if (slave->new_link != BOND_LINK_NOCHANGE) + slave->link = slave->new_link; + } + if (slave_state_changed) { bond_slave_state_change(bond); if (BOND_MODE(bond) == BOND_MODE_XOR) @@ -3276,12 +3284,17 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res, for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) { u64 nv = new[i]; u64 ov = old[i]; + s64 delta = nv - ov; /* detects if this particular field is 32bit only */ if (((nv | ov) >> 32) == 0) - res[i] += (u32)nv - (u32)ov; - else - res[i] += nv - ov; + delta = (s64)(s32)((u32)nv - (u32)ov); + + /* filter anomalies, some drivers reset their stats + * at down/up events. + */ + if (delta > 0) + res[i] += delta; } } diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index 1e37313054f3..6da69af103e6 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev, return 0; } -static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) +static void cc770_tx(struct net_device *dev, int mo) { struct cc770_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct can_frame *cf = (struct can_frame *)skb->data; - unsigned int mo = obj2msgobj(CC770_OBJ_TX); + struct can_frame *cf = (struct can_frame *)priv->tx_skb->data; u8 dlc, rtr; u32 id; int i; - if (can_dropped_invalid_skb(dev, skb)) - return NETDEV_TX_OK; - - if ((cc770_read_reg(priv, - msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { - netdev_err(dev, "TX register is still occupied!\n"); - return NETDEV_TX_BUSY; - } - - netif_stop_queue(dev); - dlc = cf->can_dlc; id = cf->can_id; - if (cf->can_id & CAN_RTR_FLAG) - rtr = 0; - else - rtr = MSGCFG_DIR; + rtr = cf->can_id & CAN_RTR_FLAG ? 
0 : MSGCFG_DIR; + + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); cc770_write_reg(priv, msgobj[mo].ctrl1, RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES); + if (id & CAN_EFF_FLAG) { id &= CAN_EFF_MASK; cc770_write_reg(priv, msgobj[mo].config, @@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < dlc; i++) cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); - /* Store echo skb before starting the transfer */ - can_put_echo_skb(skb, dev, 0); - cc770_write_reg(priv, msgobj[mo].ctrl1, - RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); + RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC); +} - stats->tx_bytes += dlc; +static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + unsigned int mo = obj2msgobj(CC770_OBJ_TX); + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; - /* - * HM: We had some cases of repeated IRQs so make sure the - * INT is acknowledged I know it's already further up, but - * doing again fixed the issue - */ - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); + netif_stop_queue(dev); + + if ((cc770_read_reg(priv, + msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { + netdev_err(dev, "TX register is still occupied!\n"); + return NETDEV_TX_BUSY; + } + + priv->tx_skb = skb; + cc770_tx(dev, mo); return NETDEV_TX_OK; } @@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) struct cc770_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; unsigned int mo = obj2msgobj(o); + struct can_frame *cf; + u8 ctrl1; + + ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); - /* Nothing more to send, switch off interrupts */ cc770_write_reg(priv, msgobj[mo].ctrl0, MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); - /* - * We had some cases of repeated IRQ so make sure the - * INT is acknowledged + cc770_write_reg(priv, msgobj[mo].ctrl1, + RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES); + + if (unlikely(!priv->tx_skb)) { + netdev_err(dev, "missing tx skb in tx interrupt\n"); + return; + } + + if (unlikely(ctrl1 & MSGLST_SET)) { + stats->rx_over_errors++; + stats->rx_errors++; + } + + /* When the CC770 is sending an RTR message and it receives a regular + * message that matches the id of the RTR message, it will overwrite the + * outgoing message in the TX register. When this happens we must + * process the received message and try to transmit the outgoing skb + * again. 
*/ - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); + if (unlikely(ctrl1 & NEWDAT_SET)) { + cc770_rx(dev, mo, ctrl1); + cc770_tx(dev, mo); + return; + } + cf = (struct can_frame *)priv->tx_skb->data; + stats->tx_bytes += cf->can_dlc; stats->tx_packets++; + + can_put_echo_skb(priv->tx_skb, dev, 0); can_get_echo_skb(dev, 0); + priv->tx_skb = NULL; + netif_wake_queue(dev); } @@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv) priv->can.do_set_bittiming = cc770_set_bittiming; priv->can.do_set_mode = cc770_set_mode; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; + priv->tx_skb = NULL; memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h index a1739db98d91..95752e1d1283 100644 --- a/drivers/net/can/cc770/cc770.h +++ b/drivers/net/can/cc770/cc770.h @@ -193,6 +193,8 @@ struct cc770_priv { u8 cpu_interface; /* CPU interface register */ u8 clkout; /* Clock out register */ u8 bus_config; /* Bus conffiguration register */ + + struct sk_buff *tx_skb; }; struct net_device *alloc_cc770dev(int sizeof_priv); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index c31e691d11fc..e8d31640058d 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -604,6 +604,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata, xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb); cb |= CFG_CLE_BYPASS_EN0; CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); + CFG_CLE_IP_HDR_LEN_SET(&cb, 0); xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb); xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index c153a1dc5ff7..480312105964 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -147,6 +147,7 @@ enum xgene_enet_rm { #define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3) #define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2) +#define CFG_CLE_IP_HDR_LEN_SET(dst, val) xgene_set_bits(dst, val, 8, 5) #define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12) #define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4) #define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2) diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index c31c7407b753..425dae560322 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -150,8 +150,10 @@ static int emac_rockchip_probe(struct platform_device *pdev) /* Optional regulator for PHY */ priv->regulator = devm_regulator_get_optional(dev, "phy"); if (IS_ERR(priv->regulator)) { - if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto out_clk_disable; + } dev_err(dev, "no regulator found\n"); priv->regulator = NULL; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 027705117086..af9ec57bbebf 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -729,37 +729,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, struct bcm_sysport_tx_ring *ring) { struct net_device 
*ndev = priv->netdev; - unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; unsigned int pkts_compl = 0, bytes_compl = 0; + unsigned int txbds_processed = 0; struct bcm_sysport_cb *cb; + unsigned int txbds_ready; + unsigned int c_index; u32 hw_ind; /* Compute how many descriptors have been processed since last call */ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; - ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); - - last_c_index = ring->c_index; - num_tx_cbs = ring->size; - - c_index &= (num_tx_cbs - 1); - - if (c_index >= last_c_index) - last_tx_cn = c_index - last_c_index; - else - last_tx_cn = num_tx_cbs - last_c_index + c_index; + txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK; netif_dbg(priv, tx_done, ndev, - "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", - ring->index, c_index, last_tx_cn, last_c_index); + "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + ring->index, ring->c_index, c_index, txbds_ready); - while (last_tx_cn-- > 0) { - cb = ring->cbs + last_c_index; + while (txbds_processed < txbds_ready) { + cb = &ring->cbs[ring->clean_index]; bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl); ring->desc_count++; - last_c_index++; - last_c_index &= (num_tx_cbs - 1); + txbds_processed++; + + if (likely(ring->clean_index < ring->size - 1)) + ring->clean_index++; + else + ring->clean_index = 0; } ring->c_index = c_index; @@ -1229,6 +1225,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); ring->index = index; ring->size = size; + ring->clean_index = 0; ring->alloc_size = ring->size; ring->desc_cpu = p; ring->desc_count = ring->size; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index f28bf545d7f4..8ace6ecb5f79 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -638,7 +638,7 @@ struct bcm_sysport_tx_ring { unsigned int desc_count; /* Number of descriptors */ unsigned int curr_desc; /* Current descriptor */ unsigned int c_index; /* Last consumer index */ - unsigned int p_index; /* Current producer index */ + unsigned int clean_index; /* Current clean index */ struct bcm_sysport_cb *cbs; /* Transmit control blocks */ struct dma_desc *desc_cpu; /* CPU view of the descriptor */ struct bcm_sysport_priv *priv; /* private context backpointer */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 74bece5897c9..949a82458a29 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2044,6 +2044,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp) ETH_OVREHEAD + mtu + BNX2X_FW_RX_ALIGN_END; + fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size); /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */ if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; @@ -3942,15 +3943,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* when transmitting in a vf, start bd must hold the ethertype * for fw to enforce it */ + u16 vlan_tci = 0; #ifndef BNX2X_STOP_ON_ERROR - if (IS_VF(bp)) + if (IS_VF(bp)) { #endif - tx_start_bd->vlan_or_ethertype = - cpu_to_le16(ntohs(eth->h_proto)); + /* Still need to consider inband vlan for enforced */ + if (__vlan_get_tag(skb, 
&vlan_tci)) { + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(ntohs(eth->h_proto)); + } else { + tx_start_bd->bd_flags.as_bitfield |= + (X_ETH_INBAND_VLAN << + ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(vlan_tci); + } #ifndef BNX2X_STOP_ON_ERROR - else + } else { /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + } #endif } diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 0f6811860ad5..a36e38676640 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -2845,7 +2845,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) { - memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); + strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); } static void diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index cc1725616f9d..50747573f42e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2823,7 +2823,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) if (!g) { netif_info(lio, tx_err, lio->netdev, "Transmit scatter gather: glist null!\n"); - goto lio_xmit_dma_failed; + goto lio_xmit_failed; } cmdsetup.s.gather = 1; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index cf61a5869c6e..de23f23b41de 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -6076,13 +6076,18 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, if (!t4_fw_matches_chip(adap, fw_hdr)) return -EINVAL; + /* Disable FW_OK flag so that mbox commands with FW_OK flag set + * wont be sent when we are flashing FW. + */ + adap->flags &= ~FW_OK; + ret = t4_fw_halt(adap, mbox, force); if (ret < 0 && !force) - return ret; + goto out; ret = t4_load_fw(adap, fw_data, size); if (ret < 0) - return ret; + goto out; /* * Older versions of the firmware don't understand the new @@ -6093,7 +6098,17 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, * its header flags to see if it advertises the capability. */ reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); - return t4_fw_restart(adap, mbox, reset); + ret = t4_fw_restart(adap, mbox, reset); + + /* Grab potentially new Firmware Device Log parameters so we can see + * how healthy the new Firmware is. It's okay to contact the new + * Firmware for these parameters even though, as far as it's + * concerned, we've never said "HELLO" to it ... 
+ */ + (void)t4_init_devlog_params(adap); +out: + adap->flags |= FW_OK; + return ret; } /** @@ -7696,7 +7711,16 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr) ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]); if (ret) break; - idx = (idx + 1) & UPDBGLARDPTR_M; + + /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to + * identify the 32-bit portion of the full 312-bit data + */ + if (is_t6(adap->params.chip) && (idx & 0xf) >= 9) + idx = (idx & 0xff0) + 0x10; + else + idx++; + /* address can't exceed 0xfff */ + idx &= UPDBGLARDPTR_M; } restart: if (cfg & UPDBGLAEN_F) { diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index fa3786a9d30e..ec8ffd7eae33 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2604,8 +2604,8 @@ void t4vf_sge_stop(struct adapter *adapter) int t4vf_sge_init(struct adapter *adapter) { struct sge_params *sge_params = &adapter->params.sge; - u32 fl0 = sge_params->sge_fl_buffer_size[0]; - u32 fl1 = sge_params->sge_fl_buffer_size[1]; + u32 fl_small_pg = sge_params->sge_fl_buffer_size[0]; + u32 fl_large_pg = sge_params->sge_fl_buffer_size[1]; struct sge *s = &adapter->sge; unsigned int ingpadboundary, ingpackboundary; @@ -2614,9 +2614,20 @@ int t4vf_sge_init(struct adapter *adapter) * the Physical Function Driver. Ideally we should be able to deal * with _any_ configuration. Practice is different ... */ - if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) { + + /* We only bother using the Large Page logic if the Large Page Buffer + * is larger than our Page Size Buffer. + */ + if (fl_large_pg <= fl_small_pg) + fl_large_pg = 0; + + /* The Page Size Buffer must be exactly equal to our Page Size and the + * Large Page Size Buffer should be 0 (per above) or a power of 2. + */ + if (fl_small_pg != PAGE_SIZE || + (fl_large_pg & (fl_large_pg - 1)) != 0) { dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", - fl0, fl1); + fl_small_pg, fl_large_pg); return -EINVAL; } if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) { @@ -2627,8 +2638,8 @@ int t4vf_sge_init(struct adapter *adapter) /* * Now translate the adapter parameters into our internal forms. */ - if (fl1) - s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F) ? 
128 : 64); s->pktshift = PKTSHIFT_G(sge_params->sge_control); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 6d0c5d5eea6d..58c0fccdd8cb 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -28,6 +28,7 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/netdevice.h> +#include <linux/of.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <net/ip.h> diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 458e2d97d096..ae8e4fc22e7b 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3539,6 +3539,8 @@ fec_drv_remove(struct platform_device *pdev) fec_enet_mii_remove(fep); if (fep->reg_phy) regulator_disable(fep->reg_phy); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); of_node_put(fep->phy_node); free_netdev(ndev); diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 40071dad1c57..9c76f1a2f57b 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -382,7 +382,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) { const struct of_device_id *id = of_match_device(fsl_pq_mdio_match, &pdev->dev); - const struct fsl_pq_mdio_data *data = id->data; + const struct fsl_pq_mdio_data *data; struct device_node *np = pdev->dev.of_node; struct resource res; struct device_node *tbi; @@ -390,6 +390,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) struct mii_bus *new_bus; int err; + if (!id) { + dev_err(&pdev->dev, "Failed to match device\n"); + return -ENODEV; + } + + data = id->data; + dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); new_bus = mdiobus_alloc_size(sizeof(*priv)); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index 802d55457f19..b1a27aef4425 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -776,7 +776,7 @@ static void hns_xgmac_get_strings(u32 stringset, u8 *data) */ static int hns_xgmac_get_sset_count(int stringset) { - if (stringset == ETH_SS_STATS) + if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) return ARRAY_SIZE(g_xgmac_stats_string); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index a0332129970b..4b91eb70c683 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -1000,8 +1000,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset) cnt--; return cnt; - } else { + } else if (stringset == ETH_SS_STATS) { return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset)); + } else { + return -EOPNOTSUPP; } } diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 5d7db6c01c46..f301c03c527b 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -342,6 +342,7 @@ static int emac_reset(struct emac_instance *dev) { struct emac_regs __iomem *p = dev->emacp; int n = 20; + bool __maybe_unused try_internal_clock = false; DBG(dev, "reset" NL); @@ -354,6 +355,7 @@ static int emac_reset(struct emac_instance *dev) } #ifdef CONFIG_PPC_DCR_NATIVE +do_retry: /* * PPC460EX/GT Embedded Processor Advanced 
User's Manual * section 28.10.1 Mode Register 0 (EMACx_MR0) states: @@ -361,10 +363,19 @@ static int emac_reset(struct emac_instance *dev) * of the EMAC. If none is present, select the internal clock * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). * After a soft reset, select the external clock. + * + * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the + * ethernet cable is not attached. This causes the reset to timeout + * and the PHY detection code in emac_init_phy() is unable to + * communicate and detect the AR8035-A PHY. As a result, the emac + * driver bails out early and the user has no ethernet. + * In order to stay compatible with existing configurations, the + * driver will temporarily switch to the internal clock, after + * the first reset fails. */ if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { - if (dev->phy_address == 0xffffffff && - dev->phy_map == 0xffffffff) { + if (try_internal_clock || (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff)) { /* No PHY: select internal loop clock before reset */ dcri_clrset(SDR0, SDR0_ETH_CFG, 0, SDR0_ETH_CFG_ECS << dev->cell_index); @@ -382,8 +393,15 @@ static int emac_reset(struct emac_instance *dev) #ifdef CONFIG_PPC_DCR_NATIVE if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { - if (dev->phy_address == 0xffffffff && - dev->phy_map == 0xffffffff) { + if (!n && !try_internal_clock) { + /* first attempt has timed out. */ + n = 20; + try_internal_clock = true; + goto do_retry; + } + + if (try_internal_clock || (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff)) { /* No PHY: restore external clock source after reset */ dcri_clrset(SDR0, SDR0_ETH_CFG, SDR0_ETH_CFG_ECS << dev->cell_index, 0); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 5205f1ebe381..20d8806d2bff 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1182,6 +1182,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work) struct e1000_hw *hw = &adapter->hw; if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { + struct sk_buff *skb = adapter->tx_hwtstamp_skb; struct skb_shared_hwtstamps shhwtstamps; u64 txstmp; @@ -1190,9 +1191,14 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work) e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp); - skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps); - dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + /* Clear the global tx_hwtstamp_skb pointer and force writes + * prior to notifying the stack of a Tx timestamp. 
+ */ adapter->tx_hwtstamp_skb = NULL; + wmb(); /* force write prior to skb_tstamp_tx */ + + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); } else if (time_after(jiffies, adapter->tx_hwtstamp_start + adapter->tx_timeout_factor * HZ)) { dev_kfree_skb_any(adapter->tx_hwtstamp_skb); @@ -3526,6 +3532,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) switch (hw->mac.type) { case e1000_pch2lan: + /* Stable 96MHz frequency */ + incperiod = INCPERIOD_96MHz; + incvalue = INCVALUE_96MHz; + shift = INCVALUE_SHIFT_96MHz; + adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + break; case e1000_pch_lpt: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 96MHz frequency */ @@ -6583,12 +6595,17 @@ static int e1000e_pm_thaw(struct device *dev) static int e1000e_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); + int rc; e1000e_flush_lpic(pdev); e1000e_pm_freeze(dev); - return __e1000_shutdown(pdev, false); + rc = __e1000_shutdown(pdev, false); + if (rc) + e1000e_pm_thaw(dev); + + return rc; } static int e1000e_pm_resume(struct device *dev) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 2ce0eba5e040..38431b49020f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -983,7 +983,7 @@ static void fm10k_self_test(struct net_device *dev, memset(data, 0, sizeof(*data) * FM10K_TEST_LEN); - if (FM10K_REMOVED(hw)) { + if (FM10K_REMOVED(hw->hw_addr)) { netif_err(interface, drv, dev, "Interface removed - test blocked\n"); eth_test->flags |= ETH_TEST_FL_FAILED; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 488a50d59dca..3da1f206ff84 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1073,6 +1073,11 @@ static int i40e_get_eeprom_len(struct net_device *netdev) struct i40e_hw *hw = &np->vsi->back->hw; u32 val; +#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF + if (hw->mac.type == I40E_MAC_X722) { + val = X722_EEPROM_SCOPE_LIMIT + 1; + return val; + } val = (rd32(hw, I40E_GLPCI_LBARCTRL) & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 6100cdd9ad13..dd4e6ea9e0e1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -292,14 +292,14 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, { enum i40e_status_code ret_code = 0; - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { - ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (!ret_code) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { ret_code = i40e_read_nvm_word_aq(hw, offset, data); - i40e_release_nvm(hw); + } else { + ret_code = i40e_read_nvm_word_srctl(hw, offset, data); } - } else { - ret_code = i40e_read_nvm_word_srctl(hw, offset, data); + i40e_release_nvm(hw); } return ret_code; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 4b62aa1f9ff8..6e5065f0907b 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5079,7 +5079,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&hw->restart_work, sky2_restart); 
pci_set_drvdata(pdev, hw); - pdev->d3_delay = 150; + pdev->d3_delay = 200; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 1d4e2e054647..897d061e4f03 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -35,6 +35,7 @@ #include <linux/etherdevice.h> #include <linux/mlx4/cmd.h> +#include <linux/mlx4/qp.h> #include <linux/export.h> #include "mlx4.h" @@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev, if (IS_ERR(mailbox)) return PTR_ERR(mailbox); + if (!mlx4_qp_lookup(dev, rule->qpn)) { + mlx4_err_rule(dev, "QP doesn't exist\n", rule); + ret = -EINVAL; + goto out; + } + trans_rule_ctrl_to_hw(rule, mailbox->buf); size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); list_for_each_entry(cur, &rule->list, list) { ret = parse_trans_rule(dev, cur, mailbox->buf + size); - if (ret < 0) { - mlx4_free_cmd_mailbox(dev, mailbox); - return ret; - } + if (ret < 0) + goto out; + size += ret; } @@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev, } } +out: mlx4_free_cmd_mailbox(dev, mailbox); return ret; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index d8359ffba026..62f1a3433a62 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -381,6 +381,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) __mlx4_qp_free_icm(dev, qpn); } +struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + struct mlx4_qp *qp; + + spin_lock(&qp_table->lock); + + qp = __mlx4_qp_lookup(dev, qpn); + + spin_unlock(&qp_table->lock); + return qp; +} + int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -468,6 +481,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, } if (attr & MLX4_UPDATE_QP_QOS_VPORT) { + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) { + mlx4_warn(dev, "Granular QoS per VF is not enabled\n"); + err = -EOPNOTSUPP; + goto out; + } + qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; cmd->qp_context.qos_vport = params->qos_vport; } diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index d1fc7fa87b05..e3080fbd9d00 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -5040,6 +5040,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); } +static void update_qos_vpp(struct mlx4_update_qp_context *ctx, + struct mlx4_vf_immed_vlan_work *work) +{ + ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP); + ctx->qp_context.qos_vport = work->qos_vport; +} + void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) { struct mlx4_vf_immed_vlan_work *work = @@ -5144,11 +5151,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) qp->sched_queue & 0xC7; upd_context->qp_context.pri_path.sched_queue |= ((work->qos & 0x7) << 3); - upd_context->qp_mask |= - cpu_to_be64(1ULL << - MLX4_UPD_QP_MASK_QOS_VPP); - upd_context->qp_context.qos_vport = - work->qos_vport; + + if (dev->caps.flags2 & + MLX4_DEV_CAP_FLAG2_QOS_VPP) + update_qos_vpp(upd_context, work); } err = mlx4_cmd(dev, mailbox->dma, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c 
b/drivers/net/ethernet/mellanox/mlx5/core/main.c index f5c1f4acc57b..7c42be586be8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -513,7 +513,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) struct mlx5_priv *priv = &mdev->priv; struct msix_entry *msix = priv->msix_arr; int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; - int err; if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); @@ -523,18 +522,11 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), priv->irq_info[i].mask); - err = irq_set_affinity_hint(irq, priv->irq_info[i].mask); - if (err) { - mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x", - irq); - goto err_clear_mask; - } + if (IS_ENABLED(CONFIG_SMP) && + irq_set_affinity_hint(irq, priv->irq_info[i].mask)) + mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); return 0; - -err_clear_mask: - free_cpumask_var(priv->irq_info[i].mask); - return err; } static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c index b8d5270359cd..e30676515529 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -247,7 +247,7 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) cmd.req.arg3 = 0; if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) - netxen_issue_cmd(adapter, &cmd); + rcode = netxen_issue_cmd(adapter, &cmd); if (rcode != NX_RCODE_SUCCESS) return -EIO; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 509b596cf1e8..bd1ec70fb736 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) } return -EIO; } - usleep_range(1000, 1500); + udelay(1200); } if (id_reg) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 7327b729ba2e..ffa6885acfc8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -127,6 +127,8 @@ static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id) return 0; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset); pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c index be258d90de9e..e3223f2fe2ff 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c @@ -765,7 +765,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) sizeof(struct mpi_coredump_global_header); mpi_coredump->mpi_global_header.imageSize = sizeof(struct ql_mpi_coredump); - memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", + strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", sizeof(mpi_coredump->mpi_global_header.idString)); /* Get generic NIC reg dump */ @@ -1255,7 +1255,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev, 
sizeof(struct mpi_coredump_global_header); mpi_coredump->mpi_global_header.imageSize = sizeof(struct ql_reg_dump); - memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", + strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", sizeof(mpi_coredump->mpi_global_header.idString)); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 1ef03939d25f..c90ae4d4be7d 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -296,8 +296,9 @@ qcaspi_receive(struct qcaspi *qca) /* Allocate rx SKB if we don't have one available. */ if (!qca->rx_skb) { - qca->rx_skb = netdev_alloc_skb(net_dev, - net_dev->mtu + VLAN_ETH_HLEN); + qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, + net_dev->mtu + + VLAN_ETH_HLEN); if (!qca->rx_skb) { netdev_dbg(net_dev, "out of RX resources\n"); qca->stats.out_of_mem++; @@ -377,7 +378,7 @@ qcaspi_receive(struct qcaspi *qca) qca->rx_skb, qca->rx_skb->dev); qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; netif_rx_ni(qca->rx_skb); - qca->rx_skb = netdev_alloc_skb(net_dev, + qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, net_dev->mtu + VLAN_ETH_HLEN); if (!qca->rx_skb) { netdev_dbg(net_dev, "out of RX resources\n"); @@ -759,7 +760,8 @@ qcaspi_netdev_init(struct net_device *dev) if (!qca->rx_buffer) return -ENOBUFS; - qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN); + qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu + + VLAN_ETH_HLEN); if (!qca->rx_skb) { kfree(qca->rx_buffer); netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n"); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 3783c40f568b..a82c89af7124 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -8411,12 +8411,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_msi_4; } + pci_set_drvdata(pdev, dev); + rc = register_netdev(dev); if (rc < 0) goto err_out_cnt_5; - pci_set_drvdata(pdev, dev); - netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr, (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 424d1dee55c9..afaf79b8761f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3222,7 +3222,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* MDIO bus init */ ret = sh_mdio_init(mdp, pd); if (ret) { - dev_err(&ndev->dev, "failed to initialise MDIO\n"); + dev_err(&pdev->dev, "failed to initialise MDIO\n"); goto out_release; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index fc958067d10a..c69b0bdd891d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -280,6 +280,10 @@ struct cpsw_ss_regs { /* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */ #define CPSW_V1_SEQ_ID_OFS_SHIFT 16 +#define CPSW_MAX_BLKS_TX 15 +#define CPSW_MAX_BLKS_TX_SHIFT 4 +#define CPSW_MAX_BLKS_RX 5 + struct cpsw_host_regs { u32 max_blks; u32 blk_cnt; @@ -878,7 +882,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, /* set speed_in input in case RMII mode is used in 100Mbps */ if (phy->speed == 100) mac_control |= BIT(15); - else if (phy->speed == 10) + /* in band mode only works in 10Mbps RGMII mode */ + else if ((phy->speed == 10) && phy_interface_is_rgmii(phy)) 
mac_control |= BIT(18); /* In Band mode */ if (priv->rx_pause) @@ -1126,11 +1131,23 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) switch (priv->version) { case CPSW_VERSION_1: slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); + /* Increase RX FIFO size to 5 for supporting fullduplex + * flow control mode + */ + slave_write(slave, + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | + CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS); break; case CPSW_VERSION_2: case CPSW_VERSION_3: case CPSW_VERSION_4: slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); + /* Increase RX FIFO size to 5 for supporting fullduplex + * flow control mode + */ + slave_write(slave, + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | + CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS); break; } diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 49fe59b180a8..a75ce9051a7f 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -574,6 +574,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case HDLCDRVCTL_CALIBRATE: if(!capable(CAP_SYS_RAWIO)) return -EPERM; + if (s->par.bitrate <= 0) + return -EINVAL; if (bi.data.calibrate > INT_MAX / s->par.bitrate) return -EINVAL; s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 8aecea0d5dbf..142015af43db 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -282,6 +282,10 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb, if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS) success = true; } else { + if (!ether_addr_equal_64bits(eth_hdr(skb)->h_dest, + ipvlan->phy_dev->dev_addr)) + skb->pkt_type = PACKET_OTHERHOST; + ret = RX_HANDLER_ANOTHER; success = true; } diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7d0690433ee0..7d2cf015c5e7 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -148,6 +148,12 @@ static inline int phy_aneg_done(struct phy_device *phydev) if (phydev->drv->aneg_done) return phydev->drv->aneg_done(phydev); + /* Avoid genphy_aneg_done() if the Clause 45 PHY does not + * implement Clause 22 registers + */ + if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) + return -EINVAL; + return genphy_aneg_done(phydev); } diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index e2decf71c6d1..46448d7e3290 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -2952,6 +2952,15 @@ ppp_connect_channel(struct channel *pch, int unit) goto outl; ppp_lock(ppp); + spin_lock_bh(&pch->downl); + if (!pch->chan) { + /* Don't connect unregistered channels */ + spin_unlock_bh(&pch->downl); + ppp_unlock(ppp); + ret = -ENOTCONN; + goto outl; + } + spin_unlock_bh(&pch->downl); if (pch->file.hdrlen > ppp->file.hdrlen) ppp->file.hdrlen = pch->file.hdrlen; hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index f7e8c79349ad..12a627fcc02c 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -501,7 +501,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.mtu = dst_mtu(&rt->dst); if (!po->chan.mtu) po->chan.mtu = PPP_MRU; - ip_rt_put(rt); po->chan.mtu -= PPTP_HEADER_OVERHEAD; po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 
61cd53838360..9bca36e1fefd 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2380,7 +2380,7 @@ send_done: if (!nlh) { err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) - goto errout; + return err; goto send_done; } @@ -2660,7 +2660,7 @@ send_done: if (!nlh) { err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) - goto errout; + return err; goto send_done; } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 72cb30828a12..c8e98c8e29fa 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1069,6 +1069,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) u16 n = 0, index, ndplen; u8 ready2send = 0; u32 delayed_ndp_size; + size_t padding_count; /* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated * accordingly. Otherwise, we should check here. @@ -1225,11 +1226,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) * a ZLP after full sized NTBs. */ if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && - skb_out->len > ctx->min_tx_pkt) - memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, - ctx->tx_max - skb_out->len); - else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) + skb_out->len > ctx->min_tx_pkt) { + padding_count = ctx->tx_max - skb_out->len; + memset(skb_put(skb_out, padding_count), 0, padding_count); + } else if (skb_out->len < ctx->tx_max && + (skb_out->len % dev->maxpacket) == 0) { *skb_put(skb_out, 1) = 0; /* force short packet */ + } /* set final frame length */ nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index ba21d072be31..6b4cc1c2e6b4 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -399,6 +399,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (ifmp && (dev->ifindex != 0)) peer->ifindex = ifmp->ifi_index; + peer->gso_max_size = dev->gso_max_size; + peer->gso_max_segs = dev->gso_max_segs; + err = register_netdevice(peer); put_net(net); net = NULL; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8dfc75250583..d01285250204 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -556,7 +556,12 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, hdr = skb_vnet_hdr(skb); sg_init_table(rq->sg, 2); sg_set_buf(rq->sg, hdr, vi->hdr_len); - skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); + + err = skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); + if (unlikely(err < 0)) { + dev_kfree_skb(skb); + return err; + } err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); if (err < 0) @@ -858,7 +863,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) struct virtio_net_hdr_mrg_rxbuf *hdr; const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; struct virtnet_info *vi = sq->vq->vdev->priv; - unsigned num_sg; + int num_sg; unsigned hdr_len = vi->hdr_len; bool can_push; @@ -911,11 +916,16 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) if (can_push) { __skb_push(skb, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); + if (unlikely(num_sg < 0)) + return num_sg; /* Pull header back to avoid skew in tx bytes calculations. 
*/ __skb_pull(skb, hdr_len); } else { sg_set_buf(sq->sg, hdr, hdr_len); - num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; + num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); + if (unlikely(num_sg < 0)) + return num_sg; + num_sg++; } return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 82bf85ae5d08..419c045d0752 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2789,6 +2789,11 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter) /* we need to enable NAPI, otherwise dev_close will deadlock */ for (i = 0; i < adapter->num_rx_queues; i++) napi_enable(&adapter->rx_queue[i].napi); + /* + * Need to clear the quiesce bit to ensure that vmxnet3_close + * can quiesce the device properly + */ + clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); dev_close(adapter->netdev); } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index ac945f8781ac..d3d59122a357 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -550,13 +550,15 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s neigh = __ipv4_neigh_lookup_noref(dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); - if (!IS_ERR(neigh)) + if (!IS_ERR(neigh)) { ret = dst_neigh_output(dst, neigh, skb); + rcu_read_unlock_bh(); + return ret; + } rcu_read_unlock_bh(); err: - if (unlikely(ret < 0)) - vrf_tx_error(skb->dev, skb); + vrf_tx_error(skb->dev, skb); return ret; } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index dab3bf6649e6..c41378214ede 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -962,7 +962,7 @@ static bool vxlan_snoop(struct net_device *dev, return false; /* Don't migrate static entries, drop packets */ - if (f->state & NUD_NOARP) + if (f->state & (NUD_PERMANENT | NUD_NOARP)) return true; if (net_ratelimit()) @@ -2834,6 +2834,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, needed_headroom = lowerdev->hard_header_len; } + if (lowerdev) { + dev->gso_max_size = lowerdev->gso_max_size; + dev->gso_max_segs = lowerdev->gso_max_segs; + } + if (conf->mtu) { err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false); if (err) diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 0d7645581f91..4842344a96f1 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg) ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, 0, NULL); proto->restart_counter--; - } else + } else if (netif_carrier_ok(proto->dev)) + ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, + 0, NULL); + else ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, 0, NULL); break; diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c index db363856e0b5..2b064998915f 100644 --- a/drivers/net/wan/pc300too.c +++ b/drivers/net/wan/pc300too.c @@ -347,6 +347,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev, card->rambase == NULL) { pr_err("ioremap() failed\n"); pc300_pci_remove_one(pdev); + return -ENOMEM; } /* PLX PCI 9050 workaround for local configuration register read bug */ diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 42aab9b86af3..0836a81b93e0 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -2226,6 +2226,15 @@ static ssize_t ath10k_write_simulate_radar(struct file 
*file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; + struct ath10k_vif *arvif; + + /* Just check for the first vif alone, as all the vifs will be + * sharing the same channel and if the channel is disabled, all the + * vifs will share the same 'is_started' state. + */ + arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list); + if (!arvif->is_started) + return -EINVAL; ieee80211_radar_detected(ar->hw); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 8d382f12b5fd..008fd633cb5d 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -5665,6 +5665,22 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, arvif->vdev_id, ret); } +static void ath10k_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + + mutex_lock(&ar->conf_mutex); + memcpy(&arvif->gtk_rekey_data.kek, data->kek, NL80211_KEK_LEN); + memcpy(&arvif->gtk_rekey_data.kck, data->kck, NL80211_KCK_LEN); + arvif->gtk_rekey_data.replay_ctr = + be64_to_cpup((__be64 *)data->replay_ctr); + arvif->gtk_rekey_data.valid = true; + mutex_unlock(&ar->conf_mutex); +} + static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) @@ -6151,6 +6167,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, "mac vdev %d peer delete %pM sta %pK (sta gone)\n", arvif->vdev_id, sta->addr, sta); + if (sta->tdls) { + ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, + sta, + WMI_TDLS_PEER_STATE_TEARDOWN); + if (ret) + ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n", + sta->addr, + WMI_TDLS_PEER_STATE_TEARDOWN, ret); + } + ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", @@ -7174,7 +7200,7 @@ ath10k_mac_update_rx_channel(struct ath10k *ar, lockdep_assert_held(&ar->data_lock); WARN_ON(ctx && vifs); - WARN_ON(vifs && n_vifs != 1); + WARN_ON(vifs && !n_vifs); /* FIXME: Sort of an optimization and a workaround. Peers and vifs are * on a linked list now.
Doing a lookup peer -> vif -> chanctx for each @@ -7599,6 +7625,7 @@ static const struct ieee80211_ops ath10k_ops = { .bss_info_changed = ath10k_bss_info_changed, .hw_scan = ath10k_hw_scan, .cancel_hw_scan = ath10k_cancel_hw_scan, + .set_rekey_data = ath10k_set_rekey_data, .set_key = ath10k_set_key, .set_default_unicast_key = ath10k_set_default_unicast_key, .sta_state = ath10k_sta_state, @@ -7634,7 +7661,6 @@ static const struct ieee80211_ops ath10k_ops = { .suspend = ath10k_wow_op_suspend, .resume = ath10k_wow_op_resume, .set_wakeup = ath10k_wow_op_set_wakeup, - .set_rekey_data = ath10k_wow_op_set_rekey_data, #endif #ifdef CONFIG_MAC80211_DEBUGFS .sta_add_debugfs = ath10k_sta_add_debugfs, @@ -8308,6 +8334,7 @@ err_free: void ath10k_mac_unregister(struct ath10k *ar) { + ath10k_wow_deinit(ar); ieee80211_unregister_hw(ar->hw); if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 2dc2b5360ee8..d938ca951aee 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -3123,13 +3123,14 @@ ath10k_wmi_tlv_op_gen_set_arp_ns_offload(struct ath10k *ar, void *ptr; int i; struct wmi_ns_arp_offload_req *arp = &arvif->arp_offload; + struct wmi_ns_arp_offload_req *ns = &arvif->ns_offload; struct wmi_ns_offload *ns_tuple; struct wmi_arp_offload *arp_tuple; len = sizeof(*cmd) + sizeof(*tlv) + - sizeof(*tlv) + WMI_MAX_NS_OFFLOADS * + sizeof(*tlv) + WMI_NS_ARP_OFFLOAD * (sizeof(struct wmi_ns_offload) + sizeof(*tlv)) + - sizeof(*tlv) + WMI_MAX_ARP_OFFLOADS * + sizeof(*tlv) + WMI_NS_ARP_OFFLOAD * (sizeof(struct wmi_arp_offload) + sizeof(*tlv)); skb = ath10k_wmi_alloc_skb(ar, len); @@ -3147,33 +3148,49 @@ ath10k_wmi_tlv_op_gen_set_arp_ns_offload(struct ath10k *ar, ptr += (sizeof(*tlv) + sizeof(*cmd)); tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); - tlv->len = __cpu_to_le16(WMI_MAX_NS_OFFLOADS * + tlv->len = __cpu_to_le16(WMI_NS_ARP_OFFLOAD * (sizeof(struct wmi_ns_offload) + sizeof(*tlv))); ptr += sizeof(*tlv); tlv = ptr; - for (i = 0; i < WMI_MAX_NS_OFFLOADS; i++) { + for (i = 0; i < WMI_NS_ARP_OFFLOAD; i++) { tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE); tlv->len = __cpu_to_le16(sizeof(struct wmi_ns_offload)); ns_tuple = (struct wmi_ns_offload *)tlv->value; - ns_tuple->flags |= __cpu_to_le32(WMI_ARP_NS_OFFLOAD_DISABLE); + if (ns->enable_offload) { + ns_tuple->flags |= + __cpu_to_le32(WMI_ARP_NS_OFF_FLAGS_VALID); + if (ns->info.target_addr_valid.s6_addr[i]) { + memcpy(&ns_tuple->target_ipaddr[0], + &ns->info.target_addr[i], + sizeof(struct in6_addr)); + } + memcpy(&ns_tuple->solicitation_ipaddr, + &ns->info.self_addr[i], sizeof(struct in6_addr)); + if (ns->info.target_ipv6_ac.s6_addr[i] == IPV6_ADDR_ANY) + ns_tuple->flags |= + __cpu_to_le32(WMI_NSOFF_IPV6_ANYCAST); + } else { + ns_tuple->flags |= + __cpu_to_le32(WMI_ARP_NS_OFFLOAD_DISABLE); + } ptr += (sizeof(*tlv) + sizeof(struct wmi_ns_offload)); tlv = ptr; } tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); - tlv->len = __cpu_to_le16(WMI_MAX_ARP_OFFLOADS * + tlv->len = __cpu_to_le16(WMI_NS_ARP_OFFLOAD * (sizeof(struct wmi_arp_offload) + sizeof(*tlv))); ptr += sizeof(*tlv); tlv = ptr; - for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) { + for (i = 0; i < WMI_NS_ARP_OFFLOAD; i++) { tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE); tlv->len = __cpu_to_le16(sizeof(struct wmi_arp_offload)); arp_tuple = (struct wmi_arp_offload *)tlv->value; if 
(arp->enable_offload && (i == 0)) { arp_tuple->flags |= - __cpu_to_le32(WMI_ARPOFF_FLAGS_VALID); + __cpu_to_le32(WMI_ARP_NS_OFF_FLAGS_VALID); memcpy(&arp_tuple->target_ipaddr, &arp->params.ipv4_addr, sizeof(arp_tuple->target_ipaddr)); diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 57b81b8bae82..4892c7d3cce3 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -21,6 +21,7 @@ #include <linux/types.h> #include <net/mac80211.h> #include <linux/ipv6.h> +#include <net/ipv6.h> #include <linux/in.h> /* @@ -2887,13 +2888,12 @@ struct wmi_start_scan_common { } __packed; /* ARP-NS offload data structure */ -#define WMI_NSOFF_MAX_TARGET_IPS 2 -#define WMI_MAX_NS_OFFLOADS 2 -#define WMI_MAX_ARP_OFFLOADS 2 -#define WMI_ARPOFF_FLAGS_VALID BIT(0) +#define WMI_NS_ARP_OFFLOAD 2 +#define WMI_ARP_NS_OFF_FLAGS_VALID BIT(0) #define WMI_IPV4_ARP_REPLY_OFFLOAD 0 #define WMI_ARP_NS_OFFLOAD_DISABLE 0 #define WMI_ARP_NS_OFFLOAD_ENABLE 1 +#define WMI_NSOFF_IPV6_ANYCAST BIT(3) struct wmi_ns_offload_info { struct in6_addr src_addr; @@ -2902,7 +2902,7 @@ struct wmi_ns_offload_info { struct wmi_mac_addr self_macaddr; u8 src_ipv6_addr_valid; struct in6_addr target_addr_valid; - struct in6_addr target_addr_ac_type; + struct in6_addr target_ipv6_ac; u8 slot_idx; } __packed; @@ -2914,13 +2914,13 @@ struct wmi_ns_arp_offload_req { struct in_addr ipv4_addr; struct in6_addr ipv6_addr; } params; - struct wmi_ns_offload_info offload_info; + struct wmi_ns_offload_info info; struct wmi_mac_addr bssid; } __packed; struct wmi_ns_offload { __le32 flags; - struct in6_addr target_ipaddr[WMI_NSOFF_MAX_TARGET_IPS]; + struct in6_addr target_ipaddr[WMI_NS_ARP_OFFLOAD]; struct in6_addr solicitation_ipaddr; struct in6_addr remote_ipaddr; struct wmi_mac_addr target_mac; @@ -5088,7 +5088,8 @@ enum wmi_10_4_vdev_param { #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3) #define WMI_TXBF_STS_CAP_OFFSET_LSB 4 -#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0 +#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70 +#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7) #define WMI_BF_SOUND_DIM_OFFSET_LSB 8 #define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00 diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index 262a1a19196e..2280f47dc227 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -17,6 +17,7 @@ #include "mac.h" #include <net/mac80211.h> +#include <net/addrconf.h> #include "hif.h" #include "core.h" #include "debug.h" @@ -232,6 +233,116 @@ static int ath10k_wow_wakeup(struct ath10k *ar) } static int +ath10k_wow_fill_vdev_ns_offload_struct(struct ath10k_vif *arvif, + bool enable_offload) +{ + struct in6_addr addr[TARGET_NUM_STATIONS]; + struct wmi_ns_arp_offload_req *ns; + struct wireless_dev *wdev; + struct inet6_dev *in6_dev; + struct in6_addr addr_type; + struct inet6_ifaddr *ifa; + struct ifacaddr6 *ifaca; + struct list_head *addr_list; + u32 scope, count = 0; + int i; + + ns = &arvif->ns_offload; + if (!enable_offload) { + ns->offload_type = __cpu_to_le16(WMI_NS_ARP_OFFLOAD); + ns->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_DISABLE); + return 0; + } + + wdev = ieee80211_vif_to_wdev(arvif->vif); + if (!wdev) + return -ENODEV; + + in6_dev = __in6_dev_get(wdev->netdev); + if (!in6_dev) + return -ENODEV; + + memset(&addr, 0, TARGET_NUM_STATIONS * sizeof(struct in6_addr)); + memset(&addr_type, 0, sizeof(struct in6_addr)); + + /* Unicast Addresses */ + read_lock_bh(&in6_dev->lock); + 
list_for_each(addr_list, &in6_dev->addr_list) { + if (count >= TARGET_NUM_STATIONS) { + read_unlock_bh(&in6_dev->lock); + return -EINVAL; + } + + ifa = list_entry(addr_list, struct inet6_ifaddr, if_list); + if (ifa->flags & IFA_F_DADFAILED) + continue; + scope = ipv6_addr_src_scope(&ifa->addr); + switch (scope) { + case IPV6_ADDR_SCOPE_GLOBAL: + case IPV6_ADDR_SCOPE_LINKLOCAL: + memcpy(&addr[count], &ifa->addr.s6_addr, + sizeof(ifa->addr.s6_addr)); + addr_type.s6_addr[count] = IPV6_ADDR_UNICAST; + count += 1; + break; + } + } + + /* Anycast Addresses */ + for (ifaca = in6_dev->ac_list; ifaca; ifaca = ifaca->aca_next) { + if (count >= TARGET_NUM_STATIONS) { + read_unlock_bh(&in6_dev->lock); + return -EINVAL; + } + + scope = ipv6_addr_src_scope(&ifaca->aca_addr); + switch (scope) { + case IPV6_ADDR_SCOPE_GLOBAL: + case IPV6_ADDR_SCOPE_LINKLOCAL: + memcpy(&addr[count], &ifaca->aca_addr, + sizeof(ifaca->aca_addr)); + addr_type.s6_addr[count] = IPV6_ADDR_ANY; + count += 1; + break; + } + } + read_unlock_bh(&in6_dev->lock); + + /* Filling up the request structure + * Filling the self_addr with solicited address + * A Solicited-Node multicast address is created by + * taking the last 24 bits of a unicast or anycast + * address and appending them to the prefix + * + * FF02:0000:0000:0000:0000:0001:FFXX:XXXX + * + * here XX is the unicast/anycast bits + */ + for (i = 0; i < count; i++) { + ns->info.self_addr[i].s6_addr[0] = 0xFF; + ns->info.self_addr[i].s6_addr[1] = 0x02; + ns->info.self_addr[i].s6_addr[11] = 0x01; + ns->info.self_addr[i].s6_addr[12] = 0xFF; + ns->info.self_addr[i].s6_addr[13] = addr[i].s6_addr[13]; + ns->info.self_addr[i].s6_addr[14] = addr[i].s6_addr[14]; + ns->info.self_addr[i].s6_addr[15] = addr[i].s6_addr[15]; + ns->info.slot_idx = i; + memcpy(&ns->info.target_addr[i], &addr[i], + sizeof(struct in6_addr)); + ns->info.target_addr_valid.s6_addr[i] = 1; + ns->info.target_ipv6_ac.s6_addr[i] = addr_type.s6_addr[i]; + memcpy(&ns->params.ipv6_addr, &ns->info.target_addr[i], + sizeof(struct in6_addr)); + } + + ns->offload_type = __cpu_to_le16(WMI_NS_ARP_OFFLOAD); + ns->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_ENABLE); + ns->num_ns_offload_count = __cpu_to_le16(count); + + return 0; +} + +static int ath10k_wow_fill_vdev_arp_offload_struct(struct ath10k_vif *arvif, bool enable_offload) { @@ -291,6 +402,13 @@ static int ath10k_wow_enable_ns_arp_offload(struct ath10k *ar, bool offload) return ret; } + ret = ath10k_wow_fill_vdev_ns_offload_struct(arvif, offload); + if (ret) { + ath10k_err(ar, "NS-offload config failed, vdev: %d\n", + arvif->vdev_id); + return ret; + } + ret = ath10k_wmi_set_arp_ns_offload(ar, arvif); if (ret) { ath10k_err(ar, "failed to send offload cmd, vdev: %d\n", @@ -327,22 +445,6 @@ static int ath10k_config_wow_listen_interval(struct ath10k *ar) return 0; } -void ath10k_wow_op_set_rekey_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *data) -{ - struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - - mutex_lock(&ar->conf_mutex); - memcpy(&arvif->gtk_rekey_data.kek, data->kek, NL80211_KEK_LEN); - memcpy(&arvif->gtk_rekey_data.kck, data->kck, NL80211_KCK_LEN); - arvif->gtk_rekey_data.replay_ctr = - cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr)); - arvif->gtk_rekey_data.valid = true; - mutex_unlock(&ar->conf_mutex); -} - static int ath10k_wow_config_gtk_offload(struct ath10k *ar, bool gtk_offload) { struct ath10k_vif *arvif; @@ -391,6 +493,13 @@ int ath10k_wow_op_suspend(struct 
ieee80211_hw *hw, goto exit; } + ret = ath10k_wow_cleanup(ar); + if (ret) { + ath10k_warn(ar, "failed to clear wow wakeup events: %d\n", + ret); + goto exit; + } + ret = ath10k_wow_config_gtk_offload(ar, true); if (ret) { ath10k_warn(ar, "failed to enable GTK offload: %d\n", ret); @@ -403,18 +512,11 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw, goto disable_gtk_offload; } - ret = ath10k_wow_cleanup(ar); - if (ret) { - ath10k_warn(ar, "failed to clear wow wakeup events: %d\n", - ret); - goto disable_ns_arp_offload; - } - ret = ath10k_wow_set_wakeups(ar, wowlan); if (ret) { ath10k_warn(ar, "failed to set wow wakeup events: %d\n", ret); - goto cleanup; + goto disable_ns_arp_offload; } ret = ath10k_config_wow_listen_interval(ar); @@ -577,8 +679,15 @@ int ath10k_wow_init(struct ath10k *ar) ar->wow.wowlan_support = ath10k_wowlan_support; ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; ar->hw->wiphy->wowlan = &ar->wow.wowlan_support; - - device_set_wakeup_capable(ar->dev, true); + device_init_wakeup(ar->dev, true); return 0; } + +void ath10k_wow_deinit(struct ath10k *ar) +{ + if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features) && + test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)) + device_init_wakeup(ar->dev, false); +} diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h index b53211584052..2ca4ba4848c9 100644 --- a/drivers/net/wireless/ath/ath10k/wow.h +++ b/drivers/net/wireless/ath/ath10k/wow.h @@ -27,13 +27,11 @@ struct ath10k_wow { #ifdef CONFIG_PM int ath10k_wow_init(struct ath10k *ar); +void ath10k_wow_deinit(struct ath10k *ar); int ath10k_wow_op_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); int ath10k_wow_op_resume(struct ieee80211_hw *hw); void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled); -void ath10k_wow_op_set_rekey_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *data); #else static inline int ath10k_wow_init(struct ath10k *ar) @@ -41,5 +39,8 @@ static inline int ath10k_wow_init(struct ath10k *ar) return 0; } +void ath10k_wow_deinit(struct ath10k *ar) +{ +} #endif /* CONFIG_PM */ #endif /* _WOW_H_ */ diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index 654a1e33f827..7c5f189cace7 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -939,7 +939,10 @@ static int open_file_eeprom(struct inode *inode, struct file *file) } for (i = 0; i < eesize; ++i) { - AR5K_EEPROM_READ(i, val); + if (!ath5k_hw_nvram_read(ah, i, &val)) { + ret = -EIO; + goto freebuf; + } buf[i] = val; } diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index 213569d384e7..5cde46c82a03 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -254,8 +254,12 @@ bool ath_is_49ghz_allowed(u16 regdomain) EXPORT_SYMBOL(ath_is_49ghz_allowed); /* Frequency is one where radar detection is required */ -static bool ath_is_radar_freq(u16 center_freq) +static bool ath_is_radar_freq(u16 center_freq, + struct ath_regulatory *reg) + { + if (reg->country_code == CTRY_INDIA) + return (center_freq >= 5500 && center_freq <= 5700); return (center_freq >= 5260 && center_freq <= 5720); } @@ -306,7 +310,7 @@ __ath_reg_apply_beaconing_flags(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_channel *ch) { - if (ath_is_radar_freq(ch->center_freq) || + if (ath_is_radar_freq(ch->center_freq, reg) || 
(ch->flags & IEEE80211_CHAN_RADAR)) return; @@ -395,8 +399,9 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy, } } -/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */ -static void ath_reg_apply_radar_flags(struct wiphy *wiphy) +/* Always apply Radar/DFS rules on freq range 5500 MHz - 5700 MHz */ +static void ath_reg_apply_radar_flags(struct wiphy *wiphy, + struct ath_regulatory *reg) { struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; @@ -409,7 +414,7 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy) for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; - if (!ath_is_radar_freq(ch->center_freq)) + if (!ath_is_radar_freq(ch->center_freq, reg)) continue; /* We always enable radar detection/DFS on this * frequency range. Additionally we also apply on @@ -505,7 +510,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy, struct ath_common *common = container_of(reg, struct ath_common, regulatory); /* We always apply this */ - ath_reg_apply_radar_flags(wiphy); + ath_reg_apply_radar_flags(wiphy, reg); /* * This would happen when we have sent a custom regulatory request @@ -669,7 +674,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg, chan->flags |= IEEE80211_CHAN_DISABLED; } - ath_reg_apply_radar_flags(wiphy); + ath_reg_apply_radar_flags(wiphy, reg); ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg); return 0; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index d224b3dd72ed..3196245ab820 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -461,25 +461,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac) * @dev_addr: optional device address. * * P2P needs mac addresses for P2P device and interface. If no device - * address it specified, these are derived from the primary net device, ie. - * the permanent ethernet address of the device. + * address it specified, these are derived from a random ethernet + * address. */ static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr) { - struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; - bool local_admin = false; + bool random_addr = false; - if (!dev_addr || is_zero_ether_addr(dev_addr)) { - dev_addr = pri_ifp->mac_addr; - local_admin = true; - } + if (!dev_addr || is_zero_ether_addr(dev_addr)) + random_addr = true; - /* Generate the P2P Device Address. This consists of the device's - * primary MAC address with the locally administered bit set. + /* Generate the P2P Device Address obtaining a random ethernet + * address with the locally administered bit set. */ - memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); - if (local_admin) - p2p->dev_addr[0] |= 0x02; + if (random_addr) + eth_random_addr(p2p->dev_addr); + else + memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); /* Generate the P2P Interface Address. If the discovery and connection * BSSCFGs need to simultaneously co-exist, then this address must be diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c index af92f00ca56e..03219cf1693a 100644 --- a/drivers/net/wireless/cnss/cnss_pci.c +++ b/drivers/net/wireless/cnss/cnss_pci.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -569,6 +569,7 @@ out: static void cnss_wlan_gpio_set(struct cnss_wlan_gpio_info *info, bool state) { +#ifndef CONFIG_MSM_GVM_QUIN if (!info->prop) return; @@ -588,6 +589,9 @@ static void cnss_wlan_gpio_set(struct cnss_wlan_gpio_info *info, bool state) pr_debug("%s: %s gpio is now %s\n", __func__, info->name, info->state ? "enabled" : "disabled"); +#else + return; +#endif } static int cnss_configure_wlan_en_gpio(bool state) @@ -1560,7 +1564,6 @@ int cnss_msm_pcie_enumerate(u32 rc_idx) return msm_pcie_enumerate(rc_idx); } #else /* !defined CONFIG_PCI_MSM */ - struct pci_saved_state *cnss_pci_store_saved_state(struct pci_dev *dev) { return NULL; @@ -1570,7 +1573,7 @@ int cnss_msm_pcie_pm_control( enum msm_pcie_pm_opt pm_opt, u32 bus_num, struct pci_dev *pdev, u32 options) { - return -ENODEV; + return 0; } int cnss_pci_load_and_free_saved_state( @@ -1581,27 +1584,27 @@ int cnss_pci_load_and_free_saved_state( int cnss_msm_pcie_shadow_control(struct pci_dev *dev, bool enable) { - return -ENODEV; + return 0; } int cnss_msm_pcie_deregister_event(struct msm_pcie_register_event *reg) { - return -ENODEV; + return 0; } int cnss_msm_pcie_recover_config(struct pci_dev *dev) { - return -ENODEV; + return 0; } int cnss_msm_pcie_register_event(struct msm_pcie_register_event *reg) { - return -ENODEV; + return 0; } int cnss_msm_pcie_enumerate(u32 rc_idx) { - return -EPROBE_DEFER; + return 0; } #endif @@ -2870,7 +2873,9 @@ static int cnss_probe(struct platform_device *pdev) struct esoc_desc *desc; const char *client_desc; struct device *dev = &pdev->dev; +#ifndef CONFIG_MSM_GVM_QUIN u32 rc_num; +#endif struct resource *res; u32 ramdump_size = 0; u32 smmu_iova_address[2]; @@ -2905,6 +2910,7 @@ static int cnss_probe(struct platform_device *pdev) goto err_get_rc; } +#ifndef CONFIG_MSM_GVM_QUIN ret = of_property_read_u32(dev->of_node, "qcom,wlan-rc-num", &rc_num); if (ret) { pr_err("%s: Failed to find PCIe RC number!\n", __func__); @@ -2916,6 +2922,7 @@ static int cnss_probe(struct platform_device *pdev) pr_err("%s: Failed to enable PCIe RC%x!\n", __func__, rc_num); goto err_pcie_enumerate; } +#endif penv->pcie_link_state = PCIE_LINK_UP; @@ -3096,7 +3103,9 @@ err_subsys_reg: devm_unregister_esoc_client(&pdev->dev, penv->esoc_desc); err_esoc_reg: +#ifndef CONFIG_MSM_GVM_QUIN err_pcie_enumerate: +#endif err_get_rc: cnss_configure_wlan_en_gpio(WLAN_EN_LOW); cnss_wlan_release_resources(); diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c index dcaf3203f197..fc35b0892768 100644 --- a/drivers/net/wireless/cnss2/main.c +++ b/drivers/net/wireless/cnss2/main.c @@ -1690,6 +1690,7 @@ static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv) static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv) { + plat_priv->cal_done = true; cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01); cnss_shutdown(plat_priv); clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state); diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h index f658b6d391b8..a36281cb560f 100644 --- a/drivers/net/wireless/cnss2/main.h +++ b/drivers/net/wireless/cnss2/main.h @@ -107,6 +107,7 @@ struct cnss_fw_mem { void *va; phys_addr_t pa; bool valid; + u32 type; }; enum cnss_driver_event_type { @@ -192,7 +193,8 @@ struct cnss_plat_data { struct wlfw_rf_board_info_s_v01 board_info; struct wlfw_soc_info_s_v01 soc_info; 
struct wlfw_fw_version_info_s_v01 fw_version_info; - struct cnss_fw_mem fw_mem; + u32 fw_mem_seg_len; + struct cnss_fw_mem fw_mem[QMI_WLFW_MAX_NUM_MEM_SEG_V01]; struct cnss_fw_mem m3_mem; struct cnss_pin_connect_result pin_result; struct dentry *root_dentry; @@ -204,6 +206,7 @@ struct cnss_plat_data { u32 diag_reg_read_mem_type; u32 diag_reg_read_len; u8 *diag_reg_read_buf; + bool cal_done; }; void *cnss_bus_dev_to_bus_priv(struct device *dev); diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index 2ee0edda1b5b..5746366ff852 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -737,18 +737,21 @@ int cnss_pm_request_resume(struct cnss_pci_data *pci_priv) int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv) { struct cnss_plat_data *plat_priv = pci_priv->plat_priv; - struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem; - - if (!fw_mem->va && fw_mem->size) { - fw_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev, - fw_mem->size, &fw_mem->pa, - GFP_KERNEL); - if (!fw_mem->va) { - cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx\n", - fw_mem->size); - fw_mem->size = 0; - - return -ENOMEM; + struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; + int i; + + for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { + if (!fw_mem[i].va && fw_mem[i].size) { + fw_mem[i].va = + dma_alloc_coherent(&pci_priv->pci_dev->dev, + fw_mem[i].size, + &fw_mem[i].pa, GFP_KERNEL); + if (!fw_mem[i].va) { + cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n", + fw_mem[i].size, fw_mem[i].type); + + return -ENOMEM; + } } } @@ -758,17 +761,25 @@ int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv) static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv) { struct cnss_plat_data *plat_priv = pci_priv->plat_priv; - struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem; - - if (fw_mem->va && fw_mem->size) { - cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n", - fw_mem->va, &fw_mem->pa, fw_mem->size); - dma_free_coherent(&pci_priv->pci_dev->dev, fw_mem->size, - fw_mem->va, fw_mem->pa); - fw_mem->va = NULL; - fw_mem->pa = 0; - fw_mem->size = 0; + struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; + int i; + + for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { + if (fw_mem[i].va && fw_mem[i].size) { + cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n", + fw_mem[i].va, &fw_mem[i].pa, + fw_mem[i].size, fw_mem[i].type); + dma_free_coherent(&pci_priv->pci_dev->dev, + fw_mem[i].size, fw_mem[i].va, + fw_mem[i].pa); + fw_mem[i].va = NULL; + fw_mem[i].pa = 0; + fw_mem[i].size = 0; + fw_mem[i].type = 0; + } } + + plat_priv->fw_mem_seg_len = 0; } int cnss_pci_load_m3(struct cnss_pci_data *pci_priv) diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c index f4344aee54ee..b8777c18d252 100644 --- a/drivers/net/wireless/cnss2/qmi.c +++ b/drivers/net/wireless/cnss2/qmi.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -159,10 +159,9 @@ static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv) memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); - req.daemon_support_valid = 1; - req.daemon_support = daemon_support; - - cnss_pr_dbg("daemon_support is %d\n", req.daemon_support); + req.num_clients_valid = 1; + req.num_clients = daemon_support ? 2 : 1; + cnss_pr_dbg("Number of clients is %d\n", req.num_clients); req.wake_msi = cnss_get_wake_msi(plat_priv); if (req.wake_msi) { @@ -170,6 +169,19 @@ static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv) req.wake_msi_valid = 1; } + req.bdf_support_valid = 1; + req.bdf_support = 1; + + req.m3_support_valid = 1; + req.m3_support = 1; + + req.m3_cache_support_valid = 1; + req.m3_cache_support = 1; + + req.cal_done_valid = 1; + req.cal_done = plat_priv->cal_done; + cnss_pr_dbg("Calibration done is %d\n", plat_priv->cal_done); + req_desc.max_msg_len = WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN; req_desc.msg_id = QMI_WLFW_HOST_CAP_REQ_V01; req_desc.ei_array = wlfw_host_cap_req_msg_v01_ei; @@ -221,8 +233,8 @@ static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv) req.request_mem_enable = 1; req.fw_mem_ready_enable_valid = 1; req.fw_mem_ready_enable = 1; - req.cold_boot_cal_done_enable_valid = 1; - req.cold_boot_cal_done_enable = 1; + req.fw_init_done_enable_valid = 1; + req.fw_init_done_enable = 1; req.pin_connect_result_enable_valid = 1; req.pin_connect_result_enable = 1; @@ -260,27 +272,48 @@ static int cnss_wlfw_request_mem_ind_hdlr(struct cnss_plat_data *plat_priv, void *msg, unsigned int msg_len) { struct msg_desc ind_desc; - struct wlfw_request_mem_ind_msg_v01 ind_msg; - struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem; - int ret = 0; + struct wlfw_request_mem_ind_msg_v01 *ind_msg; + int ret = 0, i; + + ind_msg = kzalloc(sizeof(*ind_msg), GFP_KERNEL); + if (!ind_msg) + return -ENOMEM; ind_desc.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01; ind_desc.max_msg_len = WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN; ind_desc.ei_array = wlfw_request_mem_ind_msg_v01_ei; - ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len); + ret = qmi_kernel_decode(&ind_desc, ind_msg, msg, msg_len); if (ret < 0) { cnss_pr_err("Failed to decode request memory indication, msg_len: %u, err = %d\n", ret, msg_len); - return ret; + goto out; } - fw_mem->size = ind_msg.size; + if (ind_msg->mem_seg_len == 0 || + ind_msg->mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) { + cnss_pr_err("Invalid memory segment length: %u\n", + ind_msg->mem_seg_len); + ret = -EINVAL; + goto out; + } + + cnss_pr_dbg("FW memory segment count is %u\n", ind_msg->mem_seg_len); + plat_priv->fw_mem_seg_len = ind_msg->mem_seg_len; + for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { + plat_priv->fw_mem[i].type = ind_msg->mem_seg[i].type; + plat_priv->fw_mem[i].size = ind_msg->mem_seg[i].size; + } cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM, 0, NULL); + kfree(ind_msg); return 0; + +out: + kfree(ind_msg); + return ret; } static int cnss_qmi_pin_result_ind_hdlr(struct cnss_plat_data *plat_priv, @@ -317,29 +350,46 @@ static int cnss_qmi_pin_result_ind_hdlr(struct cnss_plat_data *plat_priv, int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv) { - struct wlfw_respond_mem_req_msg_v01 req; - struct wlfw_respond_mem_resp_msg_v01 resp; + struct wlfw_respond_mem_req_msg_v01 *req; + struct 
wlfw_respond_mem_resp_msg_v01 *resp; struct msg_desc req_desc, resp_desc; - struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem; - int ret = 0; + struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; + int ret = 0, i; cnss_pr_dbg("Sending respond memory message, state: 0x%lx\n", plat_priv->driver_state); - if (!fw_mem->pa || !fw_mem->size) { - cnss_pr_err("Memory for FW is not available!\n"); - ret = -ENOMEM; - goto out; - } + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) + return -ENOMEM; - cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n", - fw_mem->va, &fw_mem->pa, fw_mem->size); + req->mem_seg_len = plat_priv->fw_mem_seg_len; + for (i = 0; i < req->mem_seg_len; i++) { + if (!fw_mem[i].pa || !fw_mem[i].size) { + if (fw_mem[i].type == 0) { + cnss_pr_err("Invalid memory for FW type, segment = %d\n", + i); + ret = -EINVAL; + goto out; + } + cnss_pr_err("Memory for FW is not available for type: %u\n", + fw_mem[i].type); + ret = -ENOMEM; + goto out; + } - memset(&req, 0, sizeof(req)); - memset(&resp, 0, sizeof(resp)); + cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n", + fw_mem[i].va, &fw_mem[i].pa, + fw_mem[i].size, fw_mem[i].type); - req.addr = fw_mem->pa; - req.size = fw_mem->size; + req->mem_seg[i].addr = fw_mem[i].pa; + req->mem_seg[i].size = fw_mem[i].size; + req->mem_seg[i].type = fw_mem[i].type; + } req_desc.max_msg_len = WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN; req_desc.msg_id = QMI_WLFW_RESPOND_MEM_REQ_V01; @@ -349,8 +399,8 @@ int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv) resp_desc.msg_id = QMI_WLFW_RESPOND_MEM_RESP_V01; resp_desc.ei_array = wlfw_respond_mem_resp_msg_v01_ei; - ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req, - sizeof(req), &resp_desc, &resp, sizeof(resp), + ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, req, + sizeof(*req), &resp_desc, resp, sizeof(*resp), QMI_WLFW_TIMEOUT_MS); if (ret < 0) { cnss_pr_err("Failed to send respond memory request, err = %d\n", @@ -358,16 +408,21 @@ int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv) goto out; } - if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { cnss_pr_err("Respond memory request failed, result: %d, err: %d\n", - resp.resp.result, resp.resp.error); - ret = resp.resp.result; + resp->resp.result, resp->resp.error); + ret = resp->resp.result; goto out; } + kfree(req); + kfree(resp); return 0; + out: CNSS_ASSERT(0); + kfree(req); + kfree(resp); return ret; } @@ -908,12 +963,12 @@ static void cnss_wlfw_clnt_ind(struct qmi_handle *handle, CNSS_DRIVER_EVENT_FW_MEM_READY, 0, NULL); break; - case QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01: + case QMI_WLFW_FW_READY_IND_V01: cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE, 0, NULL); break; - case QMI_WLFW_FW_READY_IND_V01: + case QMI_WLFW_FW_INIT_DONE_IND_V01: cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_READY, 0, NULL); @@ -974,11 +1029,11 @@ int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv) cnss_pr_info("QMI WLFW service connected, state: 0x%lx\n", plat_priv->driver_state); - ret = cnss_wlfw_host_cap_send_sync(plat_priv); + ret = cnss_wlfw_ind_register_send_sync(plat_priv); if (ret < 0) goto out; - ret = cnss_wlfw_ind_register_send_sync(plat_priv); + ret = cnss_wlfw_host_cap_send_sync(plat_priv); if (ret < 0) goto out; diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c 
b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c index 7d6a771bc0d5..bbf707b869bd 100644 --- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c +++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -62,7 +62,7 @@ static struct elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -97,7 +97,7 @@ static struct elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -123,7 +123,7 @@ static struct elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -140,7 +140,7 @@ static struct elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -175,7 +175,131 @@ static struct elem_info wlfw_memory_region_info_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_mem_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_cfg_s_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_cfg_s_v01, + size), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_cfg_s_v01, + secure_flag), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_mem_seg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_s_v01, + size), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_mem_type_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_s_v01, + type), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_s_v01, + mem_cfg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_MEM_CFG_V01, + .elem_size = sizeof(struct wlfw_mem_cfg_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_s_v01, + mem_cfg), + .ei_array = wlfw_mem_cfg_s_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_mem_seg_resp_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, + addr), + }, + { + .data_type = 
QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, + size), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_mem_type_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, + type), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, + restore), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -201,7 +325,7 @@ static struct elem_info wlfw_rf_chip_info_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -218,7 +342,7 @@ static struct elem_info wlfw_rf_board_info_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -235,7 +359,7 @@ static struct elem_info wlfw_soc_info_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -261,7 +385,7 @@ static struct elem_info wlfw_fw_version_info_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -418,7 +542,7 @@ struct elem_info wlfw_ind_register_req_msg_v01_ei[] = { .is_array = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, - cold_boot_cal_done_enable_valid), + fw_init_done_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, @@ -427,7 +551,7 @@ struct elem_info wlfw_ind_register_req_msg_v01_ei[] = { .is_array = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, - cold_boot_cal_done_enable), + fw_init_done_enable), }, { .data_type = QMI_OPT_FLAG, @@ -448,9 +572,45 @@ struct elem_info wlfw_ind_register_req_msg_v01_ei[] = { rejuvenate_enable), }, { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + xo_cal_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + xo_cal_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + cal_done_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + cal_done_enable), + }, + { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -489,7 +649,7 @@ struct elem_info wlfw_ind_register_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -497,7 +657,7 @@ struct elem_info wlfw_fw_ready_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -505,7 +665,7 @@ struct elem_info wlfw_msa_ready_ind_msg_v01_ei[] = { { 
.data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -573,7 +733,7 @@ struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -608,7 +768,7 @@ struct elem_info wlfw_wlan_mode_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -626,7 +786,7 @@ struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -764,7 +924,7 @@ struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -782,7 +942,7 @@ struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -790,7 +950,7 @@ struct elem_info wlfw_cap_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -920,7 +1080,7 @@ struct elem_info wlfw_cap_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1054,7 +1214,7 @@ struct elem_info wlfw_bdf_download_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1073,7 +1233,7 @@ struct elem_info wlfw_bdf_download_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1117,7 +1277,7 @@ struct elem_info wlfw_cal_report_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1135,7 +1295,7 @@ struct elem_info wlfw_cal_report_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1153,7 +1313,7 @@ struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1269,7 +1429,7 @@ struct elem_info wlfw_cal_download_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1288,7 +1448,7 @@ struct elem_info wlfw_cal_download_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1316,7 +1476,7 @@ struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1342,7 +1502,7 @@ struct elem_info wlfw_cal_update_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1459,7 +1619,7 @@ struct elem_info wlfw_cal_update_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1485,7 +1645,7 @@ struct elem_info 
wlfw_msa_info_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1522,7 +1682,7 @@ struct elem_info wlfw_msa_info_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1530,7 +1690,7 @@ struct elem_info wlfw_msa_ready_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1548,7 +1708,7 @@ struct elem_info wlfw_msa_ready_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1574,7 +1734,7 @@ struct elem_info wlfw_ini_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1592,7 +1752,7 @@ struct elem_info wlfw_ini_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1627,7 +1787,7 @@ struct elem_info wlfw_athdiag_read_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1676,7 +1836,7 @@ struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1724,7 +1884,7 @@ struct elem_info wlfw_athdiag_write_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1743,7 +1903,7 @@ struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1760,7 +1920,7 @@ struct elem_info wlfw_vbatt_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1778,7 +1938,7 @@ struct elem_info wlfw_vbatt_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1804,7 +1964,7 @@ struct elem_info wlfw_mac_addr_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1822,7 +1982,7 @@ struct elem_info wlfw_mac_addr_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1834,16 +1994,16 @@ struct elem_info wlfw_host_cap_req_msg_v01_ei[] = { .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_host_cap_req_msg_v01, - daemon_support_valid), + num_clients_valid), }, { - .data_type = QMI_UNSIGNED_1_BYTE, + .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, - .elem_size = sizeof(u8), + .elem_size = sizeof(u32), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_host_cap_req_msg_v01, - daemon_support), + num_clients), }, { .data_type = QMI_OPT_FLAG, @@ -1864,9 +2024,216 @@ struct elem_info wlfw_host_cap_req_msg_v01_ei[] = { wake_msi), }, { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + gpios_valid), + }, + { + .data_type 
= QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + gpios_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01, + .elem_size = sizeof(u32), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + gpios), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + nm_modem_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + nm_modem), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_filesys_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_filesys_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_done_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, 
+ .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_done), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_bucket_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_bucket), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_cfg_mode_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_cfg_mode), + }, + { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1884,50 +2251,61 @@ struct elem_info wlfw_host_cap_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info wlfw_request_mem_ind_msg_v01_ei[] = { { - .data_type = QMI_UNSIGNED_4_BYTE, + .data_type = QMI_DATA_LEN, .elem_len = 1, - .elem_size = sizeof(u32), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_request_mem_ind_msg_v01, - size), + mem_seg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01, + .elem_size = sizeof(struct wlfw_mem_seg_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_request_mem_ind_msg_v01, + mem_seg), + .ei_array = wlfw_mem_seg_s_v01_ei, }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info wlfw_respond_mem_req_msg_v01_ei[] = { { - .data_type = QMI_UNSIGNED_8_BYTE, + .data_type = QMI_DATA_LEN, .elem_len = 1, - .elem_size = sizeof(u64), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, - addr), + mem_seg_len), }, { - .data_type = QMI_UNSIGNED_4_BYTE, - .elem_len = 1, - .elem_size = sizeof(u32), - .is_array = NO_ARRAY, - .tlv_type = 0x02, + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01, + .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x01, .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, - size), + mem_seg), + .ei_array = wlfw_mem_seg_resp_s_v01_ei, }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1945,7 +2323,7 @@ struct elem_info wlfw_respond_mem_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1953,15 +2331,15 @@ struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; -struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[] = { +struct elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2041,7 +2419,7 @@ struct 
elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2049,7 +2427,7 @@ struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2068,7 +2446,7 @@ struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2096,7 +2474,7 @@ struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2155,7 +2533,7 @@ struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2181,7 +2559,7 @@ struct elem_info wlfw_m3_info_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2199,7 +2577,7 @@ struct elem_info wlfw_m3_info_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2216,6 +2594,14 @@ struct elem_info wlfw_xo_cal_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_done_ind_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h index 9b56eb0c02fb..00a873d11d14 100644 --- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h +++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -23,10 +23,12 @@ #define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025 #define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037 #define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A +#define QMI_WLFW_CAL_DONE_IND_V01 0x003E #define QMI_WLFW_HOST_CAP_REQ_V01 0x0034 #define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B #define QMI_WLFW_M3_INFO_REQ_V01 0x003C #define QMI_WLFW_CAP_REQ_V01 0x0024 +#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038 #define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026 #define QMI_WLFW_M3_INFO_RESP_V01 0x003C #define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029 @@ -42,7 +44,6 @@ #define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022 #define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020 #define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023 -#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0038 #define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 #define QMI_WLFW_REJUVENATE_IND_V01 0x0039 #define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B @@ -72,13 +73,16 @@ #define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020 #define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2 +#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 32 #define QMI_WLFW_MAX_NUM_CAL_V01 5 #define QMI_WLFW_MAX_DATA_SIZE_V01 6144 #define QMI_WLFW_FUNCTION_NAME_LEN_V01 128 #define QMI_WLFW_MAX_NUM_CE_V01 12 #define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32 #define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144 +#define QMI_WLFW_MAX_NUM_GPIO_V01 32 #define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128 +#define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2 #define QMI_WLFW_MAX_STR_LEN_V01 16 #define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24 #define QMI_WLFW_MAC_ADDR_SIZE_V01 6 @@ -117,6 +121,17 @@ enum wlfw_pipedir_enum_v01 { WLFW_PIPEDIR_ENUM_MAX_VAL_V01 = INT_MAX, }; +enum wlfw_mem_type_enum_v01 { + WLFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN, + QMI_WLFW_MEM_TYPE_MSA_V01 = 0, + QMI_WLFW_MEM_TYPE_DDR_V01 = 1, + QMI_WLFW_MEM_BDF_V01 = 2, + QMI_WLFW_MEM_M3_V01 = 3, + QMI_WLFW_MEM_CAL_V01 = 4, + QMI_WLFW_MEM_DPD_V01 = 5, + WLFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX, +}; + #define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00) #define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01) #define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02) @@ -128,6 +143,7 @@ enum wlfw_pipedir_enum_v01 { #define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL) #define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL) #define QMI_WLFW_FW_MEM_READY_V01 ((u64)0x08ULL) +#define QMI_WLFW_FW_INIT_DONE_V01 ((u64)0x10ULL) #define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL) @@ -160,6 +176,26 @@ struct wlfw_memory_region_info_s_v01 { u8 secure_flag; }; +struct wlfw_mem_cfg_s_v01 { + u64 offset; + u32 size; + u8 secure_flag; +}; + +struct wlfw_mem_seg_s_v01 { + u32 size; + enum wlfw_mem_type_enum_v01 type; + u32 mem_cfg_len; + struct wlfw_mem_cfg_s_v01 mem_cfg[QMI_WLFW_MAX_NUM_MEM_CFG_V01]; +}; + +struct wlfw_mem_seg_resp_s_v01 { + u64 addr; + u32 size; + enum wlfw_mem_type_enum_v01 type; + u8 restore; +}; + struct wlfw_rf_chip_info_s_v01 { u32 chip_id; u32 chip_family; @@ -195,13 +231,17 @@ struct wlfw_ind_register_req_msg_v01 { u8 request_mem_enable; u8 fw_mem_ready_enable_valid; u8 fw_mem_ready_enable; - u8 cold_boot_cal_done_enable_valid; - u8 cold_boot_cal_done_enable; + u8 fw_init_done_enable_valid; + u8 fw_init_done_enable; u8 rejuvenate_enable_valid; u32 rejuvenate_enable; + u8 xo_cal_enable_valid; + u8 xo_cal_enable; + u8 cal_done_enable_valid; + u8 cal_done_enable; }; -#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 46 +#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 54 extern struct elem_info 
wlfw_ind_register_req_msg_v01_ei[]; struct wlfw_ind_register_resp_msg_v01 { @@ -533,13 +573,36 @@ struct wlfw_mac_addr_resp_msg_v01 { extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[]; struct wlfw_host_cap_req_msg_v01 { - u8 daemon_support_valid; - u8 daemon_support; + u8 num_clients_valid; + u32 num_clients; u8 wake_msi_valid; u32 wake_msi; -}; - -#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 11 + u8 gpios_valid; + u32 gpios_len; + u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01]; + u8 nm_modem_valid; + u8 nm_modem; + u8 bdf_support_valid; + u8 bdf_support; + u8 bdf_cache_support_valid; + u8 bdf_cache_support; + u8 m3_support_valid; + u8 m3_support; + u8 m3_cache_support_valid; + u8 m3_cache_support; + u8 cal_filesys_support_valid; + u8 cal_filesys_support; + u8 cal_cache_support_valid; + u8 cal_cache_support; + u8 cal_done_valid; + u8 cal_done; + u8 mem_bucket_valid; + u32 mem_bucket; + u8 mem_cfg_mode_valid; + u8 mem_cfg_mode; +}; + +#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189 extern struct elem_info wlfw_host_cap_req_msg_v01_ei[]; struct wlfw_host_cap_resp_msg_v01 { @@ -550,18 +613,19 @@ struct wlfw_host_cap_resp_msg_v01 { extern struct elem_info wlfw_host_cap_resp_msg_v01_ei[]; struct wlfw_request_mem_ind_msg_v01 { - u32 size; + u32 mem_seg_len; + struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01]; }; -#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 7 +#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 1124 extern struct elem_info wlfw_request_mem_ind_msg_v01_ei[]; struct wlfw_respond_mem_req_msg_v01 { - u64 addr; - u32 size; + u32 mem_seg_len; + struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01]; }; -#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 18 +#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 548 extern struct elem_info wlfw_respond_mem_req_msg_v01_ei[]; struct wlfw_respond_mem_resp_msg_v01 { @@ -578,12 +642,12 @@ struct wlfw_fw_mem_ready_ind_msg_v01 { #define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0 extern struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[]; -struct wlfw_cold_boot_cal_done_ind_msg_v01 { +struct wlfw_fw_init_done_ind_msg_v01 { char placeholder; }; -#define WLFW_COLD_BOOT_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0 -extern struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[]; +#define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_fw_init_done_ind_msg_v01_ei[]; struct wlfw_rejuvenate_ind_msg_v01 { u8 cause_for_rejuvenation_valid; @@ -654,4 +718,11 @@ struct wlfw_xo_cal_ind_msg_v01 { #define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4 extern struct elem_info wlfw_xo_cal_ind_msg_v01_ei[]; +struct wlfw_cal_done_ind_msg_v01 { + char placeholder; +}; + +#define WLFW_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_cal_done_ind_msg_v01_ei[]; + #endif diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index f877fbc7d7af..8a9164da6c50 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -699,16 +699,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val) val != PS_MANUAL_POLL) return -EINVAL; - old_ps = data->ps; - data->ps = val; - - local_bh_disable(); if (val == PS_MANUAL_POLL) { + if (data->ps != PS_ENABLED) + return -EINVAL; + local_bh_disable(); ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_ps_poll, data); - data->ps_poll_pending = true; - } else if (old_ps == PS_DISABLED && val != PS_DISABLED) { + local_bh_enable(); + return 0; + } + old_ps = data->ps; + data->ps = 
val; + + local_bh_disable(); + if (old_ps == PS_DISABLED && val != PS_DISABLED) { ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_nullfunc_ps, data); diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c index fbb1986eda3c..686b1b5dd394 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mcu.c +++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c @@ -66,8 +66,10 @@ mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len) WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */ skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL); - skb_reserve(skb, MT_DMA_HDR_LEN); - memcpy(skb_put(skb, len), data, len); + if (skb) { + skb_reserve(skb, MT_DMA_HDR_LEN); + memcpy(skb_put(skb, len), data, len); + } return skb; } @@ -170,6 +172,8 @@ static int mt7601u_mcu_function_select(struct mt7601u_dev *dev, }; skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + if (!skb) + return -ENOMEM; return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5); } @@ -205,6 +209,8 @@ mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val) }; skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + if (!skb) + return -ENOMEM; return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true); } diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 0881ba8535f4..c78abfc7bd96 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -247,7 +247,10 @@ static const UCHAR b4_default_startup_parms[] = { 0x04, 0x08, /* Noise gain, limit offset */ 0x28, 0x28, /* det rssi, med busy offsets */ 7, /* det sync thresh */ - 0, 2, 2 /* test mode, min, max */ + 0, 2, 2, /* test mode, min, max */ + 0, /* rx/tx delay */ + 0, 0, 0, 0, 0, 0, /* current BSS id */ + 0 /* hop set */ }; /*===========================================================================*/ @@ -598,7 +601,7 @@ static void init_startup_params(ray_dev_t *local) * a_beacon_period = hops a_beacon_period = KuS *//* 64ms = 010000 */ if (local->fw_ver == 0x55) { - memcpy((UCHAR *) &local->sparm.b4, b4_default_startup_parms, + memcpy(&local->sparm.b4, b4_default_startup_parms, sizeof(struct b4_startup_params)); /* Translate sane kus input values to old build 4/5 format */ /* i = hop time in uS truncated to 3 bytes */ diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index c48b7e8ee0d6..b51815eccdb3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -1572,7 +1572,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) dev_kfree_skb_irq(skb); ring->idx = (ring->idx + 1) % ring->entries; } + + if (rtlpriv->use_new_trx_flow) { + rtlpci->tx_ring[i].cur_tx_rp = 0; + rtlpci->tx_ring[i].cur_tx_wp = 0; + } + ring->idx = 0; + ring->entries = rtlpci->txringcount[i]; } } spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 5a3df9198ddf..89515f02c353 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -1123,7 +1123,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw) /* Configuration Space offset 0x70f BIT7 is used to control L0S */ tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f); - _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7)); + _rtl8723be_dbi_write(rtlpriv, 
0x70f, tmp8 | BIT(7) | + ASPM_L1_LATENCY << 3); /* Configuration Space offset 0x719 Bit3 is for L1 * BIT4 is for clock request diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index a13d1f2b5912..259590013382 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -3425,6 +3425,10 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf) /* because rndis_command() sleeps we need to use workqueue */ priv->workqueue = create_singlethread_workqueue("rndis_wlan"); + if (!priv->workqueue) { + wiphy_free(wiphy); + return -ENOMEM; + } INIT_WORK(&priv->work, rndis_wlan_worker); INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller); INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results); diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 9bee3f11898a..869411f55d88 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -1196,8 +1196,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc; - wl1251_acx_arp_ip_filter(wl, enable, addr); - + ret = wl1251_acx_arp_ip_filter(wl, enable, addr); if (ret < 0) goto out_sleep; } diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c index af62c4c854f3..b4f31dad40d6 100644 --- a/drivers/nfc/nfcmrvl/fw_dnld.c +++ b/drivers/nfc/nfcmrvl/fw_dnld.c @@ -17,7 +17,7 @@ */ #include <linux/module.h> -#include <linux/unaligned/access_ok.h> +#include <asm/unaligned.h> #include <linux/firmware.h> #include <linux/nfc.h> #include <net/nfc/nci.h> diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c index a7faa0bcc01e..fc8e78a29d77 100644 --- a/drivers/nfc/nfcmrvl/spi.c +++ b/drivers/nfc/nfcmrvl/spi.c @@ -96,10 +96,9 @@ static int nfcmrvl_spi_nci_send(struct nfcmrvl_private *priv, /* Send the SPI packet */ err = nci_spi_send(drv_data->nci_spi, &drv_data->handshake_completion, skb); - if (err != 0) { + if (err) nfc_err(priv->dev, "spi_send failed %d", err); - kfree_skb(skb); - } + return err; } diff --git a/drivers/of/device.c b/drivers/of/device.c index 97a280d50d6d..7c509bff9295 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -223,7 +223,7 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len) str[i] = '_'; } - return tsize; + return repend; } EXPORT_SYMBOL_GPL(of_device_get_modalias); diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 78530d1714dc..bdce0679674c 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -2646,6 +2646,7 @@ enum parport_pc_pci_cards { netmos_9901, netmos_9865, quatech_sppxp100, + wch_ch382l, }; @@ -2708,6 +2709,7 @@ static struct parport_pc_pci { /* netmos_9901 */ { 1, { { 0, -1 }, } }, /* netmos_9865 */ { 1, { { 0, -1 }, } }, /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, + /* wch_ch382l */ { 1, { { 2, -1 }, } }, }; static const struct pci_device_id parport_pc_pci_tbl[] = { @@ -2797,6 +2799,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = { /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, + /* WCH CH382L PCI-E single parallel port card */ + { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l }, { 0, } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl); diff --git a/drivers/pci/probe.c 
b/drivers/pci/probe.c index 193ac13de49b..566897f24dee 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -230,7 +230,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, res->flags |= IORESOURCE_ROM_ENABLE; l64 = l & PCI_ROM_ADDRESS_MASK; sz64 = sz & PCI_ROM_ADDRESS_MASK; - mask64 = (u32)PCI_ROM_ADDRESS_MASK; + mask64 = PCI_ROM_ADDRESS_MASK; } if (res->flags & IORESOURCE_MEM_64) { diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 254192b5dad1..4eb1cf0ed00c 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3631,6 +3631,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642, quirk_dma_func1_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645, + quirk_dma_func1_alias); /* https://bugs.gentoo.org/show_bug.cgi?id=497630 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB388_ESD, diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 25062966cbfa..8b2f8b2a574e 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -63,7 +63,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno) mask = (u32)PCI_BASE_ADDRESS_IO_MASK; new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK; } else if (resno == PCI_ROM_RESOURCE) { - mask = (u32)PCI_ROM_ADDRESS_MASK; + mask = PCI_ROM_ADDRESS_MASK; } else { mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 39400dda27c2..d6d671a925e1 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -323,10 +323,16 @@ validate_group(struct perf_event *event) return 0; } +static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu) +{ + struct platform_device *pdev = armpmu->plat_device; + + return pdev ? dev_get_platdata(&pdev->dev) : NULL; +} + static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) { struct arm_pmu *armpmu; - struct platform_device *plat_device; struct arm_pmu_platdata *plat; int ret; u64 start_clock, finish_clock; @@ -338,8 +344,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) * dereference. 
*/ armpmu = *(void **)dev; - plat_device = armpmu->plat_device; - plat = dev_get_platdata(&plat_device->dev); + + plat = armpmu_get_platdata(armpmu); start_clock = sched_clock(); if (plat && plat->handle_irq) diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 2686a4450dfc..f4639a9f1e48 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -979,19 +979,16 @@ struct pinctrl_state *pinctrl_lookup_state(struct pinctrl *p, EXPORT_SYMBOL_GPL(pinctrl_lookup_state); /** - * pinctrl_select_state() - select/activate/program a pinctrl state to HW + * pinctrl_commit_state() - select/activate/program a pinctrl state to HW * @p: the pinctrl handle for the device that requests configuration * @state: the state handle to select/activate/program */ -int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state) +static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state) { struct pinctrl_setting *setting, *setting2; struct pinctrl_state *old_state = p->state; int ret; - if (p->state == state) - return 0; - if (p->state) { /* * For each pinmux setting in the old state, forget SW's record @@ -1055,6 +1052,19 @@ unapply_new_state: return ret; } + +/** + * pinctrl_select_state() - select/activate/program a pinctrl state to HW + * @p: the pinctrl handle for the device that requests configuration + * @state: the state handle to select/activate/program + */ +int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state) +{ + if (p->state == state) + return 0; + + return pinctrl_commit_state(p, state); +} EXPORT_SYMBOL_GPL(pinctrl_select_state); static void devm_pinctrl_release(struct device *dev, void *res) @@ -1223,7 +1233,7 @@ void pinctrl_unregister_map(struct pinctrl_map const *map) int pinctrl_force_sleep(struct pinctrl_dev *pctldev) { if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep)) - return pinctrl_select_state(pctldev->p, pctldev->hog_sleep); + return pinctrl_commit_state(pctldev->p, pctldev->hog_sleep); return 0; } EXPORT_SYMBOL_GPL(pinctrl_force_sleep); @@ -1235,7 +1245,7 @@ EXPORT_SYMBOL_GPL(pinctrl_force_sleep); int pinctrl_force_default(struct pinctrl_dev *pctldev) { if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default)) - return pinctrl_select_state(pctldev->p, pctldev->hog_default); + return pinctrl_commit_state(pctldev->p, pctldev->hog_default); return 0; } EXPORT_SYMBOL_GPL(pinctrl_force_default); diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 92430f781eb7..a0b8c8a8c323 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -59,12 +59,14 @@ static int send_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg) { int ret; + int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg); if (ec_dev->proto_version > 2) - ret = ec_dev->pkt_xfer(ec_dev, msg); + xfer_fxn = ec_dev->pkt_xfer; else - ret = ec_dev->cmd_xfer(ec_dev, msg); + xfer_fxn = ec_dev->cmd_xfer; + ret = (*xfer_fxn)(ec_dev, msg); if (msg->result == EC_RES_IN_PROGRESS) { int i; struct cros_ec_command *status_msg; @@ -87,7 +89,7 @@ static int send_command(struct cros_ec_device *ec_dev, for (i = 0; i < EC_COMMAND_RETRIES; i++) { usleep_range(10000, 11000); - ret = ec_dev->cmd_xfer(ec_dev, status_msg); + ret = (*xfer_fxn)(ec_dev, status_msg); if (ret < 0) break; diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c index f3baf9973989..24f1630a8b3f 100644 --- a/drivers/platform/chrome/cros_ec_sysfs.c 
+++ b/drivers/platform/chrome/cros_ec_sysfs.c @@ -187,7 +187,7 @@ static ssize_t show_ec_version(struct device *dev, count += scnprintf(buf + count, PAGE_SIZE - count, "Build info: EC error %d\n", msg->result); else { - msg->data[sizeof(msg->data) - 1] = '\0'; + msg->data[EC_HOST_PARAM_SIZE - 1] = '\0'; count += scnprintf(buf + count, PAGE_SIZE - count, "Build info: %s\n", msg->data); } diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index fd1452e28352..dc376b0fd276 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c @@ -35,8 +35,10 @@ #define PIPE_REG_ADDRESS 0x10 /* write: physical address */ #define PIPE_REG_ADDRESS_HIGH 0x34 /* write: physical address */ #define PIPE_REG_WAKES 0x14 /* read: wake flags */ -#define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */ -#define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */ +#define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address + */ +#define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address + */ #define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */ #define PIPE_REG_VERSION 0x24 /* read: device version */ @@ -53,12 +55,16 @@ /* The following commands are related to write operations */ #define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */ #define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing - is possible */ + * is possible + */ #define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */ #define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading - * is possible */ + * is possible + */ -/* Possible status values used to signal errors - see goldfish_pipe_error_convert */ +/* Possible status values used to signal errors - + * see goldfish_pipe_error_convert + */ #define PIPE_ERROR_INVAL -1 #define PIPE_ERROR_AGAIN -2 #define PIPE_ERROR_NOMEM -3 @@ -71,14 +77,6 @@ #define MAX_PAGES_TO_GRAB 32 -#define DEBUG 0 - -#if DEBUG -#define DPRINT(...) { printk(KERN_ERR __VA_ARGS__); } -#else -#define DPRINT(...) 
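Side note, not part of the diff: the cros_ec_sysfs hunk above terminates the firmware-provided string at the end of the allocated payload (EC_HOST_PARAM_SIZE) instead of deriving the index from sizeof() of the member, before handing it to a "%s" format. A standalone illustration of the general rule for printing byte buffers that may arrive without a terminator (BUF_SIZE and print_fw_string are invented for the example):

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 64  /* stands in for the real payload size, e.g. EC_HOST_PARAM_SIZE */

static void print_fw_string(char *buf)
{
        /* Firmware data is not guaranteed to be NUL-terminated: cap it at the
         * last byte of the allocation before handing it to "%s". */
        buf[BUF_SIZE - 1] = '\0';
        printf("Build info: %s\n", buf);
}

int main(void)
{
        char buf[BUF_SIZE];

        memset(buf, 'A', sizeof(buf));  /* worst case: no terminator at all */
        print_fw_string(buf);
        return 0;
}
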
-#endif - /* This data type models a given pipe instance */ struct goldfish_pipe { struct goldfish_pipe_dev *dev; @@ -158,6 +156,7 @@ static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev, { u32 aph, apl; u64 paddr; + aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH); apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW); @@ -174,7 +173,8 @@ static int setup_access_params_addr(struct platform_device *pdev, u64 paddr; struct access_params *aps; - aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL); + aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), + GFP_KERNEL); if (!aps) return -1; @@ -226,7 +226,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, struct goldfish_pipe *pipe = filp->private_data; struct goldfish_pipe_dev *dev = pipe->dev; unsigned long address, address_end; - struct page* pages[MAX_PAGES_TO_GRAB] = {}; + struct page *pages[MAX_PAGES_TO_GRAB] = {}; int count = 0, ret = -EINVAL; /* If the emulator already closed the pipe, no need to go further */ @@ -268,17 +268,17 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, ret = get_user_pages_fast(first_page, requested_pages, !is_write, pages); - DPRINT("%s: requested pages: %d %d %p\n", __FUNCTION__, - ret, requested_pages, first_page); + pr_debug("%s: requested pages: %d %ld %p\n", __func__, ret, + requested_pages, (void*)first_page); if (ret == 0) { - DPRINT("%s: error: (requested pages == 0) (wanted %d)\n", - __FUNCTION__, requested_pages); + pr_err("%s: error: (requested pages == 0) (wanted %ld)\n", + __func__, requested_pages); mutex_unlock(&pipe->lock); return ret; } if (ret < 0) { - DPRINT("%s: (requested pages < 0) %d \n", - __FUNCTION__, requested_pages); + pr_err("%s: (requested pages < 0) %ld \n", + __func__, requested_pages); mutex_unlock(&pipe->lock); return ret; } @@ -293,8 +293,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, xaddr_prev = xaddr_i; num_contiguous_pages++; } else { - DPRINT("%s: discontinuous page boundary: %d pages instead\n", - __FUNCTION__, page_i); + pr_err("%s: discontinuous page boundary: %d pages instead\n", + __func__, page_i); break; } } @@ -345,8 +345,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, * ABI relies on this behavior. */ if (status != PIPE_ERROR_AGAIN) - pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n", - status, is_write ? "write" : "read"); + pr_err_ratelimited("goldfish_pipe: backend returned error %d on %s\n", + status, is_write ? "write" : "read"); ret = 0; break; } @@ -516,8 +516,9 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file) pipe->dev = dev; mutex_init(&pipe->lock); - DPRINT("%s: call. pipe_dev pipe_dev=0x%lx new_pipe_addr=0x%lx file=0x%lx\n", __FUNCTION__, pipe_dev, pipe, file); - // spin lock init, write head of list, i guess + pr_debug("%s: call. pipe_dev dev=%p new_pipe_addr=%p file=%p\n", + __func__, dev, pipe, file); + /* spin lock init, write head of list, i guess */ init_waitqueue_head(&pipe->wake_queue); /* @@ -540,7 +541,7 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp) { struct goldfish_pipe *pipe = filp->private_data; - DPRINT("%s: call. pipe=0x%lx file=0x%lx\n", __FUNCTION__, pipe, filp); + pr_debug("%s: call. 
pipe=%p file=%p\n", __func__, pipe, filp); /* The guest is closing the channel, so tell the emulator right now */ goldfish_cmd(pipe, CMD_CLOSE); kfree(pipe); @@ -566,8 +567,8 @@ static struct miscdevice goldfish_pipe_dev = { int goldfish_pipe_device_init_v1(struct platform_device *pdev) { struct goldfish_pipe_dev *dev = pipe_dev; - int err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt, - IRQF_SHARED, "goldfish_pipe", dev); + int err = devm_request_irq(&pdev->dev, dev->irq, + goldfish_pipe_interrupt, IRQF_SHARED, "goldfish_pipe", dev); if (err) { dev_err(&pdev->dev, "unable to allocate IRQ for v1\n"); return err; @@ -585,5 +586,5 @@ int goldfish_pipe_device_init_v1(struct platform_device *pdev) void goldfish_pipe_device_deinit_v1(struct platform_device *pdev) { - misc_deregister(&goldfish_pipe_dev); + misc_deregister(&goldfish_pipe_dev); } diff --git a/drivers/platform/goldfish/goldfish_pipe_v2.c b/drivers/platform/goldfish/goldfish_pipe_v2.c index ad373ed36555..3119b3341a7b 100644 --- a/drivers/platform/goldfish/goldfish_pipe_v2.c +++ b/drivers/platform/goldfish/goldfish_pipe_v2.c @@ -46,6 +46,7 @@ * exchange is properly mapped during a transfer. */ +#include <linux/printk.h> #include "goldfish_pipe.h" @@ -58,8 +59,7 @@ enum { PIPE_CURRENT_DEVICE_VERSION = 2 }; -/* - * IMPORTANT: The following constants must match the ones used and defined +/* IMPORTANT: The following constants must match the ones used and defined * in external/qemu/hw/goldfish_pipe.c in the Android source tree. */ @@ -70,7 +70,10 @@ enum PipePollFlags { PIPE_POLL_HUP = 1 << 2 }; -/* Possible status values used to signal errors - see goldfish_pipe_error_convert */ +/* + * Possible status values used to signal errors - see + * goldfish_pipe_error_convert + */ enum PipeErrors { PIPE_ERROR_INVAL = -1, PIPE_ERROR_AGAIN = -2, @@ -117,8 +120,8 @@ enum PipeCmdCode { PIPE_CMD_WAKE_ON_READ, /* - * TODO(zyy): implement a deferred read/write execution to allow parallel - * processing of pipe operations on the host. + * TODO(zyy): implement a deferred read/write execution to allow + * parallel processing of pipe operations on the host. 
*/ PIPE_CMD_WAKE_ON_DONE_IO, }; @@ -135,17 +138,21 @@ struct goldfish_pipe_command; /* A per-pipe command structure, shared with the host */ struct goldfish_pipe_command { - s32 cmd; /* PipeCmdCode, guest -> host */ - s32 id; /* pipe id, guest -> host */ - s32 status; /* command execution status, host -> guest */ + s32 cmd; /* PipeCmdCode, guest -> host */ + s32 id; /* pipe id, guest -> host */ + s32 status; /* command execution status, host -> guest */ s32 reserved; /* to pad to 64-bit boundary */ union { /* Parameters for PIPE_CMD_{READ,WRITE} */ struct { - u32 buffers_count; /* number of buffers, guest -> host */ - s32 consumed_size; /* number of consumed bytes, host -> guest */ - u64 ptrs[MAX_BUFFERS_PER_COMMAND]; /* buffer pointers, guest -> host */ - u32 sizes[MAX_BUFFERS_PER_COMMAND]; /* buffer sizes, guest -> host */ + /* number of buffers, guest -> host */ + u32 buffers_count; + /* number of consumed bytes, host -> guest */ + s32 consumed_size; + /* buffer pointers, guest -> host */ + u64 ptrs[MAX_BUFFERS_PER_COMMAND]; + /* buffer sizes, guest -> host */ + u32 sizes[MAX_BUFFERS_PER_COMMAND]; } rw_params; }; }; @@ -165,34 +172,46 @@ struct open_command_param { /* Device-level set of buffers shared with the host */ struct goldfish_pipe_dev_buffers { struct open_command_param open_command_params; - struct signalled_pipe_buffer signalled_pipe_buffers[MAX_SIGNALLED_PIPES]; + struct signalled_pipe_buffer + signalled_pipe_buffers[MAX_SIGNALLED_PIPES]; }; /* This data type models a given pipe instance */ struct goldfish_pipe { - u32 id; /* pipe ID - index into goldfish_pipe_dev::pipes array */ - unsigned long flags; /* The wake flags pipe is waiting for - * Note: not protected with any lock, uses atomic operations - * and barriers to make it thread-safe. - */ - unsigned long signalled_flags; /* wake flags host have signalled, - * - protected by goldfish_pipe_dev::lock */ + /* pipe ID - index into goldfish_pipe_dev::pipes array */ + u32 id; - struct goldfish_pipe_command *command_buffer; /* A pointer to command buffer */ + /* The wake flags pipe is waiting for. + * Note: not protected with any lock, uses atomic operations and + * barriers to make it thread-safe. + */ + unsigned long flags; + + /* wake flags host have signalled, + * protected by goldfish_pipe_dev::lock + */ + unsigned long signalled_flags; - /* doubly linked list of signalled pipes, protected by goldfish_pipe_dev::lock */ + /* A pointer to command buffer */ + struct goldfish_pipe_command *command_buffer; + + /* doubly linked list of signalled pipes, + * protected by goldfish_pipe_dev::lock + */ struct goldfish_pipe *prev_signalled; struct goldfish_pipe *next_signalled; /* * A pipe's own lock. Protects the following: - * - *command_buffer - makes sure a command can safely write its parameters - * to the host and read the results back. + * - *command_buffer - makes sure a command can safely write its + * parameters to the host and read the results back. 
*/ struct mutex lock; - wait_queue_head_t wake_queue; /* A wake queue for sleeping until host signals an event */ - struct goldfish_pipe_dev *dev; /* Pointer to the parent goldfish_pipe_dev instance */ + /* A wake queue for sleeping until host signals an event */ + wait_queue_head_t wake_queue; + /* Pointer to the parent goldfish_pipe_dev instance */ + struct goldfish_pipe_dev *dev; }; struct goldfish_pipe_dev pipe_dev[1] = {}; @@ -200,7 +219,8 @@ struct goldfish_pipe_dev pipe_dev[1] = {}; static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd) { pipe->command_buffer->cmd = cmd; - pipe->command_buffer->status = PIPE_ERROR_INVAL; /* failure by default */ + /* failure by default */ + pipe->command_buffer->status = PIPE_ERROR_INVAL; writel(pipe->id, pipe->dev->base + PIPE_REG_CMD); return pipe->command_buffer->status; } @@ -235,10 +255,12 @@ static int goldfish_pipe_error_convert(int status) static int pin_user_pages(unsigned long first_page, unsigned long last_page, unsigned last_page_size, int is_write, - struct page *pages[MAX_BUFFERS_PER_COMMAND], unsigned *iter_last_page_size) + struct page *pages[MAX_BUFFERS_PER_COMMAND], + unsigned *iter_last_page_size) { int ret; int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1; + if (requested_pages > MAX_BUFFERS_PER_COMMAND) { requested_pages = MAX_BUFFERS_PER_COMMAND; *iter_last_page_size = PAGE_SIZE; @@ -260,10 +282,11 @@ static void release_user_pages(struct page **pages, int pages_count, int is_write, s32 consumed_size) { int i; + for (i = 0; i < pages_count; i++) { - if (!is_write && consumed_size > 0) { + if (!is_write && consumed_size > 0) set_page_dirty(pages[i]); - } + put_page(pages[i]); } } @@ -291,7 +314,9 @@ static void populate_rw_params( command->rw_params.sizes[0] = size_on_page; for (; i < pages_count; ++i) { xaddr = page_to_phys(pages[i]); - size_on_page = (i == pages_count - 1) ? iter_last_page_size : PAGE_SIZE; + size_on_page = (i == pages_count - 1) ? + iter_last_page_size : PAGE_SIZE; + if (xaddr == xaddr_prev + PAGE_SIZE) { command->rw_params.sizes[buffer_idx] += size_on_page; } else { @@ -307,7 +332,7 @@ static void populate_rw_params( static int transfer_max_buffers(struct goldfish_pipe* pipe, unsigned long address, unsigned long address_end, int is_write, unsigned long last_page, unsigned int last_page_size, - s32* consumed_size, int* status) + s32 *consumed_size, int *status) { struct page *pages[MAX_BUFFERS_PER_COMMAND]; unsigned long first_page = address & PAGE_MASK; @@ -327,8 +352,9 @@ static int transfer_max_buffers(struct goldfish_pipe* pipe, pipe->command_buffer); /* Transfer the data */ - *status = goldfish_cmd_locked(pipe, - is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ); + *status = goldfish_cmd_locked( + pipe, + is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ); *consumed_size = pipe->command_buffer->rw_params.consumed_size; @@ -389,12 +415,14 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, s32 consumed_size; int status; ret = transfer_max_buffers(pipe, address, address_end, is_write, - last_page, last_page_size, &consumed_size, &status); + last_page, last_page_size, &consumed_size, &status); if (ret < 0) break; if (consumed_size > 0) { - /* No matter what's the status, we've transfered something */ + /* No matter what's the status, we've transfered + * something + */ count += consumed_size; address += consumed_size; } @@ -413,8 +441,9 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, * err. 
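For illustration only, not from the patch: the pin_user_pages()/release_user_pages() pair shown above is the usual pattern in this kernel generation for running device transfers against a user buffer: pin with get_user_pages_fast(), and on release mark the pages dirty only if the device wrote into them. A kernel-context sketch of the same shape (demo_read_into_user and the fill-from-device step are invented, and this is not a standalone program):

#include <linux/mm.h>
#include <linux/errno.h>

static int demo_read_into_user(unsigned long uaddr, int nr_pages)
{
        struct page *pages[16];
        int got, i;

        if (nr_pages > 16)
                nr_pages = 16;

        /* third argument == 1: the device will write into these pages */
        got = get_user_pages_fast(uaddr & PAGE_MASK, nr_pages, 1, pages);
        if (got <= 0)
                return got ? got : -EFAULT;

        /* ... hand the pinned pages to the device here (hypothetical step) ... */

        for (i = 0; i < got; i++) {
                set_page_dirty(pages[i]);  /* the device wrote user-visible data */
                put_page(pages[i]);        /* drop the pin */
        }
        return 0;
}
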
*/ if (status != PIPE_ERROR_AGAIN) - pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n", - status, is_write ? "write" : "read"); + pr_err_ratelimited( + "goldfish_pipe: backend error %d on %s\n", + status, is_write ? "write" : "read"); break; } @@ -422,7 +451,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, * If the error is not PIPE_ERROR_AGAIN, or if we are in * non-blocking mode, just return the error code. */ - if (status != PIPE_ERROR_AGAIN || (filp->f_flags & O_NONBLOCK) != 0) { + if (status != PIPE_ERROR_AGAIN + || (filp->f_flags & O_NONBLOCK) != 0) { ret = goldfish_pipe_error_convert(status); break; } @@ -440,7 +470,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer, size_t bufflen, loff_t *ppos) { - return goldfish_pipe_read_write(filp, buffer, bufflen, /* is_write */ 0); + return goldfish_pipe_read_write(filp, buffer, bufflen, + /* is_write */ 0); } static ssize_t goldfish_pipe_write(struct file *filp, @@ -461,9 +492,8 @@ static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait) poll_wait(filp, &pipe->wake_queue, wait); status = goldfish_cmd(pipe, PIPE_CMD_POLL); - if (status < 0) { + if (status < 0) return -ERESTARTSYS; - } if (status & PIPE_POLL_IN) mask |= POLLIN | POLLRDNORM; @@ -493,9 +523,9 @@ static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev, || dev->first_signalled_pipe == pipe) return; /* already in the list */ pipe->next_signalled = dev->first_signalled_pipe; - if (dev->first_signalled_pipe) { + if (dev->first_signalled_pipe) dev->first_signalled_pipe->prev_signalled = pipe; - } + dev->first_signalled_pipe = pipe; } @@ -511,21 +541,22 @@ static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev, pipe->next_signalled = NULL; } -static struct goldfish_pipe *signalled_pipes_pop_front(struct goldfish_pipe_dev *dev, +static struct goldfish_pipe *signalled_pipes_pop_front( + struct goldfish_pipe_dev *dev, int *wakes) { struct goldfish_pipe *pipe; unsigned long flags; + spin_lock_irqsave(&dev->lock, flags); pipe = dev->first_signalled_pipe; if (pipe) { *wakes = pipe->signalled_flags; pipe->signalled_flags = 0; - /* - * This is an optimized version of signalled_pipes_remove_locked() - - * we want to make it as fast as possible to wake the sleeping pipe - * operations faster + /* This is an optimized version of + * signalled_pipes_remove_locked() - we want to make it as fast + * as possible to wake the sleeping pipe operations faster. */ dev->first_signalled_pipe = pipe->next_signalled; if (dev->first_signalled_pipe) @@ -553,8 +584,8 @@ static void goldfish_interrupt_task(unsigned long unused) clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags); } /* - * wake_up_interruptible() implies a write barrier, so don't explicitly - * add another one here. + * wake_up_interruptible() implies a write barrier, so don't + * explicitly add another one here. */ wake_up_interruptible(&pipe->wake_queue); } @@ -608,6 +639,7 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id) static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev) { int id; + for (id = 0; id < dev->pipes_capacity; ++id) if (!dev->pipes[id]) return id; @@ -657,12 +689,13 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file) init_waitqueue_head(&pipe->wake_queue); /* - * Command buffer needs to be allocated on its own page to make sure it is - * physically contiguous in host's address space. 
+ * Command buffer needs to be allocated on its own page to make sure it + * is physically contiguous in host's address space. */ pipe->command_buffer = - (struct goldfish_pipe_command*)__get_free_page(GFP_KERNEL); + (struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL); if (!pipe->command_buffer) { + pr_err("Could not alloc pipe command buffer!\n"); status = -ENOMEM; goto err_pipe; } @@ -671,6 +704,7 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file) id = get_free_pipe_id_locked(dev); if (id < 0) { + pr_err("Could not get free pipe id!\n"); status = id; goto err_id_locked; } @@ -686,10 +720,13 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file) (u64)(unsigned long)__pa(pipe->command_buffer); status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN); spin_unlock_irqrestore(&dev->lock, flags); - if (status < 0) + if (status < 0) { + pr_err("Could not tell host of new pipe! status=%d", status); goto err_cmd; + } /* All is done, save the pipe into the file's private data field */ file->private_data = pipe; + pr_debug("%s on 0x%p\n", __func__, pipe); return 0; err_cmd: @@ -709,6 +746,8 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp) struct goldfish_pipe *pipe = filp->private_data; struct goldfish_pipe_dev *dev = pipe->dev; + pr_debug("%s on 0x%p\n", __func__, pipe); + /* The guest is closing the channel, so tell the emulator right now */ (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE); @@ -757,15 +796,16 @@ static int goldfish_pipe_device_init_v2(struct platform_device *pdev) dev->first_signalled_pipe = NULL; dev->pipes_capacity = INITIAL_PIPES_CAPACITY; - dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes), GFP_KERNEL); + dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes), + GFP_KERNEL); if (!dev->pipes) return -ENOMEM; /* * We're going to pass two buffers, open_command_params and * signalled_pipe_buffers, to the host. This means each of those buffers - * needs to be contained in a single physical page. The easiest choice is - * to just allocate a page and place the buffers in it. + * needs to be contained in a single physical page. The easiest choice + * is to just allocate a page and place the buffers in it. 
*/ BUG_ON(sizeof(*dev->buffers) > PAGE_SIZE); page = (char*)__get_free_page(GFP_KERNEL); @@ -778,13 +818,19 @@ static int goldfish_pipe_device_init_v2(struct platform_device *pdev) /* Send the buffer addresses to the host */ { u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers); - writel((u32)(unsigned long)(paddr >> 32), dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH); - writel((u32)(unsigned long)paddr, dev->base + PIPE_REG_SIGNAL_BUFFER); - writel((u32)MAX_SIGNALLED_PIPES, dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT); + + writel((u32)(unsigned long)(paddr >> 32), + dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH); + writel((u32)(unsigned long)paddr, + dev->base + PIPE_REG_SIGNAL_BUFFER); + writel((u32)MAX_SIGNALLED_PIPES, + dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT); paddr = __pa(&dev->buffers->open_command_params); - writel((u32)(unsigned long)(paddr >> 32), dev->base + PIPE_REG_OPEN_BUFFER_HIGH); - writel((u32)(unsigned long)paddr, dev->base + PIPE_REG_OPEN_BUFFER); + writel((u32)(unsigned long)(paddr >> 32), + dev->base + PIPE_REG_OPEN_BUFFER_HIGH); + writel((u32)(unsigned long)paddr, + dev->base + PIPE_REG_OPEN_BUFFER); } return 0; } diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c index dd9ea463c2a4..d97340477cf3 100644 --- a/drivers/platform/goldfish/pdev_bus.c +++ b/drivers/platform/goldfish/pdev_bus.c @@ -21,6 +21,7 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> +#include <linux/goldfish.h> #define PDEV_BUS_OP_DONE (0x00) #define PDEV_BUS_OP_REMOVE_DEV (0x04) @@ -130,10 +131,9 @@ static int goldfish_new_pdev(void) dev->pdev.dev.dma_mask = (void *)(dev->pdev.name + name_len + 1); *dev->pdev.dev.dma_mask = ~0; -#ifdef CONFIG_64BIT - writel((u32)((u64)name>>32), pdev_bus_base + PDEV_BUS_GET_NAME_HIGH); -#endif - writel((u32)(unsigned long)name, pdev_bus_base + PDEV_BUS_GET_NAME); + gf_write_ptr(name, pdev_bus_base + PDEV_BUS_GET_NAME, + pdev_bus_base + PDEV_BUS_GET_NAME_HIGH); + name[name_len] = '\0'; dev->pdev.id = readl(pdev_bus_base + PDEV_BUS_ID); dev->pdev.resource[0].start = base; diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c index 039a8b6a50b5..937f10e3c9ad 100644 --- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c @@ -1432,6 +1432,8 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* Extended IOCTLs */ case RMNET_IOCTL_EXTENDED: + if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n"); if (copy_from_user(&extend_ioctl_data, (u8 *)ifr->ifr_ifru.ifru_data, diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index c9e5a46c08f0..4dd7e4f3728e 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -1566,6 +1566,8 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* Extended IOCTLs */ case RMNET_IOCTL_EXTENDED: + if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n"); if (copy_from_user(&extend_ioctl_data, (u8 *)ifr->ifr_ifru.ifru_data, diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index a3661cc44f86..0e0403e024c5 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -101,6 +101,15 @@ static const struct dmi_system_id asus_quirks[] = { }, { 
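Not part of the diff, just an illustration: both rmnet_ipa hunks above add the same guard, a namespace-aware capability check before the extended ioctl parses user-controlled data. A kernel-context sketch of that guard in isolation (wwan_ioctl_stub and the surrounding flow are invented):

#include <linux/capability.h>
#include <linux/netdevice.h>
#include <linux/errno.h>

/* Reject a privileged private ioctl early unless the caller holds
 * CAP_NET_ADMIN in the user namespace that owns the network device. */
static int wwan_ioctl_stub(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        /* ... only now is it safe to copy_from_user() and act on the request ... */
        return 0;
}
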
.callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. X302UA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X302UA"), + }, + .driver_data = &quirk_asus_wapf4, + }, + { + .callback = dmi_matched, .ident = "ASUSTeK COMPUTER INC. X401U", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c index dfe1ee89f7c7..922a86787c5c 100644 --- a/drivers/power/pda_power.c +++ b/drivers/power/pda_power.c @@ -30,9 +30,9 @@ static inline unsigned int get_irq_flags(struct resource *res) static struct device *dev; static struct pda_power_pdata *pdata; static struct resource *ac_irq, *usb_irq; -static struct timer_list charger_timer; -static struct timer_list supply_timer; -static struct timer_list polling_timer; +static struct delayed_work charger_work; +static struct delayed_work polling_work; +static struct delayed_work supply_work; static int polling; static struct power_supply *pda_psy_ac, *pda_psy_usb; @@ -140,7 +140,7 @@ static void update_charger(void) } } -static void supply_timer_func(unsigned long unused) +static void supply_work_func(struct work_struct *work) { if (ac_status == PDA_PSY_TO_CHANGE) { ac_status = new_ac_status; @@ -161,11 +161,12 @@ static void psy_changed(void) * Okay, charger set. Now wait a bit before notifying supplicants, * charge power should stabilize. */ - mod_timer(&supply_timer, - jiffies + msecs_to_jiffies(pdata->wait_for_charger)); + cancel_delayed_work(&supply_work); + schedule_delayed_work(&supply_work, + msecs_to_jiffies(pdata->wait_for_charger)); } -static void charger_timer_func(unsigned long unused) +static void charger_work_func(struct work_struct *work) { update_status(); psy_changed(); @@ -184,13 +185,14 @@ static irqreturn_t power_changed_isr(int irq, void *power_supply) * Wait a bit before reading ac/usb line status and setting charger, * because ac/usb status readings may lag from irq. */ - mod_timer(&charger_timer, - jiffies + msecs_to_jiffies(pdata->wait_for_status)); + cancel_delayed_work(&charger_work); + schedule_delayed_work(&charger_work, + msecs_to_jiffies(pdata->wait_for_status)); return IRQ_HANDLED; } -static void polling_timer_func(unsigned long unused) +static void polling_work_func(struct work_struct *work) { int changed = 0; @@ -211,8 +213,9 @@ static void polling_timer_func(unsigned long unused) if (changed) psy_changed(); - mod_timer(&polling_timer, - jiffies + msecs_to_jiffies(pdata->polling_interval)); + cancel_delayed_work(&polling_work); + schedule_delayed_work(&polling_work, + msecs_to_jiffies(pdata->polling_interval)); } #if IS_ENABLED(CONFIG_USB_PHY) @@ -250,8 +253,9 @@ static int otg_handle_notification(struct notifier_block *nb, * Wait a bit before reading ac/usb line status and setting charger, * because ac/usb status readings may lag from irq. 
*/ - mod_timer(&charger_timer, - jiffies + msecs_to_jiffies(pdata->wait_for_status)); + cancel_delayed_work(&charger_work); + schedule_delayed_work(&charger_work, + msecs_to_jiffies(pdata->wait_for_status)); return NOTIFY_OK; } @@ -300,8 +304,8 @@ static int pda_power_probe(struct platform_device *pdev) if (!pdata->ac_max_uA) pdata->ac_max_uA = 500000; - setup_timer(&charger_timer, charger_timer_func, 0); - setup_timer(&supply_timer, supply_timer_func, 0); + INIT_DELAYED_WORK(&charger_work, charger_work_func); + INIT_DELAYED_WORK(&supply_work, supply_work_func); ac_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ac"); usb_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "usb"); @@ -385,9 +389,10 @@ static int pda_power_probe(struct platform_device *pdev) if (polling) { dev_dbg(dev, "will poll for status\n"); - setup_timer(&polling_timer, polling_timer_func, 0); - mod_timer(&polling_timer, - jiffies + msecs_to_jiffies(pdata->polling_interval)); + INIT_DELAYED_WORK(&polling_work, polling_work_func); + cancel_delayed_work(&polling_work); + schedule_delayed_work(&polling_work, + msecs_to_jiffies(pdata->polling_interval)); } if (ac_irq || usb_irq) @@ -433,9 +438,9 @@ static int pda_power_remove(struct platform_device *pdev) free_irq(ac_irq->start, pda_psy_ac); if (polling) - del_timer_sync(&polling_timer); - del_timer_sync(&charger_timer); - del_timer_sync(&supply_timer); + cancel_delayed_work_sync(&polling_work); + cancel_delayed_work_sync(&charger_work); + cancel_delayed_work_sync(&supply_work); if (pdata->is_usb_online) power_supply_unregister(pda_psy_usb); diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 84419af16f77..fd12ccc11e26 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c @@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone( power_zone->id = result; idr_init(&power_zone->idr); + result = -ENOMEM; power_zone->name = kstrdup(name, GFP_KERNEL); if (!power_zone->name) goto err_name_alloc; diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 2e481b9e8ea5..60a5e0c63a13 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -97,30 +97,26 @@ static s32 scaled_ppm_to_ppb(long ppm) /* posix clock implementation */ -static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp) +static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp) { tp->tv_sec = 0; tp->tv_nsec = 1; return 0; } -static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp) +static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp) { struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); - struct timespec64 ts = timespec_to_timespec64(*tp); - return ptp->info->settime64(ptp->info, &ts); + return ptp->info->settime64(ptp->info, tp); } -static int ptp_clock_gettime(struct posix_clock *pc, struct timespec *tp) +static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp) { struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); - struct timespec64 ts; int err; - err = ptp->info->gettime64(ptp->info, &ts); - if (!err) - *tp = timespec64_to_timespec(ts); + err = ptp->info->gettime64(ptp->info, tp); return err; } @@ -133,7 +129,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx) ops = ptp->info; if (tx->modes & ADJ_SETOFFSET) { - struct timespec ts; + struct timespec64 ts; ktime_t kt; s64 delta; @@ -146,7 +142,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct 
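Illustration, not part of the patch: the pda_power hunks above replace timer_list callbacks with delayed_work, which moves the deferred charger/supply updates into process context where sleeping calls are allowed, and each mod_timer() becomes a cancel plus reschedule. A minimal kernel-context sketch of the same conversion (demo_work, demo_work_fn and the delay value are invented):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work demo_work;

static void demo_work_fn(struct work_struct *work)
{
        /* runs in process context, so sleeping calls are allowed here */
}

static void demo_init(void)
{
        INIT_DELAYED_WORK(&demo_work, demo_work_fn);
}

static void demo_kick(unsigned int delay_ms)
{
        /* equivalent of mod_timer(): drop any pending run, then rearm */
        cancel_delayed_work(&demo_work);
        schedule_delayed_work(&demo_work, msecs_to_jiffies(delay_ms));
}

static void demo_exit(void)
{
        cancel_delayed_work_sync(&demo_work);  /* wait for a running instance */
}
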
timex *tx) if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC) return -EINVAL; - kt = timespec_to_ktime(ts); + kt = timespec64_to_ktime(ts); delta = ktime_to_ns(kt); err = ops->adjtime(ops, delta); } else if (tx->modes & ADJ_FREQUENCY) { diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index d4de0607b502..3039fb762893 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -69,6 +69,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip); unsigned long long c; unsigned long rate, hz; + unsigned long long ns100 = NSEC_PER_SEC; u32 val = 0; int err; @@ -87,9 +88,11 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * cycles at the PWM clock rate will take period_ns nanoseconds. */ rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH; - hz = NSEC_PER_SEC / period_ns; - rate = (rate + (hz / 2)) / hz; + /* Consider precision in PWM_SCALE_WIDTH rate calculation */ + ns100 *= 100; + hz = DIV_ROUND_CLOSEST_ULL(ns100, period_ns); + rate = DIV_ROUND_CLOSEST(rate * 100, hz); /* * Since the actual PWM divider is the register's frequency divider diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index 3a6d0290c54c..c5e272ea4372 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c @@ -296,6 +296,11 @@ static int anatop_regulator_probe(struct platform_device *pdev) if (!sreg->sel && !strcmp(sreg->name, "vddpu")) sreg->sel = 22; + /* set the default voltage of the pcie phy to be 1.100v */ + if (!sreg->sel && rdesc->name && + !strcmp(rdesc->name, "vddpcie")) + sreg->sel = 0x10; + if (!sreg->bypass && !sreg->sel) { dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n"); return -EINVAL; diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 853976bd3d36..9473715725df 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -217,6 +217,13 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) missing = year; } + /* Can't proceed if alarm is still invalid after replacing + * missing fields. + */ + err = rtc_valid_tm(&alarm->time); + if (err) + goto done; + /* with luck, no rollover is needed */ t_now = rtc_tm_to_time64(&now); t_alm = rtc_tm_to_time64(&alarm->time); @@ -268,9 +275,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) dev_warn(&rtc->dev, "alarm rollover not handled\n"); } -done: err = rtc_valid_tm(&alarm->time); +done: if (err) { dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n", alarm->time.tm_year + 1900, alarm->time.tm_mon + 1, diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 8f7034ba7d9e..86015b393dd5 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -41,6 +41,9 @@ #include <linux/pm.h> #include <linux/of.h> #include <linux/of_platform.h> +#ifdef CONFIG_X86 +#include <asm/i8259.h> +#endif /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ #include <asm-generic/rtc.h> @@ -1058,17 +1061,23 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) { cmos_wake_setup(&pnp->dev); - if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) + if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) { + unsigned int irq = 0; +#ifdef CONFIG_X86 /* Some machines contain a PNP entry for the RTC, but * don't define the IRQ. 
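A side note on the pwm-tegra hunk above, not part of the diff: dividing NSEC_PER_SEC by period_ns in integer math truncates the frequency to whole hertz, so for long periods the computed divider can be far off; the patch keeps two extra decimal digits by scaling by 100 and using round-to-nearest division. A standalone sketch of the arithmetic (the 408 MHz clock and the 8-bit duty width are illustrative numbers, not taken from the diff):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* round-to-nearest unsigned division, like the kernel's DIV_ROUND_CLOSEST_ULL */
static unsigned long long div_closest(unsigned long long n, unsigned long long d)
{
        return (n + d / 2) / d;
}

int main(void)
{
        unsigned long long period_ns = 600000000;     /* 0.6 s period request */
        unsigned long long rate = 408000000ULL >> 8;  /* clock rate >> duty width */

        /* old scheme: frequency truncated to whole Hz (1 Hz here, not ~1.67 Hz) */
        unsigned long long hz_old = NSEC_PER_SEC / period_ns;
        unsigned long long div_old = (rate + hz_old / 2) / hz_old;

        /* patched scheme: keep two extra decimal digits of the frequency */
        unsigned long long hz_new = div_closest(NSEC_PER_SEC * 100, period_ns);
        unsigned long long div_new = div_closest(rate * 100, hz_new);

        /* the exact divider for this request would be 956250 */
        printf("old divider %llu, new divider %llu\n", div_old, div_new);
        return 0;
}
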
It should always be safe to - * hardcode it in these cases + * hardcode it on systems with a legacy PIC. */ + if (nr_legacy_irqs()) + irq = 8; +#endif return cmos_do_probe(&pnp->dev, - pnp_get_resource(pnp, IORESOURCE_IO, 0), 8); - else + pnp_get_resource(pnp, IORESOURCE_IO, 0), irq); + } else { return cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), pnp_irq(pnp, 0)); + } } static void __exit cmos_pnp_remove(struct pnp_dev *pnp) diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 3b3049c8c9e0..c0eb113588ff 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c @@ -527,6 +527,10 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd, if (get_user(new_margin, (int __user *)arg)) return -EFAULT; + /* the hardware's tick rate is 4096 Hz, so + * the counter value needs to be scaled accordingly + */ + new_margin <<= 12; if (new_margin < 1 || new_margin > 16777216) return -EINVAL; @@ -535,7 +539,8 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd, ds1374_wdt_ping(); /* fallthrough */ case WDIOC_GETTIMEOUT: - return put_user(wdt_margin, (int __user *)arg); + /* when returning ... inverse is true */ + return put_user((wdt_margin >> 12), (int __user *)arg); case WDIOC_SETOPTIONS: if (copy_from_user(&options, (int __user *)arg, sizeof(int))) return -EFAULT; @@ -543,14 +548,15 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd, if (options & WDIOS_DISABLECARD) { pr_info("disable watchdog\n"); ds1374_wdt_disable(); + return 0; } if (options & WDIOS_ENABLECARD) { pr_info("enable watchdog\n"); ds1374_wdt_settimeout(wdt_margin); ds1374_wdt_ping(); + return 0; } - return -EINVAL; } return -ENOTTY; diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c index 229dd2fe8f45..c6b0c7ed7a30 100644 --- a/drivers/rtc/rtc-opal.c +++ b/drivers/rtc/rtc-opal.c @@ -150,6 +150,16 @@ static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm) y_m_d = be32_to_cpu(__y_m_d); h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32); + + /* check if no alarm is set */ + if (y_m_d == 0 && h_m_s_ms == 0) { + pr_debug("No alarm is set\n"); + rc = -ENOENT; + goto exit; + } else { + pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms); + } + opal_to_tm(y_m_d, h_m_s_ms, &alarm->time); exit: diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c index 950c5d0b6dca..afab89f5be48 100644 --- a/drivers/rtc/rtc-snvs.c +++ b/drivers/rtc/rtc-snvs.c @@ -257,7 +257,7 @@ static int snvs_rtc_probe(struct platform_device *pdev) of_property_read_u32(pdev->dev.of_node, "offset", &data->offset); } - if (!data->regmap) { + if (IS_ERR(data->regmap)) { dev_err(&pdev->dev, "Can't find snvs syscon\n"); return -ENODEV; } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index e7a6f1222642..b76a85d14ef0 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1881,8 +1881,12 @@ static int __dasd_device_is_unusable(struct dasd_device *device, { int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM); - if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { - /* dasd is being set offline. 
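Illustration, not part of the diff: the rtc-ds1374 hunk above scales the user-supplied watchdog timeout from seconds into hardware ticks on the way in (the counter runs at 4096 Hz, hence the shift by 12) and back to seconds when reporting it, with the 1..16777216 range check now applying to the 24-bit tick count. A standalone sketch of just that conversion (the names are invented):

#include <stdio.h>

#define WDT_TICK_SHIFT 12        /* counter runs at 4096 Hz = 1 << 12 */
#define WDT_MAX_TICKS (1 << 24)  /* 24-bit down-counter ceiling */

static int seconds_to_ticks(int seconds)
{
        return seconds << WDT_TICK_SHIFT;
}

static int ticks_to_seconds(int ticks)
{
        return ticks >> WDT_TICK_SHIFT;
}

int main(void)
{
        int margin_s = 30;  /* what user space asks for via WDIOC_SETTIMEOUT */
        int ticks = seconds_to_ticks(margin_s);

        if (ticks < 1 || ticks > WDT_MAX_TICKS)
                return 1;   /* would be -EINVAL in the driver */

        printf("%d s -> %d ticks -> reported back as %d s\n",
               margin_s, ticks, ticks_to_seconds(ticks));
        return 0;
}
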
*/ + if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && + !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { + /* + * dasd is being set offline + * but it is no safe offline where we have to allow I/O + */ return 1; } if (device->stopped) { diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 5006cb6ce62d..50030cdf91fb 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -591,6 +591,11 @@ struct qeth_cmd_buffer { void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *); }; +static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob) +{ + return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); +} + /** * definition of a qeth channel, used for read and write */ diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e5b9506698b1..95c631125a20 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -517,8 +517,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) queue == card->qdio.no_in_queues - 1; } - -static int qeth_issue_next_read(struct qeth_card *card) +static int __qeth_issue_next_read(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; @@ -549,6 +548,17 @@ static int qeth_issue_next_read(struct qeth_card *card) return rc; } +static int qeth_issue_next_read(struct qeth_card *card) +{ + int ret; + + spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); + ret = __qeth_issue_next_read(card); + spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); + + return ret; +} + static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) { struct qeth_reply *reply; @@ -952,7 +962,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) spin_lock_irqsave(&card->thread_mask_lock, flags); card->thread_running_mask &= ~thread; spin_unlock_irqrestore(&card->thread_mask_lock, flags); - wake_up(&card->wait_q); + wake_up_all(&card->wait_q); } EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); @@ -1156,6 +1166,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, } rc = qeth_get_problem(cdev, irb); if (rc) { + card->read_or_write_problem = 1; qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); goto out; @@ -1174,7 +1185,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, return; if (channel == &card->read && channel->state == CH_STATE_UP) - qeth_issue_next_read(card); + __qeth_issue_next_read(card); iob = channel->iob; index = channel->buf_no; @@ -2054,7 +2065,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, unsigned long flags; struct qeth_reply *reply = NULL; unsigned long timeout, event_timeout; - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = NULL; QETH_CARD_TEXT(card, 2, "sendctl"); @@ -2068,23 +2079,27 @@ int qeth_send_control_data(struct qeth_card *card, int len, } reply->callback = reply_cb; reply->param = reply_param; - if (card->state == CARD_STATE_DOWN) - reply->seqno = QETH_IDX_COMMAND_SEQNO; - else - reply->seqno = card->seqno.ipa++; + init_waitqueue_head(&reply->wait_q); - spin_lock_irqsave(&card->lock, flags); - list_add_tail(&reply->list, &card->cmd_waiter_list); - spin_unlock_irqrestore(&card->lock, flags); QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; - qeth_prepare_control_data(card, len, iob); - if (IS_IPA(iob->data)) + if (IS_IPA(iob->data)) { + cmd = __ipa_cmd(iob); + cmd->hdr.seqno = card->seqno.ipa++; + reply->seqno = 
cmd->hdr.seqno; event_timeout = QETH_IPA_TIMEOUT; - else + } else { + reply->seqno = QETH_IDX_COMMAND_SEQNO; event_timeout = QETH_TIMEOUT; + } + qeth_prepare_control_data(card, len, iob); + + spin_lock_irqsave(&card->lock, flags); + list_add_tail(&reply->list, &card->cmd_waiter_list); + spin_unlock_irqrestore(&card->lock, flags); + timeout = jiffies + event_timeout; QETH_CARD_TEXT(card, 6, "noirqpnd"); @@ -2109,9 +2124,8 @@ int qeth_send_control_data(struct qeth_card *card, int len, /* we have only one long running ipassist, since we can ensure process context of this command we can sleep */ - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - if ((cmd->hdr.command == IPA_CMD_SETIP) && - (cmd->hdr.prot_version == QETH_PROT_IPV4)) { + if (cmd && cmd->hdr.command == IPA_CMD_SETIP && + cmd->hdr.prot_version == QETH_PROT_IPV4) { if (!wait_event_timeout(reply->wait_q, atomic_read(&reply->received), event_timeout)) goto time_err; @@ -2877,7 +2891,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card, memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); cmd->hdr.command = command; cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; - cmd->hdr.seqno = card->seqno.ipa; + /* cmd->hdr.seqno is set by qeth_send_control_data() */ cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); cmd->hdr.rel_adapter_no = (__u8) card->info.portno; if (card->options.layer2) @@ -4966,8 +4980,6 @@ static void qeth_core_free_card(struct qeth_card *card) QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); qeth_clean_channel(&card->read); qeth_clean_channel(&card->write); - if (card->dev) - free_netdev(card->dev); kfree(card->ip_tbd_list); qeth_free_qdio_buffers(card); unregister_service_level(&card->qeth_service_level); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 58bcb3c9a86a..acdb5ccb0ab9 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1062,8 +1062,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) qeth_l2_set_offline(cgdev); if (card->dev) { - netif_napi_del(&card->napi); unregister_netdev(card->dev); + free_netdev(card->dev); card->dev = NULL; } return; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 0d6888cbd96e..bbdb3b6c54bb 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3243,8 +3243,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) qeth_l3_set_offline(cgdev); if (card->dev) { - netif_napi_del(&card->napi); unregister_netdev(card->dev); + free_netdev(card->dev); card->dev = NULL; } diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 499e369eabf0..8bc1625337f6 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -191,6 +191,7 @@ struct bnx2fc_hba { struct bnx2fc_cmd_mgr *cmd_mgr; spinlock_t hba_lock; struct mutex hba_mutex; + struct mutex hba_stats_mutex; unsigned long adapter_state; #define ADAPTER_STATE_UP 0 #define ADAPTER_STATE_GOING_DOWN 1 diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 67405c628864..d0b227ffbd5f 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -641,15 +641,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) if (!fw_stats) return NULL; + mutex_lock(&hba->hba_stats_mutex); + bnx2fc_stats = fc_get_host_stats(shost); init_completion(&hba->stat_req_done); if (bnx2fc_send_stat_req(hba)) - return bnx2fc_stats; + goto 
unlock_stats_mutex; rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); if (!rc) { BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); - return bnx2fc_stats; + goto unlock_stats_mutex; } BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt); bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt; @@ -671,6 +673,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) memcpy(&hba->prev_stats, hba->stats_buffer, sizeof(struct fcoe_statistics_params)); + +unlock_stats_mutex: + mutex_unlock(&hba->hba_stats_mutex); return bnx2fc_stats; } @@ -1302,6 +1307,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) } spin_lock_init(&hba->hba_lock); mutex_init(&hba->hba_mutex); + mutex_init(&hba->hba_stats_mutex); hba->cnic = cnic; diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 622bdabc8894..dab195f04da7 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) goto bye; } - mempool_free(mbp, hw->mb_mempool); if (finicsum != cfcsum) { csio_warn(hw, "Config File checksum mismatch: csum=%#x, computed=%#x\n", @@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) rv = csio_hw_validate_caps(hw, mbp); if (rv != 0) goto bye; + + mempool_free(mbp, hw->mb_mempool); + mbp = NULL; + /* * Note that we're operating with parameters * not supplied by the driver, rather than from hard-wired diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 7a58128a0000..2f61d8cd5882 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -835,8 +835,10 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd) qc->err_mask |= AC_ERR_OTHER; sata_port->ioasa.status |= ATA_BUSY; - list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); ata_qc_complete(qc); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } /** @@ -5864,8 +5866,10 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) res->in_erp = 0; } scsi_dma_unmap(ipr_cmd->scsi_cmd); - list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); scsi_cmd->scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } /** @@ -6255,8 +6259,10 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, } scsi_dma_unmap(ipr_cmd->scsi_cmd); - list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); scsi_cmd->scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } /** @@ -6282,8 +6288,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) scsi_dma_unmap(scsi_cmd); spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); - list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); scsi_cmd->scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); } else { spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index efce04df2109..9f0b00c38658 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1695,6 +1695,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) */ switch (session->state) { case ISCSI_STATE_FAILED: + /* + * cmds should fail during shutdown, if the session + 
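A side note on the bnx2fc change that finishes above, not part of the diff: the firmware statistics request uses a single completion and buffer on the hba, so concurrent readers are now serialized with hba_stats_mutex and the early returns become gotos to one unlock label. A kernel-context sketch of that lock/goto-unlock shape with invented names (demo_hba, demo_get_stats; -EIO stands in for whatever the real failure path returns):

#include <linux/mutex.h>
#include <linux/errno.h>

struct demo_hba {
        struct mutex stats_mutex;  /* serializes use of the shared stats buffer */
        int stats_buffer;
};

/* Every exit path after mutex_lock() funnels through one unlock label,
 * so no early return can leak the lock. */
static int demo_get_stats(struct demo_hba *hba, int *out)
{
        int ret = 0;

        mutex_lock(&hba->stats_mutex);

        if (hba->stats_buffer < 0) {  /* stand-in for a failed firmware request */
                ret = -EIO;
                goto unlock;
        }

        *out = hba->stats_buffer;

unlock:
        mutex_unlock(&hba->stats_mutex);
        return ret;
}
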
* state is bad, allowing completion to happen + */ + if (unlikely(system_state != SYSTEM_RUNNING)) { + reason = FAILURE_SESSION_FAILED; + sc->result = DID_NO_CONNECT << 16; + break; + } case ISCSI_STATE_IN_RECOVERY: reason = FAILURE_SESSION_IN_RECOVERY; sc->result = DID_IMM_RETRY << 16; @@ -1980,6 +1989,19 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) if (session->state != ISCSI_STATE_LOGGED_IN) { /* + * During shutdown, if session is prematurely disconnected, + * recovery won't happen and there will be hung cmds. Not + * handling cmds would trigger EH, also bad in this case. + * Instead, handle cmd, allow completion to happen and let + * upper layer to deal with the result. + */ + if (unlikely(system_state != SYSTEM_RUNNING)) { + sc->result = DID_NO_CONNECT << 16; + ISCSI_DBG_EH(session, "sc on shutdown, handled\n"); + rc = BLK_EH_HANDLED; + goto done; + } + /* * We are probably in the middle of iscsi recovery so let * that complete and handle the error. */ @@ -2083,7 +2105,7 @@ done: task->last_timeout = jiffies; spin_unlock(&session->frwd_lock); ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? - "timer reset" : "nh"); + "timer reset" : "shutdown or nh"); return rc; } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 022bb6e10d98..12886f96b286 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -282,6 +282,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) phy->phy->minimum_linkrate = dr->pmin_linkrate; phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; + phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED); skip: if (new_phy) @@ -675,7 +676,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy) res = smp_execute_task(dev, req, RPEL_REQ_SIZE, resp, RPEL_RESP_SIZE); - if (!res) + if (res) goto out; phy->invalid_dword_count = scsi_to_u32(&resp[12]); @@ -684,6 +685,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy) phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); out: + kfree(req); kfree(resp); return res; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8379fbbc60db..ef43847153ea 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -13493,6 +13493,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, case LPFC_Q_CREATE_VERSION_1: bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, wq->entry_count); + bf_set(lpfc_mbox_hdr_version, &shdr->request, + LPFC_Q_CREATE_VERSION_1); + switch (wq->entry_size) { default: case 64: diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c index 14c0334f41e4..26c67c42985c 100644 --- a/drivers/scsi/mac_esp.c +++ b/drivers/scsi/mac_esp.c @@ -55,6 +55,7 @@ struct mac_esp_priv { int error; }; static struct esp *esp_chips[2]; +static DEFINE_SPINLOCK(esp_chips_lock); #define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \ platform_get_drvdata((struct platform_device *) \ @@ -562,15 +563,18 @@ static int esp_mac_probe(struct platform_device *dev) } host->irq = IRQ_MAC_SCSI; - esp_chips[dev->id] = esp; - mb(); - if (esp_chips[!dev->id] == NULL) { - err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL); - if (err < 0) { - esp_chips[dev->id] = NULL; - goto fail_free_priv; - } + + /* The request_irq() call is intended to succeed for the first device + * and fail for the second device. 
+ */ + err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL); + spin_lock(&esp_chips_lock); + if (err < 0 && esp_chips[!dev->id] == NULL) { + spin_unlock(&esp_chips_lock); + goto fail_free_priv; } + esp_chips[dev->id] = esp; + spin_unlock(&esp_chips_lock); err = scsi_esp_register(esp, &dev->dev); if (err) @@ -579,8 +583,13 @@ static int esp_mac_probe(struct platform_device *dev) return 0; fail_free_irq: - if (esp_chips[!dev->id] == NULL) + spin_lock(&esp_chips_lock); + esp_chips[dev->id] = NULL; + if (esp_chips[!dev->id] == NULL) { + spin_unlock(&esp_chips_lock); free_irq(host->irq, esp); + } else + spin_unlock(&esp_chips_lock); fail_free_priv: kfree(mep); fail_free_command_block: @@ -599,9 +608,13 @@ static int esp_mac_remove(struct platform_device *dev) scsi_esp_unregister(esp); + spin_lock(&esp_chips_lock); esp_chips[dev->id] = NULL; - if (!(esp_chips[0] || esp_chips[1])) + if (esp_chips[!dev->id] == NULL) { + spin_unlock(&esp_chips_lock); free_irq(irq, NULL); + } else + spin_unlock(&esp_chips_lock); kfree(mep); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index e111c3d8c5d6..b868ef3b2ca3 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -3886,19 +3886,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) return 0; } - /* - * Bug work around for firmware SATL handling. The loop - * is based on atomic operations and ensures consistency - * since we're lockless at this point - */ - do { - if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { - scmd->result = SAM_STAT_BUSY; - scmd->scsi_done(scmd); - return 0; - } - } while (_scsih_set_satl_pending(scmd, true)); - sas_target_priv_data = sas_device_priv_data->sas_target; /* invalid device handle */ @@ -3924,6 +3911,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) sas_device_priv_data->block) return SCSI_MLQUEUE_DEVICE_BUSY; + /* + * Bug work around for firmware SATL handling. 
The loop + * is based on atomic operations and ensures consistency + * since we're lockless at this point + */ + do { + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { + scmd->result = SAM_STAT_BUSY; + scmd->scsi_done(scmd); + return 0; + } + } while (_scsih_set_satl_pending(scmd, true)); + if (scmd->sc_data_direction == DMA_FROM_DEVICE) mpi_control = MPI2_SCSIIO_CONTROL_READ; else if (scmd->sc_data_direction == DMA_TO_DEVICE) @@ -3945,6 +3945,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (!smid) { pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); + _scsih_set_satl_pending(scmd, false); goto out; } mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); @@ -3975,6 +3976,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (mpi_request->DataLength) { if (ioc->build_sg_scmd(ioc, scmd, smid)) { mpt3sas_base_free_smid(ioc, smid); + _scsih_set_satl_pending(scmd, false); goto out; } } else diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index e197c6f39de2..aa18c729d23a 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -365,6 +365,7 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res) srb_t *sp = (srb_t *)ptr; struct srb_iocb *abt = &sp->u.iocb_cmd; + del_timer(&sp->u.iocb_cmd.timer); complete(&abt->u.abt.comp); } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index e6faa0b050d1..824e27eec7a1 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -5502,7 +5502,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, fc_port_t *fcport; int rc; - fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (!fcport) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, "qla_target(%d): Allocation of tmp FC port failed", diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 60720e5b1ebc..6b61b09b3226 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -180,7 +180,7 @@ static struct { {"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */ - {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */ + {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */ {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN}, {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, {"HP", "C1557A", NULL, BLIST_FORCELUN}, @@ -589,17 +589,12 @@ int scsi_get_device_flags_keyed(struct scsi_device *sdev, int key) { struct scsi_dev_info_list *devinfo; - int err; devinfo = scsi_dev_info_list_find(vendor, model, key); if (!IS_ERR(devinfo)) return devinfo->flags; - err = PTR_ERR(devinfo); - if (err != -ENOENT) - return err; - - /* nothing found, return nothing */ + /* key or device not found: return nothing */ if (key != SCSI_DEVINFO_GLOBAL) return 0; diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 4d655b568269..5711d58f9e81 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -56,10 +56,13 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { {"IBM", "1815", "rdac", }, {"IBM", "1818", "rdac", }, {"IBM", "3526", "rdac", }, + {"IBM", "3542", "rdac", }, + {"IBM", "3552", "rdac", }, {"SGI", "TP9", "rdac", }, {"SGI", "IS", "rdac", }, - {"STK", "OPENstorage D280", "rdac", }, + {"STK", "OPENstorage", "rdac", }, {"STK", "FLEXLINE 380", 
"rdac", }, + {"STK", "BladeCtlr", "rdac", }, {"SUN", "CSM", "rdac", }, {"SUN", "LCSM100", "rdac", }, {"SUN", "STK6580_6780", "rdac", }, diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 044d06410d4c..01168acc864d 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -546,7 +546,6 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, ecomp = &edev->component[components++]; if (!IS_ERR(ecomp)) { - ses_get_power_status(edev, ecomp); if (addl_desc_ptr) ses_process_descriptor( ecomp, diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 3bc15d2664a1..453171425ba9 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -535,6 +535,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) } else count = (old_hdr->result == 0) ? 0 : -EIO; sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); retval = count; free_old_hdr: kfree(old_hdr); @@ -575,6 +576,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) } err_out: err2 = sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); return err ? : err2 ? : count; } @@ -674,18 +676,14 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) * is a non-zero input_size, so emit a warning. */ if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { - static char cmd[TASK_COMM_LEN]; - if (strcmp(current->comm, cmd)) { - printk_ratelimited(KERN_WARNING - "sg_write: data in/out %d/%d bytes " - "for SCSI command 0x%x-- guessing " - "data in;\n program %s not setting " - "count and/or reply_len properly\n", - old_hdr.reply_len - (int)SZ_SG_HEADER, - input_size, (unsigned int) cmnd[0], - current->comm); - strcpy(cmd, current->comm); - } + printk_ratelimited(KERN_WARNING + "sg_write: data in/out %d/%d bytes " + "for SCSI command 0x%x-- guessing " + "data in;\n program %s not setting " + "count and/or reply_len properly\n", + old_hdr.reply_len - (int)SZ_SG_HEADER, + input_size, (unsigned int) cmnd[0], + current->comm); } k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); return (k < 0) ? 
k : count; @@ -784,11 +782,15 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); + if (hp->dxfer_len >= SZ_256M) + return -EINVAL; + k = sg_start_req(srp, cmnd); if (k) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: start_req err=%d\n", k)); sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); return k; /* probably out of space --> ENOMEM */ } if (atomic_read(&sdp->detaching)) { @@ -801,6 +803,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, } sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); return -ENODEV; } @@ -1293,6 +1296,7 @@ sg_rq_end_io_usercontext(struct work_struct *work) struct sg_fd *sfp = srp->parentfp; sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); kref_put(&sfp->f_ref, sg_remove_sfp); } @@ -1834,8 +1838,6 @@ sg_finish_rem_req(Sg_request *srp) else sg_remove_scat(sfp, req_schp); - sg_remove_request(sfp, srp); - return ret; } @@ -2072,11 +2074,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id) if ((1 == resp->done) && (!resp->sg_io_owned) && ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { resp->done = 2; /* guard against other readers */ - break; + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + return resp; } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); - return resp; + return NULL; } /* always adds to end of list */ @@ -2182,12 +2185,17 @@ sg_remove_sfp_usercontext(struct work_struct *work) struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); struct sg_device *sdp = sfp->parentdp; Sg_request *srp; + unsigned long iflags; /* Cleanup any responses which were never read(). */ + write_lock_irqsave(&sfp->rq_list_lock, iflags); while (!list_empty(&sfp->rq_list)) { srp = list_first_entry(&sfp->rq_list, Sg_request, entry); sg_finish_rem_req(srp); + list_del(&srp->entry); + srp->parentfp = NULL; } + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (sfp->reserve.bufflen > 0) { SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 03a2aadf0d3c..8ef905cbfc9c 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -28,6 +28,7 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> +#include <scsi/scsi_devinfo.h> #include <linux/seqlock.h> #define VIRTIO_SCSI_MEMPOOL_SZ 64 @@ -704,6 +705,28 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc) return virtscsi_tmf(vscsi, cmd); } +static int virtscsi_device_alloc(struct scsi_device *sdevice) +{ + /* + * Passed through SCSI targets (e.g. with qemu's 'scsi-block') + * may have transfer limits which come from the host SCSI + * controller or something on the host side other than the + * target itself. + * + * To make this work properly, the hypervisor can adjust the + * target's VPD information to advertise these limits. But + * for that to work, the guest has to look at the VPD pages, + * which we won't do by default if it is an SPC-2 device, even + * if it does actually support it. + * + * So, set the blist to always try to read the VPD pages. 
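The comment above explains why a passed-through target's transfer limits only become visible once the guest is willing to read VPD pages, and the hunk that follows sets BLIST_TRY_VPD_PAGES from the new slave_alloc hook. As a purely editorial illustration (not part of the patch), the sketch below issues the matching INQUIRY for the Block Limits VPD page (0xB0) from userspace through SG_IO; the device node name and the trimmed error handling are assumptions for the example.

#include <fcntl.h>
#include <scsi/sg.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        /* INQUIRY, EVPD=1, page 0xB0, 256-byte allocation length */
        unsigned char cdb[6] = { 0x12, 0x01, 0xb0, 0x01, 0x00, 0x00 };
        unsigned char buf[256], sense[32];
        struct sg_io_hdr io;
        int fd = open("/dev/sg0", O_RDWR);      /* assumed device node */

        if (fd < 0)
                return 1;
        memset(&io, 0, sizeof(io));
        io.interface_id = 'S';
        io.cmd_len = sizeof(cdb);
        io.cmdp = cdb;
        io.dxfer_direction = SG_DXFER_FROM_DEV;
        io.dxferp = buf;
        io.dxfer_len = sizeof(buf);
        io.sbp = sense;
        io.mx_sb_len = sizeof(sense);
        io.timeout = 5000;                      /* milliseconds */
        if (ioctl(fd, SG_IO, &io) == 0)
                printf("VPD 0xB0 page length: %d bytes\n",
                       (buf[2] << 8) | buf[3]);
        close(fd);
        return 0;
}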
+ */ + sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES; + + return 0; +} + + /** * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth * @sdev: Virtscsi target whose queue depth to change @@ -775,6 +798,7 @@ static struct scsi_host_template virtscsi_host_template_single = { .change_queue_depth = virtscsi_change_queue_depth, .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, + .slave_alloc = virtscsi_device_alloc, .can_queue = 1024, .dma_boundary = UINT_MAX, @@ -795,6 +819,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, + .slave_alloc = virtscsi_device_alloc, .can_queue = 1024, .dma_boundary = UINT_MAX, .use_clustering = ENABLE_CLUSTERING, diff --git a/drivers/soc/qcom/hab/hab.c b/drivers/soc/qcom/hab/hab.c index 1e568c79fcae..3294fc34bdf8 100644 --- a/drivers/soc/qcom/hab/hab.c +++ b/drivers/soc/qcom/hab/hab.c @@ -378,7 +378,7 @@ struct hab_message *hab_vchan_recv(struct uhab_context *ctx, physical_channel_rx_dispatch((unsigned long) vchan->pchan); } - message = hab_msg_dequeue(vchan, !nonblocking_flag); + message = hab_msg_dequeue(vchan, flags); if (!message) { if (nonblocking_flag) ret = -EAGAIN; diff --git a/drivers/soc/qcom/hab/hab.h b/drivers/soc/qcom/hab/hab.h index 72635e70c94c..ffb0637055d4 100644 --- a/drivers/soc/qcom/hab/hab.h +++ b/drivers/soc/qcom/hab/hab.h @@ -147,7 +147,8 @@ struct hab_header { (((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT) -#define HAB_HEADER_SET_SESSION_ID(header, sid) ((header).session_id = (sid)) +#define HAB_HEADER_SET_SESSION_ID(header, sid) \ + ((header).session_id = (sid)) #define HAB_HEADER_SET_SIZE(header, size) \ ((header).id_type_size = ((header).id_type_size & \ @@ -281,8 +282,8 @@ struct uhab_context { }; /* - * array to describe the VM and its MMID configuration as what is connected to - * so this is describing a pchan's remote side + * array to describe the VM and its MMID configuration as + * what is connected to so this is describing a pchan's remote side */ struct vmid_mmid_desc { int vmid; /* remote vmid */ @@ -341,8 +342,9 @@ struct virtual_channel { }; /* - * Struct shared between local and remote, contents are composed by exporter, - * the importer only writes to pdata and local (exporter) domID + * Struct shared between local and remote, contents + * are composed by exporter, the importer only writes + * to pdata and local (exporter) domID */ struct export_desc { uint32_t export_id; @@ -410,16 +412,10 @@ int habmem_hyp_revoke(void *expdata, uint32_t count); void *habmem_imp_hyp_open(void); void habmem_imp_hyp_close(void *priv, int kernel); -long habmem_imp_hyp_map(void *priv, void *impdata, uint32_t count, - uint32_t remotedom, - uint64_t *index, - void **pkva, - int kernel, - uint32_t userflags); +int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param, + struct export_desc *exp, int kernel); -long habmm_imp_hyp_unmap(void *priv, uint64_t index, - uint32_t count, - int kernel); +int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp); int habmem_imp_hyp_mmap(struct file *flip, struct vm_area_struct *vma); @@ -427,7 +423,7 @@ int habmem_imp_hyp_mmap(struct file *flip, struct vm_area_struct *vma); void hab_msg_free(struct hab_message *message); struct hab_message *hab_msg_dequeue(struct virtual_channel *vchan, - int wait_flag); + unsigned int flags); void hab_msg_recv(struct physical_channel *pchan, struct hab_header *header); diff --git 
a/drivers/soc/qcom/hab/hab_mem_linux.c b/drivers/soc/qcom/hab/hab_mem_linux.c index ecc3f52a6662..a779067ee4c4 100644 --- a/drivers/soc/qcom/hab/hab_mem_linux.c +++ b/drivers/soc/qcom/hab/hab_mem_linux.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -29,6 +29,9 @@ struct pages_list { uint32_t userflags; struct file *filp_owner; struct file *filp_mapper; + struct dma_buf *dmabuf; + int32_t export_id; + int32_t vcid; }; struct importer_context { @@ -58,7 +61,7 @@ static int match_file(const void *p, struct file *file, unsigned int fd) } -static int habmem_get_dma_pages(unsigned long address, +static int habmem_get_dma_pages_from_va(unsigned long address, int page_count, struct page **pages) { @@ -142,6 +145,56 @@ err: return rc; } +static int habmem_get_dma_pages_from_fd(int32_t fd, + int page_count, + struct page **pages) +{ + struct dma_buf *dmabuf = NULL; + struct scatterlist *s; + struct sg_table *sg_table = NULL; + struct dma_buf_attachment *attach = NULL; + struct page *page; + int i, j, rc = 0; + + dmabuf = dma_buf_get(fd); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + attach = dma_buf_attach(dmabuf, hab_driver.dev); + if (IS_ERR_OR_NULL(attach)) { + pr_err("dma_buf_attach failed\n"); + goto err; + } + + sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE); + + if (IS_ERR_OR_NULL(sg_table)) { + pr_err("dma_buf_map_attachment failed\n"); + goto err; + } + + for_each_sg(sg_table->sgl, s, sg_table->nents, i) { + page = sg_page(s); + pr_debug("sgl length %d\n", s->length); + + for (j = 0; j < (s->length >> PAGE_SHIFT); j++) { + pages[rc] = nth_page(page, j); + rc++; + if (WARN_ON(rc >= page_count)) + break; + } + } + +err: + if (!IS_ERR_OR_NULL(sg_table)) + dma_buf_unmap_attachment(attach, sg_table, DMA_TO_DEVICE); + if (!IS_ERR_OR_NULL(attach)) + dma_buf_detach(dmabuf, attach); + if (!IS_ERR_OR_NULL(dmabuf)) + dma_buf_put(dmabuf); + return rc; +} + /* * exporter - grant & revoke * degenerate sharabled page list based on CPU friendly virtual "address". 
@@ -165,7 +218,11 @@ int habmem_hyp_grant_user(unsigned long address, down_read(¤t->mm->mmap_sem); if (HABMM_EXP_MEM_TYPE_DMA & flags) { - ret = habmem_get_dma_pages(address, + ret = habmem_get_dma_pages_from_va(address, + page_count, + pages); + } else if (HABMM_EXPIMP_FLAGS_FD & flags) { + ret = habmem_get_dma_pages_from_fd(address, page_count, pages); } else { @@ -260,30 +317,156 @@ void habmem_imp_hyp_close(void *imp_ctx, int kernel) kfree(priv); } -/* - * setup pages, be ready for the following mmap call - * index is output to refer to this imported buffer described by the import data - */ -long habmem_imp_hyp_map(void *imp_ctx, - void *impdata, - uint32_t count, - uint32_t remotedom, - uint64_t *index, - void **pkva, - int kernel, - uint32_t userflags) +static struct sg_table *hab_mem_map_dma_buf( + struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_buf *dmabuf = attachment->dmabuf; + struct pages_list *pglist = dmabuf->priv; + struct sg_table *sgt; + struct scatterlist *sg; + int i; + int ret = 0; + struct page **pages = pglist->pages; + + sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); + + ret = sg_alloc_table(sgt, pglist->npages, GFP_KERNEL); + if (ret) { + kfree(sgt); + return ERR_PTR(-ENOMEM); + } + + for_each_sg(sgt->sgl, sg, pglist->npages, i) { + sg_set_page(sg, pages[i], PAGE_SIZE, 0); + } + + return sgt; +} + + +static void hab_mem_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *sgt, + enum dma_data_direction direction) +{ + sg_free_table(sgt); + kfree(sgt); +} + +static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct page *page; + struct pages_list *pglist; + + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + + /* PHY address */ + unsigned long fault_offset = + (unsigned long)vmf->virtual_address - vma->vm_start + offset; + unsigned long fault_index = fault_offset>>PAGE_SHIFT; + int page_idx; + + if (vma == NULL) + return VM_FAULT_SIGBUS; + + pglist = vma->vm_private_data; + + page_idx = fault_index - pglist->index; + if (page_idx < 0 || page_idx >= pglist->npages) { + pr_err("Out of page array! page_idx %d, pg cnt %ld", + page_idx, pglist->npages); + return VM_FAULT_SIGBUS; + } + + page = pglist->pages[page_idx]; + get_page(page); + vmf->page = page; + return 0; +} + +static void hab_map_open(struct vm_area_struct *vma) +{ +} + +static void hab_map_close(struct vm_area_struct *vma) +{ +} + +static const struct vm_operations_struct habmem_vm_ops = { + .fault = hab_map_fault, + .open = hab_map_open, + .close = hab_map_close, +}; + +static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct pages_list *pglist = dmabuf->priv; + uint32_t obj_size = pglist->npages << PAGE_SHIFT; + + if (vma == NULL) + return VM_FAULT_SIGBUS; + + /* Check for valid size. 
*/ + if (obj_size < vma->vm_end - vma->vm_start) + return -EINVAL; + + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = &habmem_vm_ops; + vma->vm_private_data = pglist; + vma->vm_flags |= VM_MIXEDMAP; + + return 0; +} + +static void hab_mem_dma_buf_release(struct dma_buf *dmabuf) +{ +} + +static void *hab_mem_dma_buf_kmap(struct dma_buf *dmabuf, + unsigned long offset) +{ + return NULL; +} + +static void hab_mem_dma_buf_kunmap(struct dma_buf *dmabuf, + unsigned long offset, + void *ptr) +{ +} + +static struct dma_buf_ops dma_buf_ops = { + .map_dma_buf = hab_mem_map_dma_buf, + .unmap_dma_buf = hab_mem_unmap_dma_buf, + .mmap = hab_mem_mmap, + .release = hab_mem_dma_buf_release, + .kmap_atomic = hab_mem_dma_buf_kmap, + .kunmap_atomic = hab_mem_dma_buf_kunmap, + .kmap = hab_mem_dma_buf_kmap, + .kunmap = hab_mem_dma_buf_kunmap, +}; + +static int habmem_imp_hyp_map_fd(void *imp_ctx, + struct export_desc *exp, + uint32_t userflags, + int32_t *pfd) { struct page **pages; - struct compressed_pfns *pfn_table = (struct compressed_pfns *)impdata; + struct compressed_pfns *pfn_table = + (struct compressed_pfns *)exp->payload; struct pages_list *pglist; struct importer_context *priv = imp_ctx; unsigned long pfn; int i, j, k = 0; + pgprot_t prot = PAGE_KERNEL; + int32_t fd; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); if (!pfn_table || !priv) return -EINVAL; - pages = vmalloc(count * sizeof(struct page *)); + pages = vmalloc(exp->payload_count * sizeof(struct page *)); if (!pages) return -ENOMEM; @@ -303,145 +486,230 @@ long habmem_imp_hyp_map(void *imp_ctx, } pglist->pages = pages; - pglist->npages = count; - pglist->kernel = kernel; - pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT; + pglist->npages = exp->payload_count; + pglist->kernel = 0; + pglist->index = 0; pglist->refcntk = pglist->refcntu = 0; pglist->userflags = userflags; + pglist->export_id = exp->export_id; + pglist->vcid = exp->vcid_remote; + + if (!(userflags & HABMM_IMPORT_FLAGS_CACHED)) + prot = pgprot_writecombine(prot); + + exp_info.ops = &dma_buf_ops; + exp_info.size = exp->payload_count << PAGE_SHIFT; + exp_info.flags = O_RDWR; + exp_info.priv = pglist; + pglist->dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(pglist->dmabuf)) { + vfree(pages); + kfree(pglist); + return PTR_ERR(pglist->dmabuf); + } - *index = pglist->index << PAGE_SHIFT; - - if (kernel) { - pgprot_t prot = PAGE_KERNEL; - - if (!(userflags & HABMM_IMPORT_FLAGS_CACHED)) - prot = pgprot_writecombine(prot); - - pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot); - if (pglist->kva == NULL) { - vfree(pages); - kfree(pglist); - pr_err("%ld pages vmap failed\n", pglist->npages); - return -ENOMEM; - } else { - pr_debug("%ld pages vmap pass, return %pK\n", - pglist->npages, pglist->kva); - } - - pglist->uva = NULL; - pglist->refcntk++; - *pkva = pglist->kva; - *index = (uint64_t)((uintptr_t)pglist->kva); - } else { - pglist->kva = NULL; + fd = dma_buf_fd(pglist->dmabuf, O_CLOEXEC); + if (fd < 0) { + dma_buf_put(pglist->dmabuf); + vfree(pages); + kfree(pglist); + return -EINVAL; } + pglist->refcntk++; + write_lock(&priv->implist_lock); list_add_tail(&pglist->list, &priv->imp_list); priv->cnt++; write_unlock(&priv->implist_lock); - pr_debug("index returned %llx\n", *index); + + *pfd = fd; return 0; } -/* the input index is PHY address shifted for uhab, and kva for khab */ -long habmm_imp_hyp_unmap(void *imp_ctx, - uint64_t index, - uint32_t count, - int kernel) +static int habmem_imp_hyp_map_kva(void *imp_ctx, + struct export_desc 
*exp, + uint32_t userflags, + void **pkva) { + struct page **pages; + struct compressed_pfns *pfn_table = + (struct compressed_pfns *)exp->payload; + struct pages_list *pglist; struct importer_context *priv = imp_ctx; - struct pages_list *pglist, *tmp; - int found = 0; - uint64_t pg_index = index >> PAGE_SHIFT; - - write_lock(&priv->implist_lock); - list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) { - pr_debug("node pglist %pK, kernel %d, pg_index %llx\n", - pglist, pglist->kernel, pg_index); + unsigned long pfn; + int i, j, k = 0; + pgprot_t prot = PAGE_KERNEL; - if (kernel) { - if (pglist->kva == (void *)((uintptr_t)index)) - found = 1; - } else { - if (pglist->index == pg_index) - found = 1; - } + if (!pfn_table || !priv) + return -EINVAL; + pages = vmalloc(exp->payload_count * sizeof(struct page *)); + if (!pages) + return -ENOMEM; + pglist = kzalloc(sizeof(*pglist), GFP_KERNEL); + if (!pglist) { + vfree(pages); + return -ENOMEM; + } - if (found) { - list_del(&pglist->list); - priv->cnt--; - break; + pfn = pfn_table->first_pfn; + for (i = 0; i < pfn_table->nregions; i++) { + for (j = 0; j < pfn_table->region[i].size; j++) { + pages[k] = pfn_to_page(pfn+j); + k++; } + pfn += pfn_table->region[i].size + pfn_table->region[i].space; } - write_unlock(&priv->implist_lock); - if (!found) { - pr_err("failed to find export id on index %llx\n", index); - return -EINVAL; + pglist->pages = pages; + pglist->npages = exp->payload_count; + pglist->kernel = 1; + pglist->refcntk = pglist->refcntu = 0; + pglist->userflags = userflags; + pglist->export_id = exp->export_id; + pglist->vcid = exp->vcid_remote; + + if (!(userflags & HABMM_IMPORT_FLAGS_CACHED)) + prot = pgprot_writecombine(prot); + + pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot); + if (pglist->kva == NULL) { + vfree(pages); + kfree(pglist); + pr_err("%ld pages vmap failed\n", pglist->npages); + return -ENOMEM; } - pr_debug("detach pglist %pK, index %llx, kernel %d, list cnt %d\n", - pglist, pglist->index, pglist->kernel, priv->cnt); + pr_debug("%ld pages vmap pass, return %p\n", + pglist->npages, pglist->kva); - if (kernel) - if (pglist->kva) - vunmap(pglist->kva); + pglist->refcntk++; - vfree(pglist->pages); - kfree(pglist); + write_lock(&priv->implist_lock); + list_add_tail(&pglist->list, &priv->imp_list); + priv->cnt++; + write_unlock(&priv->implist_lock); + + *pkva = pglist->kva; return 0; } -static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +static int habmem_imp_hyp_map_uva(void *imp_ctx, + struct export_desc *exp, + uint32_t userflags, + uint64_t *index) { - struct page *page; + struct page **pages; + struct compressed_pfns *pfn_table = + (struct compressed_pfns *)exp->payload; struct pages_list *pglist; + struct importer_context *priv = imp_ctx; + unsigned long pfn; + int i, j, k = 0; - unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; - - /* PHY address */ - unsigned long fault_offset = - (unsigned long)vmf->virtual_address - vma->vm_start + offset; - unsigned long fault_index = fault_offset>>PAGE_SHIFT; - int page_idx; + if (!pfn_table || !priv) + return -EINVAL; - if (vma == NULL) - return VM_FAULT_SIGBUS; + pages = vmalloc(exp->payload_count * sizeof(struct page *)); + if (!pages) + return -ENOMEM; - pglist = vma->vm_private_data; + pglist = kzalloc(sizeof(*pglist), GFP_KERNEL); + if (!pglist) { + vfree(pages); + return -ENOMEM; + } - page_idx = fault_index - pglist->index; - if (page_idx < 0 || page_idx >= pglist->npages) { - pr_err("Out of page array. 
page_idx %d, pg cnt %ld", - page_idx, pglist->npages); - return VM_FAULT_SIGBUS; + pfn = pfn_table->first_pfn; + for (i = 0; i < pfn_table->nregions; i++) { + for (j = 0; j < pfn_table->region[i].size; j++) { + pages[k] = pfn_to_page(pfn+j); + k++; + } + pfn += pfn_table->region[i].size + pfn_table->region[i].space; } - pr_debug("Fault page index %d\n", page_idx); + pglist->pages = pages; + pglist->npages = exp->payload_count; + pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT; + pglist->refcntk = pglist->refcntu = 0; + pglist->userflags = userflags; + pglist->export_id = exp->export_id; + pglist->vcid = exp->vcid_remote; + + write_lock(&priv->implist_lock); + list_add_tail(&pglist->list, &priv->imp_list); + priv->cnt++; + write_unlock(&priv->implist_lock); + + *index = pglist->index << PAGE_SHIFT; - page = pglist->pages[page_idx]; - get_page(page); - vmf->page = page; return 0; } -static void hab_map_open(struct vm_area_struct *vma) +int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param, + struct export_desc *exp, int kernel) { + int ret = 0; + + if (kernel) + ret = habmem_imp_hyp_map_kva(imp_ctx, exp, + param->flags, + (void **)¶m->kva); + else if (param->flags & HABMM_EXPIMP_FLAGS_FD) + ret = habmem_imp_hyp_map_fd(imp_ctx, exp, + param->flags, + (int32_t *)¶m->kva); + else + ret = habmem_imp_hyp_map_uva(imp_ctx, exp, + param->flags, + ¶m->index); + + return ret; } -static void hab_map_close(struct vm_area_struct *vma) +int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp) { -} + struct importer_context *priv = imp_ctx; + struct pages_list *pglist, *tmp; + int found = 0; -static const struct vm_operations_struct habmem_vm_ops = { + write_lock(&priv->implist_lock); + list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) { + if (pglist->export_id == exp->export_id && + pglist->vcid == exp->vcid_remote) { + found = 1; + } - .fault = hab_map_fault, - .open = hab_map_open, - .close = hab_map_close, -}; + if (found) { + list_del(&pglist->list); + priv->cnt--; + break; + } + } + write_unlock(&priv->implist_lock); + + if (!found) { + pr_err("failed to find export id %u\n", exp->export_id); + return -EINVAL; + } + + pr_debug("detach pglist %p, kernel %d, list cnt %d\n", + pglist, pglist->kernel, priv->cnt); + + if (pglist->kva) + vunmap(pglist->kva); + + if (pglist->dmabuf) + dma_buf_put(pglist->dmabuf); + + vfree(pglist->pages); + kfree(pglist); + + return 0; +} int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma) { diff --git a/drivers/soc/qcom/hab/hab_mimex.c b/drivers/soc/qcom/hab/hab_mimex.c index 67601590908e..00fbeabed4bb 100644 --- a/drivers/soc/qcom/hab/hab_mimex.c +++ b/drivers/soc/qcom/hab/hab_mimex.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -345,25 +345,20 @@ int hab_mem_import(struct uhab_context *ctx, exp->export_id, exp->payload_count, exp->domid_local, *((uint32_t *)exp->payload)); - ret = habmem_imp_hyp_map(ctx->import_ctx, - exp->payload, - exp->payload_count, - exp->domid_local, - &exp->import_index, - &exp->kva, - kernel, - param->flags); + ret = habmem_imp_hyp_map(ctx->import_ctx, param, exp, kernel); + if (ret) { pr_err("Import fail ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n", ret, exp->payload_count, exp->domid_local, *((uint32_t *)exp->payload)); return ret; } - pr_debug("import index %llx, kva %llx, kernel %d\n", - exp->import_index, param->kva, kernel); - param->index = exp->import_index; - param->kva = (uint64_t)exp->kva; + exp->import_index = param->index; + exp->kva = kernel ? (void *)param->kva : NULL; + + pr_debug("import index %llx, kva or fd %llx, kernel %d\n", + exp->import_index, param->kva, kernel); return ret; } @@ -396,13 +391,10 @@ int hab_mem_unimport(struct uhab_context *ctx, if (!found) ret = -EINVAL; else { - ret = habmm_imp_hyp_unmap(ctx->import_ctx, - exp->import_index, - exp->payload_count, - kernel); + ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp); if (ret) { - pr_err("unmap fail id:%d pcnt:%d kernel:%d\n", - exp->export_id, exp->payload_count, kernel); + pr_err("unmap fail id:%d pcnt:%d vcid:%d\n", + exp->export_id, exp->payload_count, exp->vcid_remote); } param->kva = (uint64_t)exp->kva; kfree(exp); diff --git a/drivers/soc/qcom/hab/hab_msg.c b/drivers/soc/qcom/hab/hab_msg.c index 700239a25652..d5c625e8c1c9 100644 --- a/drivers/soc/qcom/hab/hab_msg.c +++ b/drivers/soc/qcom/hab/hab_msg.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -43,16 +43,24 @@ void hab_msg_free(struct hab_message *message) } struct hab_message * -hab_msg_dequeue(struct virtual_channel *vchan, int wait_flag) +hab_msg_dequeue(struct virtual_channel *vchan, unsigned int flags) { struct hab_message *message = NULL; int ret = 0; - - if (wait_flag) { - if (hab_rx_queue_empty(vchan)) - ret = wait_event_interruptible(vchan->rx_queue, - !hab_rx_queue_empty(vchan) || - vchan->otherend_closed); + int wait = !(flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING); + int interruptible = !(flags & HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); + + if (wait) { + if (hab_rx_queue_empty(vchan)) { + if (interruptible) + ret = wait_event_interruptible(vchan->rx_queue, + !hab_rx_queue_empty(vchan) || + vchan->otherend_closed); + else + wait_event(vchan->rx_queue, + !hab_rx_queue_empty(vchan) || + vchan->otherend_closed); + } } /* return all the received messages before the remote close */ @@ -74,7 +82,7 @@ static void hab_msg_queue(struct virtual_channel *vchan, list_add_tail(&message->node, &vchan->rx_list); spin_unlock_bh(&vchan->rx_lock); - wake_up_interruptible(&vchan->rx_queue); + wake_up(&vchan->rx_queue); } static int hab_export_enqueue(struct virtual_channel *vchan, diff --git a/drivers/soc/qcom/hab/hab_vchan.c b/drivers/soc/qcom/hab/hab_vchan.c index e8b8866d570d..2db4db8f321b 100644 --- a/drivers/soc/qcom/hab/hab_vchan.c +++ b/drivers/soc/qcom/hab/hab_vchan.c @@ -110,10 +110,7 @@ hab_vchan_free(struct kref *ref) } spin_unlock_bh(&ctx->imp_lock); if (found) { - habmm_imp_hyp_unmap(ctx->import_ctx, - exp->import_index, - exp->payload_count, - ctx->kernel); + habmm_imp_hyp_unmap(ctx->import_ctx, exp); ctx->import_total--; kfree(exp); } @@ -160,7 +157,7 @@ void hab_vchan_stop(struct virtual_channel *vchan) { if (vchan) { vchan->otherend_closed = 1; - wake_up_interruptible(&vchan->rx_queue); + wake_up(&vchan->rx_queue); } } diff --git a/drivers/soc/qcom/msm_performance.c b/drivers/soc/qcom/msm_performance.c index 1046af031838..1857d369bc94 100644 --- a/drivers/soc/qcom/msm_performance.c +++ b/drivers/soc/qcom/msm_performance.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
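One detail worth spelling out in the hab_msg_dequeue()/hab_vchan_stop() hunks above: once a receiver may sleep uninterruptibly (plain wait_event()), the producer must use wake_up() rather than wake_up_interruptible(), because the latter only wakes TASK_INTERRUPTIBLE sleepers. A minimal model of the flag-driven choice, not the driver's actual code, is sketched below.

#include <linux/types.h>
#include <linux/wait.h>

/* Editorial sketch: pick the wait primitive the same way the reworked
 * hab_msg_dequeue() does, based on the caller's recv flags. */
static int hab_wait_for_data(wait_queue_head_t *q, bool interruptible,
                             bool (*ready)(void *), void *arg)
{
        if (interruptible)
                return wait_event_interruptible(*q, ready(arg));
        wait_event(*q, ready(arg));
        return 0;
}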
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2379,6 +2379,7 @@ end: static void __ref try_hotplug(struct cluster *data) { unsigned int i; + struct device *dev; if (!clusters_inited) return; @@ -2405,7 +2406,8 @@ static void __ref try_hotplug(struct cluster *data) pr_debug("msm_perf: Offlining CPU%d\n", i); cpumask_set_cpu(i, data->offlined_cpus); lock_device_hotplug(); - if (device_offline(get_cpu_device(i))) { + dev = get_cpu_device(i); + if (!dev || device_offline(dev)) { cpumask_clear_cpu(i, data->offlined_cpus); pr_debug("msm_perf: Offlining CPU%d failed\n", i); @@ -2423,7 +2425,8 @@ static void __ref try_hotplug(struct cluster *data) continue; pr_debug("msm_perf: Onlining CPU%d\n", i); lock_device_hotplug(); - if (device_online(get_cpu_device(i))) { + dev = get_cpu_device(i); + if (!dev || device_online(dev)) { pr_debug("msm_perf: Onlining CPU%d failed\n", i); unlock_device_hotplug(); @@ -2442,11 +2445,19 @@ static void __ref try_hotplug(struct cluster *data) static void __ref release_cluster_control(struct cpumask *off_cpus) { int cpu; + struct device *dev; for_each_cpu(cpu, off_cpus) { pr_debug("msm_perf: Release CPU %d\n", cpu); lock_device_hotplug(); - if (!device_online(get_cpu_device(cpu))) + dev = get_cpu_device(cpu); + if (!dev) { + pr_debug("msm_perf: Failed to get CPU%d\n", + cpu); + unlock_device_hotplug(); + continue; + } + if (!device_online(dev)) cpumask_clear_cpu(cpu, off_cpus); unlock_device_hotplug(); } diff --git a/drivers/soc/qcom/qdsp6v2/apr.c b/drivers/soc/qcom/qdsp6v2/apr.c index b1afd02b49bf..fefc348c0027 100644 --- a/drivers/soc/qcom/qdsp6v2/apr.c +++ b/drivers/soc/qcom/qdsp6v2/apr.c @@ -1,4 +1,5 @@ -/* Copyright (c) 2010-2014, 2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2014, 2016, 2018 The Linux Foundation. + * All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -209,6 +210,16 @@ static struct apr_svc_table svc_tbl_voice[] = { }, }; +static const struct apr_svc_table svc_tbl_sdsp[] = { + { + /* Micro Audio Service */ + .name = "MAS", + .idx = 0, + .id = APR_SVC_MAS, + .client_id = APR_CLIENT_AUDIO, + }, +}; + enum apr_subsys_state apr_get_modem_state(void) { return atomic_read(&q6.modem_state); @@ -444,6 +455,9 @@ struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn, */ can_open_channel = false; domain_id = APR_DOMAIN_MODEM; + } else if (!strcmp(dest, "SDSP")) { + domain_id = APR_DOMAIN_SDSP; + pr_debug("APR: SDSP DOMAIN_ID %d\n", domain_id); } else { pr_err("APR: wrong destination\n"); goto done; @@ -472,6 +486,8 @@ struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn, } } pr_debug("%s: modem Up\n", __func__); + } else if (dest_id == APR_DEST_DSPS) { + pr_debug("%s: Sensor DSP Up\n", __func__); } if (apr_get_svc(svc_name, domain_id, &client_id, &svc_idx, &svc_id)) { @@ -624,6 +640,8 @@ void apr_cb_func(void *buf, int len, void *priv) pr_err("APR: Wrong svc :%d\n", svc); return; } + } else if (hdr->src_domain == APR_DOMAIN_SDSP) { + clnt = APR_CLIENT_AUDIO; } else { pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain); return; @@ -700,6 +718,9 @@ int apr_get_svc(const char *svc_name, int domain_id, int *client_id, if ((domain_id == APR_DOMAIN_ADSP)) { tbl = (struct apr_svc_table *)&svc_tbl_qdsp6; size = ARRAY_SIZE(svc_tbl_qdsp6); + } else if (domain_id == APR_DOMAIN_SDSP) { + tbl = (struct apr_svc_table *)&svc_tbl_sdsp; + size = ARRAY_SIZE(svc_tbl_sdsp); } else { tbl = (struct apr_svc_table *)&svc_tbl_voice; size = ARRAY_SIZE(svc_tbl_voice); diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal.c b/drivers/soc/qcom/qdsp6v2/apr_tal.c index 6cffe7be655a..3884667cc12c 100644 --- a/drivers/soc/qcom/qdsp6v2/apr_tal.c +++ b/drivers/soc/qcom/qdsp6v2/apr_tal.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2011, 2013-2014, 2016 The Linux Foundation. +/* Copyright (c) 2010-2011, 2013-2014, 2016, 2018 The Linux Foundation. * All rights reserved. 
* * This program is free software; you can redistribute it and/or modify @@ -38,6 +38,14 @@ static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = { "apr_audio_svc", "apr_voice_svc", }, + { + "", + "", + }, + { + "apr_apps_sdsp", + "apr_apps_sdsp", + }, }; struct apr_svc_ch_dev apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX]; @@ -162,7 +170,8 @@ struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest, if ((clnt >= APR_CLIENT_MAX) || (dest >= APR_DEST_MAX) || (dl >= APR_DL_MAX)) { - pr_err("apr_tal: Invalid params\n"); + pr_err("apr_tal: Invalid params clnt %d dest %d dl %d\n", + clnt, dest, dl); return NULL; } @@ -184,10 +193,12 @@ struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest, pr_debug("apr_tal:Wakeup done\n"); apr_svc_ch[dl][dest][clnt].dest_state = 0; } + rc = smd_named_open_on_edge(svc_names[dest][clnt], dest, - &apr_svc_ch[dl][dest][clnt].ch, - &apr_svc_ch[dl][dest][clnt], - apr_tal_notify); + &apr_svc_ch[dl][dest][clnt].ch, + &apr_svc_ch[dl][dest][clnt], + apr_tal_notify); + if (rc < 0) { pr_err("apr_tal: smd_open failed %s\n", svc_names[dest][clnt]); @@ -256,6 +267,12 @@ static int apr_smd_probe(struct platform_device *pdev) clnt = APR_CLIENT_AUDIO; apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1; wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest); + } else if (pdev->id == APR_DEST_DSPS) { + pr_info("apr_tal:Sensor DSP Is Up\n"); + dest = APR_DEST_DSPS; + clnt = APR_CLIENT_AUDIO; + apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1; + wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest); } else pr_err("apr_tal:Invalid Dest Id: %d\n", pdev->id); @@ -278,6 +295,14 @@ static struct platform_driver apr_modem_driver = { }, }; +static struct platform_driver apr_sdsp_driver = { + .probe = apr_smd_probe, + .driver = { + .name = "apr_apps_sdsp", + .owner = THIS_MODULE, + }, +}; + static int __init apr_tal_init(void) { int i, j, k; @@ -293,6 +318,7 @@ static int __init apr_tal_init(void) } platform_driver_register(&apr_q6_driver); platform_driver_register(&apr_modem_driver); + platform_driver_register(&apr_sdsp_driver); return 0; } device_initcall(apr_tal_init); diff --git a/drivers/soc/qcom/qdsp6v2/apr_v2.c b/drivers/soc/qcom/qdsp6v2/apr_v2.c index 037fb3327ef0..d42f2ff5912e 100644 --- a/drivers/soc/qcom/qdsp6v2/apr_v2.c +++ b/drivers/soc/qcom/qdsp6v2/apr_v2.c @@ -37,6 +37,8 @@ uint16_t apr_get_data_src(struct apr_hdr *hdr) return APR_DEST_MODEM; else if (hdr->src_domain == APR_DOMAIN_ADSP) return APR_DEST_QDSP6; + else if (hdr->src_domain == APR_DOMAIN_SDSP) + return APR_DEST_DSPS; else { pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain); return APR_DEST_MAX; /*RETURN INVALID VALUE*/ @@ -47,6 +49,8 @@ int apr_get_dest_id(char *dest) { if (!strcmp(dest, "ADSP")) return APR_DEST_QDSP6; + else if (!strcmp(dest, "SDSP")) + return APR_DEST_DSPS; else return APR_DEST_MODEM; } diff --git a/drivers/soc/qcom/qdsp6v2/apr_vm.c b/drivers/soc/qcom/qdsp6v2/apr_vm.c index 56592ac91e1b..bd555b6e6f3b 100644 --- a/drivers/soc/qcom/qdsp6v2/apr_vm.c +++ b/drivers/soc/qcom/qdsp6v2/apr_vm.c @@ -529,25 +529,23 @@ static int apr_vm_cb_thread(void *data) { uint32_t apr_rx_buf_len; struct aprv2_vm_ack_rx_pkt_available_t apr_ack; + unsigned long delay = jiffies + (HZ / 2); int status = 0; int ret = 0; while (1) { - apr_rx_buf_len = sizeof(apr_rx_buf); - ret = habmm_socket_recv(hab_handle_rx, - (void *)&apr_rx_buf, - &apr_rx_buf_len, - 0xFFFFFFFF, - 0); + do { + apr_rx_buf_len = sizeof(apr_rx_buf); + ret = habmm_socket_recv(hab_handle_rx, + (void *)&apr_rx_buf, + 
&apr_rx_buf_len, + 0xFFFFFFFF, + 0); + } while (time_before(jiffies, delay) && (ret == -EAGAIN) && + (apr_rx_buf_len == 0)); if (ret) { pr_err("%s: habmm_socket_recv failed %d\n", __func__, ret); - /* - * TODO: depends on the HAB error code, - * may need to implement - * a retry mechanism. - * break if recv failed ? - */ break; } diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c index 7ef16ad5575b..15c3e7e42c6d 100644 --- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c +++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c @@ -83,6 +83,7 @@ static int msm_audio_ion_smmu_map(struct ion_client *client, struct msm_audio_smmu_vm_map_cmd_rsp cmd_rsp; struct msm_audio_smmu_map_data *map_data = NULL; struct msm_audio_smmu_vm_map_cmd smmu_map_cmd; + unsigned long delay = jiffies + (HZ / 2); rc = ion_handle_get_size(client, handle, len); if (rc) { @@ -122,12 +123,15 @@ static int msm_audio_ion_smmu_map(struct ion_client *client, goto err; } - cmd_rsp_size = sizeof(cmd_rsp); - rc = habmm_socket_recv(msm_audio_ion_hab_handle, - (void *)&cmd_rsp, - &cmd_rsp_size, - 0xFFFFFFFF, - 0); + do { + cmd_rsp_size = sizeof(cmd_rsp); + rc = habmm_socket_recv(msm_audio_ion_hab_handle, + (void *)&cmd_rsp, + &cmd_rsp_size, + 0xFFFFFFFF, + 0); + } while (time_before(jiffies, delay) && (rc == -EAGAIN) && + (cmd_rsp_size == 0)); if (rc) { pr_err("%s: habmm_socket_recv failed %d\n", __func__, rc); @@ -181,6 +185,7 @@ static int msm_audio_ion_smmu_unmap(struct ion_client *client, struct msm_audio_smmu_vm_unmap_cmd_rsp cmd_rsp; struct msm_audio_smmu_map_data *map_data, *next; struct msm_audio_smmu_vm_unmap_cmd smmu_unmap_cmd; + unsigned long delay = jiffies + (HZ / 2); /* * Though list_for_each_entry_safe is delete safe, lock @@ -205,12 +210,15 @@ static int msm_audio_ion_smmu_unmap(struct ion_client *client, goto err; } - cmd_rsp_size = sizeof(cmd_rsp); - rc = habmm_socket_recv(msm_audio_ion_hab_handle, - (void *)&cmd_rsp, - &cmd_rsp_size, - 0xFFFFFFFF, - 0); + do { + cmd_rsp_size = sizeof(cmd_rsp); + rc = habmm_socket_recv(msm_audio_ion_hab_handle, + (void *)&cmd_rsp, + &cmd_rsp_size, + 0xFFFFFFFF, + 0); + } while (time_before(jiffies, delay) && + (rc == -EAGAIN) && (cmd_rsp_size == 0)); if (rc) { pr_err("%s: habmm_socket_recv failed %d\n", __func__, rc); diff --git a/drivers/soc/qcom/qdsp6v2/voice_svc.c b/drivers/soc/qcom/qdsp6v2/voice_svc.c index c560ec7d7401..f01ab2499a75 100644 --- a/drivers/soc/qcom/qdsp6v2/voice_svc.c +++ b/drivers/soc/qcom/qdsp6v2/voice_svc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
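The apr_vm.c and msm_audio_ion_vm.c hunks above all wrap habmm_socket_recv() in the same bounded retry: keep retrying -EAGAIN with nothing received until roughly half a second of jiffies has elapsed. A standalone sketch of that pattern is shown below; the prototype is inferred from the call sites in this diff, the helper name is hypothetical, and the hab client header is assumed.

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>
/* hab client header declaring habmm_socket_recv() is assumed here */

static int hab_recv_bounded(u32 handle, void *buf, u32 buf_sz)
{
        unsigned long deadline = jiffies + (HZ / 2);    /* ~500 ms budget */
        u32 size;
        int rc;

        do {
                size = buf_sz;  /* recv rewrites this with the actual length */
                rc = habmm_socket_recv(handle, buf, &size, 0xFFFFFFFF, 0);
        } while (time_before(jiffies, deadline) && rc == -EAGAIN && size == 0);

        return rc;
}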
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -773,7 +773,7 @@ static int voice_svc_probe(struct platform_device *pdev) if (ret) { pr_err("%s: Failed to alloc chrdev\n", __func__); ret = -ENODEV; - goto chrdev_err; + goto done; } voice_svc_dev->major = MAJOR(device_num); @@ -820,8 +820,6 @@ dev_err: class_destroy(voice_svc_class); class_err: unregister_chrdev_region(0, MINOR_NUMBER); -chrdev_err: - kfree(voice_svc_dev); done: return ret; } @@ -835,7 +833,6 @@ static int voice_svc_remove(struct platform_device *pdev) device_destroy(voice_svc_class, device_num); class_destroy(voice_svc_class); unregister_chrdev_region(0, MINOR_NUMBER); - kfree(voice_svc_dev); return 0; } diff --git a/drivers/soc/qcom/rpm_stats.c b/drivers/soc/qcom/rpm_stats.c index b54af9eae8ec..ed7493d063ae 100644 --- a/drivers/soc/qcom/rpm_stats.c +++ b/drivers/soc/qcom/rpm_stats.c @@ -430,7 +430,7 @@ static ssize_t rpmstats_show(struct kobject *kobj, prvdata); } - ret = snprintf(buf, prvdata->len, prvdata->buf); + ret = snprintf(buf, prvdata->len, "%s", prvdata->buf); iounmap(prvdata->reg_base); ioremap_fail: kfree(prvdata); diff --git a/drivers/soc/qcom/scm_qcpe.c b/drivers/soc/qcom/scm_qcpe.c index dfa00f56476a..a788c8c3673e 100644 --- a/drivers/soc/qcom/scm_qcpe.c +++ b/drivers/soc/qcom/scm_qcpe.c @@ -195,32 +195,13 @@ static int scm_remap_error(int err) return -EINVAL; } -static int get_hab_vmid(u32 *mm_ip_id) -{ - int result, i; - struct device_node *hab_node = NULL; - int tmp = -1; - - /* parse device tree*/ - pr_info("parsing hab node in device tree...\n"); - hab_node = of_find_compatible_node(NULL, NULL, "qcom,hab"); - if (hab_node) { - /* read local vmid of this VM, like 0 for host, 1 for AGL GVM */ - result = of_property_read_u32(hab_node, "vmid", &tmp); - if (!result) { - pr_info("local vmid = %d\n", tmp); - *mm_ip_id = MM_QCPE_START + tmp; - return 0; - } - pr_err("failed to read local vmid, result = %d\n", result); - } else { - pr_err("no hab device tree node\n"); - } +#define MAX_SCM_ARGS 10 - pr_info("assuming default vmid = 2\n"); - *mm_ip_id = MM_QCPE_VM2; - return 0; -} +struct qcpe_msg_s { + uint64_t fn_id; + uint64_t arginfo; + uint64_t args[MAX_SCM_ARGS]; +}; static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc) { @@ -228,16 +209,7 @@ static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc) static u32 handle; u32 ret; u32 size_bytes; - - struct smc_params_s { - uint64_t x0; - uint64_t x1; - uint64_t x2; - uint64_t x3; - uint64_t x4; - uint64_t x5; - uint64_t sid; - } smc_params; + struct qcpe_msg_s msg; pr_info("scm_call_qcpe: IN: 0x%x, 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx", fn_id, desc->arginfo, desc->args[0], desc->args[1], @@ -245,15 +217,7 @@ static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc) desc->args[5], desc->args[6]); if (!opened) { - u32 mm_ip_id; - - ret = get_hab_vmid(&mm_ip_id); - if (ret) { - pr_err("scm_call_qcpe: get_hab_vmid failed with ret = %d", - ret); - return ret; - } - ret = habmm_socket_open(&handle, mm_ip_id, 0, 0); + ret = habmm_socket_open(&handle, MM_QCPE_VM1, 0, 0); if (ret) { pr_err("scm_call_qcpe: habmm_socket_open failed with ret = %d", ret); @@ -262,39 +226,45 @@ static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc) opened = true; } - smc_params.x0 = fn_id | 0x40000000; /* SMC64_MASK */ - smc_params.x1 = desc->arginfo; - smc_params.x2 = desc->args[0]; - smc_params.x3 = desc->args[1]; - smc_params.x4 = desc->args[2]; - 
smc_params.x5 = desc->x5; - smc_params.sid = 0; + msg.fn_id = fn_id | 0x40000000; /* SMC64_MASK */ + msg.arginfo = desc->arginfo; + msg.args[0] = desc->args[0]; + msg.args[1] = desc->args[1]; + msg.args[2] = desc->args[2]; + msg.args[3] = desc->x5; + msg.args[4] = 0; - ret = habmm_socket_send(handle, &smc_params, sizeof(smc_params), 0); - if (ret) + ret = habmm_socket_send(handle, &msg, sizeof(msg), 0); + if (ret) { + pr_err("scm_call_qcpe: habmm_socket_send failed with ret = %d", + ret); return ret; + } - size_bytes = sizeof(smc_params); - memset(&smc_params, 0x0, sizeof(smc_params)); + size_bytes = sizeof(msg); + memset(&msg, 0x0, sizeof(msg)); - ret = habmm_socket_recv(handle, &smc_params, &size_bytes, 0, 0); - if (ret) + ret = habmm_socket_recv(handle, &msg, &size_bytes, 0, 0); + if (ret) { + pr_err("scm_call_qcpe: habmm_socket_recv failed with ret = %d", + ret); return ret; + } - if (size_bytes != sizeof(smc_params)) { + if (size_bytes != sizeof(msg)) { pr_err("scm_call_qcpe: expected size: %lu, actual=%u\n", - sizeof(smc_params), size_bytes); + sizeof(msg), size_bytes); return SCM_ERROR; } - desc->ret[0] = smc_params.x1; - desc->ret[1] = smc_params.x2; - desc->ret[2] = smc_params.x3; + desc->ret[0] = msg.args[1]; + desc->ret[1] = msg.args[2]; + desc->ret[2] = msg.args[3]; pr_info("scm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx, 0x%llx", - smc_params.x0, desc->ret[0], desc->ret[1], desc->ret[2]); + msg.args[0], msg.args[1], msg.args[2], msg.args[3]); - return smc_params.x0; + return msg.args[0]; } static u32 smc(u32 cmd_addr) diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 1ddba9ae8c0f..c872a2e54c4b 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -651,7 +651,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = t->rx_buf; t->rx_dma = dma_map_single(&spi->dev, buf, t->len, DMA_FROM_DEVICE); - if (dma_mapping_error(&spi->dev, !t->rx_dma)) { + if (dma_mapping_error(&spi->dev, t->rx_dma)) { ret = -EFAULT; goto err_rx_map; } diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index a6d7029a85ac..581df3ebfc88 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -120,8 +120,8 @@ static int dw_spi_mmio_remove(struct platform_device *pdev) { struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev); - clk_disable_unprepare(dwsmmio->clk); dw_spi_remove_host(&dwsmmio->dws); + clk_disable_unprepare(dwsmmio->clk); return 0; } diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index ed8283e7397a..83b53cd956aa 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -457,6 +457,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, int elements = 0; int word_len, element_count; struct omap2_mcspi_cs *cs = spi->controller_state; + void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; + mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; count = xfer->len; @@ -517,8 +519,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, if (l & OMAP2_MCSPI_CHCONF_TURBO) { elements--; - if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) - & OMAP2_MCSPI_CHSTAT_RXS)) { + if (!mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS)) { u32 w; w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); @@ -536,8 +538,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, return count; } } - if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) - & 
OMAP2_MCSPI_CHSTAT_RXS)) { + if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) { u32 w; w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index e77add01b0e9..48888ab630c2 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -457,7 +457,7 @@ err_free_master: static int sun6i_spi_remove(struct platform_device *pdev) { - pm_runtime_disable(&pdev->dev); + pm_runtime_force_suspend(&pdev->dev); return 0; } diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index 26629b856f91..6c4445863705 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -90,6 +90,15 @@ config ONESHOT_SYNC_USER help Provide a userspace API for creating oneshot sync objects. +config ANDROID_VSOC + tristate "Android Virtual SoC support" + default n + depends on PCI_MSI + ---help--- + This option adds support for the Virtual SoC driver needed to boot + a 'cuttlefish' Android image inside QEmu. The driver interacts with + a QEmu ivshmem device. If built as a module, it will be called vsoc. + source "drivers/staging/android/ion/Kconfig" source "drivers/staging/android/fiq_debugger/Kconfig" diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index b0b47ae4c0ea..8ef816152020 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o obj-$(CONFIG_SYNC) += sync.o sync_debug.o obj-$(CONFIG_SW_SYNC) += sw_sync.o obj-$(CONFIG_ONESHOT_SYNC) += oneshot_sync.o +obj-$(CONFIG_ANDROID_VSOC) += vsoc.o diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO index 8f3ac37bfe12..0c32c00fa700 100644 --- a/drivers/staging/android/TODO +++ b/drivers/staging/android/TODO @@ -25,5 +25,15 @@ ion/ exposes existing cma regions and doesn't reserve unecessarily memory when booting a system which doesn't use ion. +vsoc.c, uapi/vsoc_shm.h + - The current driver uses the same wait queue for all of the futexes in a + region. This will cause false wakeups in regions with a large number of + waiting threads. We should eventually use multiple queues and select the + queue based on the region. + - Add debugfs support for examining the permissions of regions. + - Use ioremap_wc instead of ioremap_nocache. + - Remove VSOC_WAIT_FOR_INCOMING_INTERRUPT ioctl. This functionality has been + superseded by the futex and is there for legacy reasons. 
+ Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: Arve HjønnevÃ¥g <arve@android.com> and Riley Andrews <riandrews@android.com> diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index a9c1d93c557e..cba6b4e17fee 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -330,24 +330,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) mutex_lock(&ashmem_mutex); if (asma->size == 0) { - ret = -EINVAL; - goto out; + mutex_unlock(&ashmem_mutex); + return -EINVAL; } if (!asma->file) { - ret = -EBADF; - goto out; + mutex_unlock(&ashmem_mutex); + return -EBADF; } + mutex_unlock(&ashmem_mutex); + ret = vfs_llseek(asma->file, offset, origin); if (ret < 0) - goto out; + return ret; /** Copy f_pos from backing file, since f_ops->llseek() sets it */ file->f_pos = asma->file->f_pos; - -out: - mutex_unlock(&ashmem_mutex); return ret; } @@ -698,16 +697,14 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, size_t pgstart, pgend; int ret = -EINVAL; + if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) + return -EFAULT; + mutex_lock(&ashmem_mutex); if (unlikely(!asma->file)) goto out_unlock; - if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) { - ret = -EFAULT; - goto out_unlock; - } - /* per custom, you can pass zero for len to mean "everything onward" */ if (!pin.len) pin.len = PAGE_ALIGN(asma->size) - pin.offset; diff --git a/drivers/staging/android/uapi/vsoc_shm.h b/drivers/staging/android/uapi/vsoc_shm.h new file mode 100644 index 000000000000..741b1387c25b --- /dev/null +++ b/drivers/staging/android/uapi/vsoc_shm.h @@ -0,0 +1,303 @@ +/* + * Copyright (C) 2017 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_VSOC_SHM_H +#define _UAPI_LINUX_VSOC_SHM_H + +#include <linux/types.h> + +/** + * A permission is a token that permits a receiver to read and/or write an area + * of memory within a Vsoc region. + * + * An fd_scoped permission grants both read and write access, and can be + * attached to a file description (see open(2)). + * Ownership of the area can then be shared by passing a file descriptor + * among processes. + * + * begin_offset and end_offset define the area of memory that is controlled by + * the permission. owner_offset points to a word, also in shared memory, that + * controls ownership of the area. + * + * ownership of the region expires when the associated file description is + * released. + * + * At most one permission can be attached to each file description. + * + * This is useful when implementing HALs like gralloc that scope and pass + * ownership of shared resources via file descriptors. + * + * The caller is responsibe for doing any fencing. + * + * The calling process will normally identify a currently free area of + * memory. It will construct a proposed fd_scoped_permission_arg structure: + * + * begin_offset and end_offset describe the area being claimed + * + * owner_offset points to the location in shared memory that indicates the + * owner of the area. 
+ * + * owned_value is the value that will be stored in owner_offset iff the + * permission can be granted. It must be different than VSOC_REGION_FREE. + * + * Two fd_scoped_permission structures are compatible if they vary only by + * their owned_value fields. + * + * The driver ensures that, for any group of simultaneous callers proposing + * compatible fd_scoped_permissions, it will accept exactly one of the + * propopsals. The other callers will get a failure with errno of EAGAIN. + * + * A process receiving a file descriptor can identify the region being + * granted using the VSOC_GET_FD_SCOPED_PERMISSION ioctl. + */ +struct fd_scoped_permission { + __u32 begin_offset; + __u32 end_offset; + __u32 owner_offset; + __u32 owned_value; +}; + +/* + * This value represents a free area of memory. The driver expects to see this + * value at owner_offset when creating a permission otherwise it will not do it, + * and will write this value back once the permission is no longer needed. + */ +#define VSOC_REGION_FREE ((__u32)0) + +/** + * ioctl argument for VSOC_CREATE_FD_SCOPE_PERMISSION + */ +struct fd_scoped_permission_arg { + struct fd_scoped_permission perm; + __s32 managed_region_fd; +}; + +#define VSOC_NODE_FREE ((__u32)0) + +/* + * Describes a signal table in shared memory. Each non-zero entry in the + * table indicates that the receiver should signal the futex at the given + * offset. Offsets are relative to the region, not the shared memory window. + * + * interrupt_signalled_offset is used to reliably signal interrupts across the + * vmm boundary. There are two roles: transmitter and receiver. For example, + * in the host_to_guest_signal_table the host is the transmitter and the + * guest is the receiver. The protocol is as follows: + * + * 1. The transmitter should convert the offset of the futex to an offset + * in the signal table [0, (1 << num_nodes_lg2)) + * The transmitter can choose any appropriate hashing algorithm, including + * hash = futex_offset & ((1 << num_nodes_lg2) - 1) + * + * 3. The transmitter should atomically compare and swap futex_offset with 0 + * at hash. There are 3 possible outcomes + * a. The swap fails because the futex_offset is already in the table. + * The transmitter should stop. + * b. Some other offset is in the table. This is a hash collision. The + * transmitter should move to another table slot and try again. One + * possible algorithm: + * hash = (hash + 1) & ((1 << num_nodes_lg2) - 1) + * c. The swap worked. Continue below. + * + * 3. The transmitter atomically swaps 1 with the value at the + * interrupt_signalled_offset. There are two outcomes: + * a. The prior value was 1. In this case an interrupt has already been + * posted. The transmitter is done. + * b. The prior value was 0, indicating that the receiver may be sleeping. + * The transmitter will issue an interrupt. + * + * 4. On waking the receiver immediately exchanges a 0 with the + * interrupt_signalled_offset. If it receives a 0 then this a spurious + * interrupt. That may occasionally happen in the current protocol, but + * should be rare. + * + * 5. The receiver scans the signal table by atomicaly exchanging 0 at each + * location. If a non-zero offset is returned from the exchange the + * receiver wakes all sleepers at the given offset: + * futex((int*)(region_base + old_value), FUTEX_WAKE, MAX_INT); + * + * 6. The receiver thread then does a conditional wait, waking immediately + * if the value at interrupt_signalled_offset is non-zero. 
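The numbered description above is essentially a lock-free post/claim protocol. A compact sketch of the transmitter half, written against plain GCC/Clang atomic builtins so it reads on its own, is given below; only the field names come from this header, and the function itself is an editorial illustration rather than driver code.

#include <stdint.h>

/* Returns 1 if the peer still needs a doorbell interrupt, 0 if one is
 * already pending or the offset was already posted. */
static int signal_table_post(uint32_t *table, uint32_t num_nodes_lg2,
                             uint32_t *interrupt_signalled,
                             uint32_t futex_offset)
{
        uint32_t mask = (1u << num_nodes_lg2) - 1;
        uint32_t hash = futex_offset & mask;            /* pick a slot */

        for (;;) {
                uint32_t expected = 0;

                /* try to claim an empty slot with compare-and-swap */
                if (__atomic_compare_exchange_n(&table[hash], &expected,
                                                futex_offset, 0,
                                                __ATOMIC_SEQ_CST,
                                                __ATOMIC_SEQ_CST))
                        break;                          /* slot claimed */
                if (expected == futex_offset)
                        return 0;                       /* already posted */
                hash = (hash + 1) & mask;               /* collision: next slot */
        }

        /* raise an interrupt only if one is not already in flight */
        return __atomic_exchange_n(interrupt_signalled, 1,
                                   __ATOMIC_SEQ_CST) == 0;
}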
This catches cases + * here additional signals were posted while the table was being scanned. + * On the guest the wait is handled via the VSOC_WAIT_FOR_INCOMING_INTERRUPT + * ioctl. + */ +struct vsoc_signal_table_layout { + /* log_2(Number of signal table entries) */ + __u32 num_nodes_lg2; + /* + * Offset to the first signal table entry relative to the start of the + * region + */ + __u32 futex_uaddr_table_offset; + /* + * Offset to an atomic_t / atomic uint32_t. A non-zero value indicates + * that one or more offsets are currently posted in the table. + * semi-unique access to an entry in the table + */ + __u32 interrupt_signalled_offset; +}; + +#define VSOC_REGION_WHOLE ((__s32)0) +#define VSOC_DEVICE_NAME_SZ 16 + +/** + * Each HAL would (usually) talk to a single device region + * Mulitple entities care about these regions: + * - The ivshmem_server will populate the regions in shared memory + * - The guest kernel will read the region, create minor device nodes, and + * allow interested parties to register for FUTEX_WAKE events in the region + * - HALs will access via the minor device nodes published by the guest kernel + * - Host side processes will access the region via the ivshmem_server: + * 1. Pass name to ivshmem_server at a UNIX socket + * 2. ivshmemserver will reply with 2 fds: + * - host->guest doorbell fd + * - guest->host doorbell fd + * - fd for the shared memory region + * - region offset + * 3. Start a futex receiver thread on the doorbell fd pointed at the + * signal_nodes + */ +struct vsoc_device_region { + __u16 current_version; + __u16 min_compatible_version; + __u32 region_begin_offset; + __u32 region_end_offset; + __u32 offset_of_region_data; + struct vsoc_signal_table_layout guest_to_host_signal_table; + struct vsoc_signal_table_layout host_to_guest_signal_table; + /* Name of the device. Must always be terminated with a '\0', so + * the longest supported device name is 15 characters. + */ + char device_name[VSOC_DEVICE_NAME_SZ]; + /* There are two ways that permissions to access regions are handled: + * - When subdivided_by is VSOC_REGION_WHOLE, any process that can + * open the device node for the region gains complete access to it. + * - When subdivided is set processes that open the region cannot + * access it. Access to a sub-region must be established by invoking + * the VSOC_CREATE_FD_SCOPE_PERMISSION ioctl on the region + * referenced in subdivided_by, providing a fileinstance + * (represented by a fd) opened on this region. + */ + __u32 managed_by; +}; + +/* + * The vsoc layout descriptor. + * The first 4K should be reserved for the shm header and region descriptors. + * The regions should be page aligned. + */ + +struct vsoc_shm_layout_descriptor { + __u16 major_version; + __u16 minor_version; + + /* size of the shm. This may be redundant but nice to have */ + __u32 size; + + /* number of shared memory regions */ + __u32 region_count; + + /* The offset to the start of region descriptors */ + __u32 vsoc_region_desc_offset; +}; + +/* + * This specifies the current version that should be stored in + * vsoc_shm_layout_descriptor.major_version and + * vsoc_shm_layout_descriptor.minor_version. + * It should be updated only if the vsoc_device_region and + * vsoc_shm_layout_descriptor structures have changed. + * Versioning within each region is transferred + * via the min_compatible_version and current_version fields in + * vsoc_device_region. 
The driver does not consult these fields: they are left + * for the HALs and host processes and will change independently of the layout + * version. + */ +#define CURRENT_VSOC_LAYOUT_MAJOR_VERSION 2 +#define CURRENT_VSOC_LAYOUT_MINOR_VERSION 0 + +#define VSOC_CREATE_FD_SCOPED_PERMISSION \ + _IOW(0xF5, 0, struct fd_scoped_permission) +#define VSOC_GET_FD_SCOPED_PERMISSION _IOR(0xF5, 1, struct fd_scoped_permission) + +/* + * This is used to signal the host to scan the guest_to_host_signal_table + * for new futexes to wake. This sends an interrupt if one is not already + * in flight. + */ +#define VSOC_MAYBE_SEND_INTERRUPT_TO_HOST _IO(0xF5, 2) + +/* + * When this returns the guest will scan host_to_guest_signal_table to + * check for new futexes to wake. + */ +/* TODO(ghartman): Consider moving this to the bottom half */ +#define VSOC_WAIT_FOR_INCOMING_INTERRUPT _IO(0xF5, 3) + +/* + * Guest HALs will use this to retrieve the region description after + * opening their device node. + */ +#define VSOC_DESCRIBE_REGION _IOR(0xF5, 4, struct vsoc_device_region) + +/* + * Wake any threads that may be waiting for a host interrupt on this region. + * This is mostly used during shutdown. + */ +#define VSOC_SELF_INTERRUPT _IO(0xF5, 5) + +/* + * This is used to signal the host to scan the guest_to_host_signal_table + * for new futexes to wake. This sends an interrupt unconditionally. + */ +#define VSOC_SEND_INTERRUPT_TO_HOST _IO(0xF5, 6) + +enum wait_types { + VSOC_WAIT_UNDEFINED = 0, + VSOC_WAIT_IF_EQUAL = 1, + VSOC_WAIT_IF_EQUAL_TIMEOUT = 2 +}; + +/* + * Wait for a condition to be true + * + * Note, this is sized and aligned so the 32 bit and 64 bit layouts are + * identical. + */ +struct vsoc_cond_wait { + /* Input: Offset of the 32 bit word to check */ + __u32 offset; + /* Input: Value that will be compared with the offset */ + __u32 value; + /* Monotonic time to wake at in seconds */ + __u64 wake_time_sec; + /* Input: Monotonic time to wait in nanoseconds */ + __u32 wake_time_nsec; + /* Input: Type of wait */ + __u32 wait_type; + /* Output: Number of times the thread woke before returning. */ + __u32 wakes; + /* Ensure that we're 8-byte aligned and 8 byte length for 32/64 bit + * compatibility. + */ + __u32 reserved_1; +}; + +#define VSOC_COND_WAIT _IOWR(0xF5, 7, struct vsoc_cond_wait) + +/* Wake any local threads waiting at the offset given in arg */ +#define VSOC_COND_WAKE _IO(0xF5, 8) + +#endif /* _UAPI_LINUX_VSOC_SHM_H */ diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c new file mode 100644 index 000000000000..587c66d709b9 --- /dev/null +++ b/drivers/staging/android/vsoc.c @@ -0,0 +1,1169 @@ +/* + * drivers/android/staging/vsoc.c + * + * Android Virtual System on a Chip (VSoC) driver + * + * Copyright (C) 2017 Google, Inc. + * + * Author: ghartman@google.com + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * + * Based on drivers/char/kvm_ivshmem.c - driver for KVM Inter-VM shared memory + * Copyright 2009 Cam Macdonell <cam@cs.ualberta.ca> + * + * Based on cirrusfb.c and 8139cp.c: + * Copyright 1999-2001 Jeff Garzik + * Copyright 2001-2004 Jeff Garzik + */ + +#include <linux/dma-mapping.h> +#include <linux/freezer.h> +#include <linux/futex.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/proc_fs.h> +#include <linux/sched.h> +#include <linux/syscalls.h> +#include <linux/uaccess.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/cdev.h> +#include <linux/file.h> +#include "uapi/vsoc_shm.h" + +#define VSOC_DEV_NAME "vsoc" + +/* + * Description of the ivshmem-doorbell PCI device used by QEmu. These + * constants follow docs/specs/ivshmem-spec.txt, which can be found in + * the QEmu repository. This was last reconciled with the version that + * came out with 2.8 + */ + +/* + * These constants are determined KVM Inter-VM shared memory device + * register offsets + */ +enum { + INTR_MASK = 0x00, /* Interrupt Mask */ + INTR_STATUS = 0x04, /* Interrupt Status */ + IV_POSITION = 0x08, /* VM ID */ + DOORBELL = 0x0c, /* Doorbell */ +}; + +static const int REGISTER_BAR; /* Equal to 0 */ +static const int MAX_REGISTER_BAR_LEN = 0x100; +/* + * The MSI-x BAR is not used directly. + * + * static const int MSI_X_BAR = 1; + */ +static const int SHARED_MEMORY_BAR = 2; + +struct vsoc_region_data { + char name[VSOC_DEVICE_NAME_SZ + 1]; + wait_queue_head_t interrupt_wait_queue; + /* TODO(b/73664181): Use multiple futex wait queues */ + wait_queue_head_t futex_wait_queue; + /* Flag indicating that an interrupt has been signalled by the host. */ + atomic_t *incoming_signalled; + /* Flag indicating the guest has signalled the host. */ + atomic_t *outgoing_signalled; + int irq_requested; + int device_created; +}; + +struct vsoc_device { + /* Kernel virtual address of REGISTER_BAR. */ + void __iomem *regs; + /* Physical address of SHARED_MEMORY_BAR. */ + phys_addr_t shm_phys_start; + /* Kernel virtual address of SHARED_MEMORY_BAR. */ + void *kernel_mapped_shm; + /* Size of the entire shared memory window in bytes. */ + size_t shm_size; + /* + * Pointer to the virtual address of the shared memory layout structure. + * This is probably identical to kernel_mapped_shm, but saving this + * here saves a lot of annoying casts. + */ + struct vsoc_shm_layout_descriptor *layout; + /* + * Points to a table of region descriptors in the kernel's virtual + * address space. Calculated from + * vsoc_shm_layout_descriptor.vsoc_region_desc_offset + */ + struct vsoc_device_region *regions; + /* Head of a list of permissions that have been granted. */ + struct list_head permissions; + struct pci_dev *dev; + /* Per-region (and therefore per-interrupt) information. */ + struct vsoc_region_data *regions_data; + /* + * Table of msi-x entries. This has to be separated from struct + * vsoc_region_data because the kernel deals with them as an array. + */ + struct msix_entry *msix_entries; + /* + * Flags that indicate what we've initialzied. These are used to do an + * orderly cleanup of the device. 
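	 * Each flag is set in vsoc_probe_device() as the matching resource is
	 * acquired and cleared again in vsoc_remove_device() as it is
	 * released, so the probe error paths can simply call
	 * vsoc_remove_device() on a partially initialized device.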
+ */ + char enabled_device; + char requested_regions; + char cdev_added; + char class_added; + char msix_enabled; + /* Mutex that protectes the permission list */ + struct mutex mtx; + /* Major number assigned by the kernel */ + int major; + + struct cdev cdev; + struct class *class; +}; + +static struct vsoc_device vsoc_dev; + +/* + * TODO(ghartman): Add a /sys filesystem entry that summarizes the permissions. + */ + +struct fd_scoped_permission_node { + struct fd_scoped_permission permission; + struct list_head list; +}; + +struct vsoc_private_data { + struct fd_scoped_permission_node *fd_scoped_permission_node; +}; + +static long vsoc_ioctl(struct file *, unsigned int, unsigned long); +static int vsoc_mmap(struct file *, struct vm_area_struct *); +static int vsoc_open(struct inode *, struct file *); +static int vsoc_release(struct inode *, struct file *); +static ssize_t vsoc_read(struct file *, char *, size_t, loff_t *); +static ssize_t vsoc_write(struct file *, const char *, size_t, loff_t *); +static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin); +static int do_create_fd_scoped_permission( + struct vsoc_device_region *region_p, + struct fd_scoped_permission_node *np, + struct fd_scoped_permission_arg *__user arg); +static void do_destroy_fd_scoped_permission( + struct vsoc_device_region *owner_region_p, + struct fd_scoped_permission *perm); +static long do_vsoc_describe_region(struct file *, + struct vsoc_device_region __user *); +static ssize_t vsoc_get_area(struct file *filp, __u32 *perm_off); + +/** + * Validate arguments on entry points to the driver. + */ +inline int vsoc_validate_inode(struct inode *inode) +{ + if (iminor(inode) >= vsoc_dev.layout->region_count) { + dev_err(&vsoc_dev.dev->dev, + "describe_region: invalid region %d\n", iminor(inode)); + return -ENODEV; + } + return 0; +} + +inline int vsoc_validate_filep(struct file *filp) +{ + int ret = vsoc_validate_inode(file_inode(filp)); + + if (ret) + return ret; + if (!filp->private_data) { + dev_err(&vsoc_dev.dev->dev, + "No private data on fd, region %d\n", + iminor(file_inode(filp))); + return -EBADFD; + } + return 0; +} + +/* Converts from shared memory offset to virtual address */ +static inline void *shm_off_to_virtual_addr(__u32 offset) +{ + return vsoc_dev.kernel_mapped_shm + offset; +} + +/* Converts from shared memory offset to physical address */ +static inline phys_addr_t shm_off_to_phys_addr(__u32 offset) +{ + return vsoc_dev.shm_phys_start + offset; +} + +/** + * Convenience functions to obtain the region from the inode or file. + * Dangerous to call before validating the inode/file. 
+ */ +static inline struct vsoc_device_region *vsoc_region_from_inode( + struct inode *inode) +{ + return &vsoc_dev.regions[iminor(inode)]; +} + +static inline struct vsoc_device_region *vsoc_region_from_filep( + struct file *inode) +{ + return vsoc_region_from_inode(file_inode(inode)); +} + +static inline uint32_t vsoc_device_region_size(struct vsoc_device_region *r) +{ + return r->region_end_offset - r->region_begin_offset; +} + +static const struct file_operations vsoc_ops = { + .owner = THIS_MODULE, + .open = vsoc_open, + .mmap = vsoc_mmap, + .read = vsoc_read, + .unlocked_ioctl = vsoc_ioctl, + .compat_ioctl = vsoc_ioctl, + .write = vsoc_write, + .llseek = vsoc_lseek, + .release = vsoc_release, +}; + +static struct pci_device_id vsoc_id_table[] = { + {0x1af4, 0x1110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0}, +}; + +MODULE_DEVICE_TABLE(pci, vsoc_id_table); + +static void vsoc_remove_device(struct pci_dev *pdev); +static int vsoc_probe_device(struct pci_dev *pdev, + const struct pci_device_id *ent); + +static struct pci_driver vsoc_pci_driver = { + .name = "vsoc", + .id_table = vsoc_id_table, + .probe = vsoc_probe_device, + .remove = vsoc_remove_device, +}; + +static int do_create_fd_scoped_permission( + struct vsoc_device_region *region_p, + struct fd_scoped_permission_node *np, + struct fd_scoped_permission_arg *__user arg) +{ + struct file *managed_filp; + s32 managed_fd; + atomic_t *owner_ptr = NULL; + struct vsoc_device_region *managed_region_p; + + if (copy_from_user(&np->permission, &arg->perm, sizeof(*np)) || + copy_from_user(&managed_fd, + &arg->managed_region_fd, sizeof(managed_fd))) { + return -EFAULT; + } + managed_filp = fdget(managed_fd).file; + /* Check that it's a valid fd, */ + if (!managed_filp || vsoc_validate_filep(managed_filp)) + return -EPERM; + /* EEXIST if the given fd already has a permission. 
*/ + if (((struct vsoc_private_data *)managed_filp->private_data)-> + fd_scoped_permission_node) + return -EEXIST; + managed_region_p = vsoc_region_from_filep(managed_filp); + /* Check that the provided region is managed by this one */ + if (&vsoc_dev.regions[managed_region_p->managed_by] != region_p) + return -EPERM; + /* The area must be well formed and have non-zero size */ + if (np->permission.begin_offset >= np->permission.end_offset) + return -EINVAL; + /* The area must fit in the memory window */ + if (np->permission.end_offset > + vsoc_device_region_size(managed_region_p)) + return -ERANGE; + /* The area must be in the region data section */ + if (np->permission.begin_offset < + managed_region_p->offset_of_region_data) + return -ERANGE; + /* The area must be page aligned */ + if (!PAGE_ALIGNED(np->permission.begin_offset) || + !PAGE_ALIGNED(np->permission.end_offset)) + return -EINVAL; + /* Owner offset must be naturally aligned in the window */ + if (np->permission.owner_offset & + (sizeof(np->permission.owner_offset) - 1)) + return -EINVAL; + /* The owner flag must reside in the owner memory */ + if (np->permission.owner_offset + sizeof(np->permission.owner_offset) > + vsoc_device_region_size(region_p)) + return -ERANGE; + /* The owner flag must reside in the data section */ + if (np->permission.owner_offset < region_p->offset_of_region_data) + return -EINVAL; + /* The owner value must change to claim the memory */ + if (np->permission.owned_value == VSOC_REGION_FREE) + return -EINVAL; + owner_ptr = + (atomic_t *)shm_off_to_virtual_addr(region_p->region_begin_offset + + np->permission.owner_offset); + /* We've already verified that this is in the shared memory window, so + * it should be safe to write to this address. + */ + if (atomic_cmpxchg(owner_ptr, + VSOC_REGION_FREE, + np->permission.owned_value) != VSOC_REGION_FREE) { + return -EBUSY; + } + ((struct vsoc_private_data *)managed_filp->private_data)-> + fd_scoped_permission_node = np; + /* The file offset needs to be adjusted if the calling + * process did any read/write operations on the fd + * before creating the permission. + */ + if (managed_filp->f_pos) { + if (managed_filp->f_pos > np->permission.end_offset) { + /* If the offset is beyond the permission end, set it + * to the end. + */ + managed_filp->f_pos = np->permission.end_offset; + } else { + /* If the offset is within the permission interval + * keep it there otherwise reset it to zero. 
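	 * Taken together with the clamp above, the adjustment works out as
	 * follows (illustrative numbers): with a permission window of
	 * 0x2000..0x6000, an f_pos of 0x8000 is clamped to 0x6000 (the end
	 * offset), an f_pos of 0x3000 is rebased to 0x1000 (f_pos minus the
	 * begin offset), and an f_pos of 0x1000, which lies before the
	 * window, is reset to 0.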
+ */ + if (managed_filp->f_pos < np->permission.begin_offset) { + managed_filp->f_pos = 0; + } else { + managed_filp->f_pos -= + np->permission.begin_offset; + } + } + } + return 0; +} + +static void do_destroy_fd_scoped_permission_node( + struct vsoc_device_region *owner_region_p, + struct fd_scoped_permission_node *node) +{ + if (node) { + do_destroy_fd_scoped_permission(owner_region_p, + &node->permission); + mutex_lock(&vsoc_dev.mtx); + list_del(&node->list); + mutex_unlock(&vsoc_dev.mtx); + kfree(node); + } +} + +static void do_destroy_fd_scoped_permission( + struct vsoc_device_region *owner_region_p, + struct fd_scoped_permission *perm) +{ + atomic_t *owner_ptr = NULL; + int prev = 0; + + if (!perm) + return; + owner_ptr = (atomic_t *)shm_off_to_virtual_addr( + owner_region_p->region_begin_offset + perm->owner_offset); + prev = atomic_xchg(owner_ptr, VSOC_REGION_FREE); + if (prev != perm->owned_value) + dev_err(&vsoc_dev.dev->dev, + "%x-%x: owner (%s) %x: expected to be %x was %x", + perm->begin_offset, perm->end_offset, + owner_region_p->device_name, perm->owner_offset, + perm->owned_value, prev); +} + +static long do_vsoc_describe_region(struct file *filp, + struct vsoc_device_region __user *dest) +{ + struct vsoc_device_region *region_p; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + region_p = vsoc_region_from_filep(filp); + if (copy_to_user(dest, region_p, sizeof(*region_p))) + return -EFAULT; + return 0; +} + +/** + * Implements the inner logic of cond_wait. Copies to and from userspace are + * done in the helper function below. + */ +static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg) +{ + DEFINE_WAIT(wait); + u32 region_number = iminor(file_inode(filp)); + struct vsoc_region_data *data = vsoc_dev.regions_data + region_number; + struct hrtimer_sleeper timeout, *to = NULL; + int ret = 0; + struct vsoc_device_region *region_p = vsoc_region_from_filep(filp); + atomic_t *address = NULL; + struct timespec ts; + + /* Ensure that the offset is aligned */ + if (arg->offset & (sizeof(uint32_t) - 1)) + return -EADDRNOTAVAIL; + /* Ensure that the offset is within shared memory */ + if (((uint64_t)arg->offset) + region_p->region_begin_offset + + sizeof(uint32_t) > region_p->region_end_offset) + return -E2BIG; + address = shm_off_to_virtual_addr(region_p->region_begin_offset + + arg->offset); + + /* Ensure that the type of wait is valid */ + switch (arg->wait_type) { + case VSOC_WAIT_IF_EQUAL: + break; + case VSOC_WAIT_IF_EQUAL_TIMEOUT: + to = &timeout; + break; + default: + return -EINVAL; + } + + if (to) { + /* Copy the user-supplied timesec into the kernel structure. + * We do things this way to flatten differences between 32 bit + * and 64 bit timespecs. + */ + ts.tv_sec = arg->wake_time_sec; + ts.tv_nsec = arg->wake_time_nsec; + + if (!timespec_valid(&ts)) + return -EINVAL; + hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS); + hrtimer_set_expires_range_ns(&to->timer, timespec_to_ktime(ts), + current->timer_slack_ns); + + hrtimer_init_sleeper(to, current); + } + + while (1) { + prepare_to_wait(&data->futex_wait_queue, &wait, + TASK_INTERRUPTIBLE); + /* + * Check the sentinel value after prepare_to_wait. If the value + * changes after this check the writer will call signal, + * changing the task state from INTERRUPTIBLE to RUNNING. That + * will ensure that schedule() will eventually schedule this + * task. 
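	 * This is the usual prepare_to_wait() idiom for avoiding lost
	 * wake-ups: the task state is set before the condition is tested,
	 * so a wake-up that races with the test leaves the task runnable
	 * instead of being missed.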
+ */ + if (atomic_read(address) != arg->value) { + ret = 0; + break; + } + if (to) { + hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); + if (likely(to->task)) + freezable_schedule(); + hrtimer_cancel(&to->timer); + if (!to->task) { + ret = -ETIMEDOUT; + break; + } + } else { + freezable_schedule(); + } + /* Count the number of times that we woke up. This is useful + * for unit testing. + */ + ++arg->wakes; + if (signal_pending(current)) { + ret = -EINTR; + break; + } + } + finish_wait(&data->futex_wait_queue, &wait); + if (to) + destroy_hrtimer_on_stack(&to->timer); + return ret; +} + +/** + * Handles the details of copying from/to userspace to ensure that the copies + * happen on all of the return paths of cond_wait. + */ +static int do_vsoc_cond_wait(struct file *filp, + struct vsoc_cond_wait __user *untrusted_in) +{ + struct vsoc_cond_wait arg; + int rval = 0; + + if (copy_from_user(&arg, untrusted_in, sizeof(arg))) + return -EFAULT; + /* wakes is an out parameter. Initialize it to something sensible. */ + arg.wakes = 0; + rval = handle_vsoc_cond_wait(filp, &arg); + if (copy_to_user(untrusted_in, &arg, sizeof(arg))) + return -EFAULT; + return rval; +} + +static int do_vsoc_cond_wake(struct file *filp, uint32_t offset) +{ + struct vsoc_device_region *region_p = vsoc_region_from_filep(filp); + u32 region_number = iminor(file_inode(filp)); + struct vsoc_region_data *data = vsoc_dev.regions_data + region_number; + /* Ensure that the offset is aligned */ + if (offset & (sizeof(uint32_t) - 1)) + return -EADDRNOTAVAIL; + /* Ensure that the offset is within shared memory */ + if (((uint64_t)offset) + region_p->region_begin_offset + + sizeof(uint32_t) > region_p->region_end_offset) + return -E2BIG; + /* + * TODO(b/73664181): Use multiple futex wait queues. + * We need to wake every sleeper when the condition changes. Typically + * only a single thread will be waiting on the condition, but there + * are exceptions. The worst case is about 10 threads. 
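	 * Until then VSOC_COND_WAKE wakes every VSOC_COND_WAIT sleeper in
	 * the region, whichever word each of them is watching; a woken
	 * thread re-checks its own word and either returns or goes back to
	 * sleep.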
+ */ + wake_up_interruptible_all(&data->futex_wait_queue); + return 0; +} + +static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int rv = 0; + struct vsoc_device_region *region_p; + u32 reg_num; + struct vsoc_region_data *reg_data; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + region_p = vsoc_region_from_filep(filp); + reg_num = iminor(file_inode(filp)); + reg_data = vsoc_dev.regions_data + reg_num; + switch (cmd) { + case VSOC_CREATE_FD_SCOPED_PERMISSION: + { + struct fd_scoped_permission_node *node = NULL; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + /* We can't allocate memory for the permission */ + if (!node) + return -ENOMEM; + INIT_LIST_HEAD(&node->list); + rv = do_create_fd_scoped_permission( + region_p, + node, + (struct fd_scoped_permission_arg __user *)arg); + if (!rv) { + mutex_lock(&vsoc_dev.mtx); + list_add(&node->list, &vsoc_dev.permissions); + mutex_unlock(&vsoc_dev.mtx); + } else { + kfree(node); + return rv; + } + } + break; + + case VSOC_GET_FD_SCOPED_PERMISSION: + { + struct fd_scoped_permission_node *node = + ((struct vsoc_private_data *)filp->private_data)-> + fd_scoped_permission_node; + if (!node) + return -ENOENT; + if (copy_to_user + ((struct fd_scoped_permission __user *)arg, + &node->permission, sizeof(node->permission))) + return -EFAULT; + } + break; + + case VSOC_MAYBE_SEND_INTERRUPT_TO_HOST: + if (!atomic_xchg( + reg_data->outgoing_signalled, + 1)) { + writel(reg_num, vsoc_dev.regs + DOORBELL); + return 0; + } else { + return -EBUSY; + } + break; + + case VSOC_SEND_INTERRUPT_TO_HOST: + writel(reg_num, vsoc_dev.regs + DOORBELL); + return 0; + + case VSOC_WAIT_FOR_INCOMING_INTERRUPT: + wait_event_interruptible( + reg_data->interrupt_wait_queue, + (atomic_read(reg_data->incoming_signalled) != 0)); + break; + + case VSOC_DESCRIBE_REGION: + return do_vsoc_describe_region( + filp, + (struct vsoc_device_region __user *)arg); + + case VSOC_SELF_INTERRUPT: + atomic_set(reg_data->incoming_signalled, 1); + wake_up_interruptible(®_data->interrupt_wait_queue); + break; + + case VSOC_COND_WAIT: + return do_vsoc_cond_wait(filp, + (struct vsoc_cond_wait __user *)arg); + case VSOC_COND_WAKE: + return do_vsoc_cond_wake(filp, arg); + + default: + return -EINVAL; + } + return 0; +} + +static ssize_t vsoc_read(struct file *filp, char *buffer, size_t len, + loff_t *poffset) +{ + __u32 area_off; + void *area_p; + ssize_t area_len; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + area_len = vsoc_get_area(filp, &area_off); + area_p = shm_off_to_virtual_addr(area_off); + area_p += *poffset; + area_len -= *poffset; + if (area_len <= 0) + return 0; + if (area_len < len) + len = area_len; + if (copy_to_user(buffer, area_p, len)) + return -EFAULT; + *poffset += len; + return len; +} + +static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin) +{ + ssize_t area_len = 0; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + area_len = vsoc_get_area(filp, NULL); + switch (origin) { + case SEEK_SET: + break; + + case SEEK_CUR: + if (offset > 0 && offset + filp->f_pos < 0) + return -EOVERFLOW; + offset += filp->f_pos; + break; + + case SEEK_END: + if (offset > 0 && offset + area_len < 0) + return -EOVERFLOW; + offset += area_len; + break; + + case SEEK_DATA: + if (offset >= area_len) + return -EINVAL; + if (offset < 0) + offset = 0; + break; + + case SEEK_HOLE: + /* Next hole is always the end of the region, unless offset is + * beyond that + */ + if (offset < 
area_len) + offset = area_len; + break; + + default: + return -EINVAL; + } + + if (offset < 0 || offset > area_len) + return -EINVAL; + filp->f_pos = offset; + + return offset; +} + +static ssize_t vsoc_write(struct file *filp, const char *buffer, + size_t len, loff_t *poffset) +{ + __u32 area_off; + void *area_p; + ssize_t area_len; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + area_len = vsoc_get_area(filp, &area_off); + area_p = shm_off_to_virtual_addr(area_off); + area_p += *poffset; + area_len -= *poffset; + if (area_len <= 0) + return 0; + if (area_len < len) + len = area_len; + if (copy_from_user(area_p, buffer, len)) + return -EFAULT; + *poffset += len; + return len; +} + +static irqreturn_t vsoc_interrupt(int irq, void *region_data_v) +{ + struct vsoc_region_data *region_data = + (struct vsoc_region_data *)region_data_v; + int reg_num = region_data - vsoc_dev.regions_data; + + if (unlikely(!region_data)) + return IRQ_NONE; + + if (unlikely(reg_num < 0 || + reg_num >= vsoc_dev.layout->region_count)) { + dev_err(&vsoc_dev.dev->dev, + "invalid irq @%p reg_num=0x%04x\n", + region_data, reg_num); + return IRQ_NONE; + } + if (unlikely(vsoc_dev.regions_data + reg_num != region_data)) { + dev_err(&vsoc_dev.dev->dev, + "irq not aligned @%p reg_num=0x%04x\n", + region_data, reg_num); + return IRQ_NONE; + } + wake_up_interruptible(®ion_data->interrupt_wait_queue); + return IRQ_HANDLED; +} + +static int vsoc_probe_device(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int result; + int i; + resource_size_t reg_size; + dev_t devt; + + vsoc_dev.dev = pdev; + result = pci_enable_device(pdev); + if (result) { + dev_err(&pdev->dev, + "pci_enable_device failed %s: error %d\n", + pci_name(pdev), result); + return result; + } + vsoc_dev.enabled_device = 1; + result = pci_request_regions(pdev, "vsoc"); + if (result < 0) { + dev_err(&pdev->dev, "pci_request_regions failed\n"); + vsoc_remove_device(pdev); + return -EBUSY; + } + vsoc_dev.requested_regions = 1; + /* Set up the control registers in BAR 0 */ + reg_size = pci_resource_len(pdev, REGISTER_BAR); + if (reg_size > MAX_REGISTER_BAR_LEN) + vsoc_dev.regs = + pci_iomap(pdev, REGISTER_BAR, MAX_REGISTER_BAR_LEN); + else + vsoc_dev.regs = pci_iomap(pdev, REGISTER_BAR, reg_size); + + if (!vsoc_dev.regs) { + dev_err(&pdev->dev, + "cannot ioremap registers of size %zu\n", + (size_t)reg_size); + vsoc_remove_device(pdev); + return -EBUSY; + } + + /* Map the shared memory in BAR 2 */ + vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR); + vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR); + + dev_info(&pdev->dev, "shared memory @ DMA %p size=0x%zx\n", + (void *)vsoc_dev.shm_phys_start, vsoc_dev.shm_size); + /* TODO(ghartman): ioremap_wc should work here */ + vsoc_dev.kernel_mapped_shm = ioremap_nocache( + vsoc_dev.shm_phys_start, vsoc_dev.shm_size); + if (!vsoc_dev.kernel_mapped_shm) { + dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n"); + vsoc_remove_device(pdev); + return -EBUSY; + } + + vsoc_dev.layout = + (struct vsoc_shm_layout_descriptor *)vsoc_dev.kernel_mapped_shm; + dev_info(&pdev->dev, "major_version: %d\n", + vsoc_dev.layout->major_version); + dev_info(&pdev->dev, "minor_version: %d\n", + vsoc_dev.layout->minor_version); + dev_info(&pdev->dev, "size: 0x%x\n", vsoc_dev.layout->size); + dev_info(&pdev->dev, "regions: %d\n", vsoc_dev.layout->region_count); + if (vsoc_dev.layout->major_version != + CURRENT_VSOC_LAYOUT_MAJOR_VERSION) { + dev_err(&vsoc_dev.dev->dev, + "driver 
supports only major_version %d\n", + CURRENT_VSOC_LAYOUT_MAJOR_VERSION); + vsoc_remove_device(pdev); + return -EBUSY; + } + result = alloc_chrdev_region(&devt, 0, vsoc_dev.layout->region_count, + VSOC_DEV_NAME); + if (result) { + dev_err(&vsoc_dev.dev->dev, "alloc_chrdev_region failed\n"); + vsoc_remove_device(pdev); + return -EBUSY; + } + vsoc_dev.major = MAJOR(devt); + cdev_init(&vsoc_dev.cdev, &vsoc_ops); + vsoc_dev.cdev.owner = THIS_MODULE; + result = cdev_add(&vsoc_dev.cdev, devt, vsoc_dev.layout->region_count); + if (result) { + dev_err(&vsoc_dev.dev->dev, "cdev_add error\n"); + vsoc_remove_device(pdev); + return -EBUSY; + } + vsoc_dev.cdev_added = 1; + vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME); + if (IS_ERR(vsoc_dev.class)) { + dev_err(&vsoc_dev.dev->dev, "class_create failed\n"); + vsoc_remove_device(pdev); + return PTR_ERR(vsoc_dev.class); + } + vsoc_dev.class_added = 1; + vsoc_dev.regions = (struct vsoc_device_region *) + (vsoc_dev.kernel_mapped_shm + + vsoc_dev.layout->vsoc_region_desc_offset); + vsoc_dev.msix_entries = kcalloc( + vsoc_dev.layout->region_count, + sizeof(vsoc_dev.msix_entries[0]), GFP_KERNEL); + if (!vsoc_dev.msix_entries) { + dev_err(&vsoc_dev.dev->dev, + "unable to allocate msix_entries\n"); + vsoc_remove_device(pdev); + return -ENOSPC; + } + vsoc_dev.regions_data = kcalloc( + vsoc_dev.layout->region_count, + sizeof(vsoc_dev.regions_data[0]), GFP_KERNEL); + if (!vsoc_dev.regions_data) { + dev_err(&vsoc_dev.dev->dev, + "unable to allocate regions' data\n"); + vsoc_remove_device(pdev); + return -ENOSPC; + } + for (i = 0; i < vsoc_dev.layout->region_count; ++i) + vsoc_dev.msix_entries[i].entry = i; + + result = pci_enable_msix_exact(vsoc_dev.dev, vsoc_dev.msix_entries, + vsoc_dev.layout->region_count); + if (result) { + dev_info(&pdev->dev, "pci_enable_msix failed: %d\n", result); + vsoc_remove_device(pdev); + return -ENOSPC; + } + /* Check that all regions are well formed */ + for (i = 0; i < vsoc_dev.layout->region_count; ++i) { + const struct vsoc_device_region *region = vsoc_dev.regions + i; + + if (!PAGE_ALIGNED(region->region_begin_offset) || + !PAGE_ALIGNED(region->region_end_offset)) { + dev_err(&vsoc_dev.dev->dev, + "region %d not aligned (%x:%x)", i, + region->region_begin_offset, + region->region_end_offset); + vsoc_remove_device(pdev); + return -EFAULT; + } + if (region->region_begin_offset >= region->region_end_offset || + region->region_end_offset > vsoc_dev.shm_size) { + dev_err(&vsoc_dev.dev->dev, + "region %d offsets are wrong: %x %x %zx", + i, region->region_begin_offset, + region->region_end_offset, vsoc_dev.shm_size); + vsoc_remove_device(pdev); + return -EFAULT; + } + if (region->managed_by >= vsoc_dev.layout->region_count) { + dev_err(&vsoc_dev.dev->dev, + "region %d has invalid owner: %u", + i, region->managed_by); + vsoc_remove_device(pdev); + return -EFAULT; + } + } + vsoc_dev.msix_enabled = 1; + for (i = 0; i < vsoc_dev.layout->region_count; ++i) { + const struct vsoc_device_region *region = vsoc_dev.regions + i; + size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1; + const struct vsoc_signal_table_layout *h_to_g_signal_table = + ®ion->host_to_guest_signal_table; + const struct vsoc_signal_table_layout *g_to_h_signal_table = + ®ion->guest_to_host_signal_table; + + vsoc_dev.regions_data[i].name[name_sz] = '\0'; + memcpy(vsoc_dev.regions_data[i].name, region->device_name, + name_sz); + dev_info(&pdev->dev, "region %d name=%s\n", + i, vsoc_dev.regions_data[i].name); + init_waitqueue_head( + 
&vsoc_dev.regions_data[i].interrupt_wait_queue); + init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue); + vsoc_dev.regions_data[i].incoming_signalled = + vsoc_dev.kernel_mapped_shm + + region->region_begin_offset + + h_to_g_signal_table->interrupt_signalled_offset; + vsoc_dev.regions_data[i].outgoing_signalled = + vsoc_dev.kernel_mapped_shm + + region->region_begin_offset + + g_to_h_signal_table->interrupt_signalled_offset; + + result = request_irq( + vsoc_dev.msix_entries[i].vector, + vsoc_interrupt, 0, + vsoc_dev.regions_data[i].name, + vsoc_dev.regions_data + i); + if (result) { + dev_info(&pdev->dev, + "request_irq failed irq=%d vector=%d\n", + i, vsoc_dev.msix_entries[i].vector); + vsoc_remove_device(pdev); + return -ENOSPC; + } + vsoc_dev.regions_data[i].irq_requested = 1; + if (!device_create(vsoc_dev.class, NULL, + MKDEV(vsoc_dev.major, i), + NULL, vsoc_dev.regions_data[i].name)) { + dev_err(&vsoc_dev.dev->dev, "device_create failed\n"); + vsoc_remove_device(pdev); + return -EBUSY; + } + vsoc_dev.regions_data[i].device_created = 1; + } + return 0; +} + +/* + * This should undo all of the allocations in the probe function in reverse + * order. + * + * Notes: + * + * The device may have been partially initialized, so double check + * that the allocations happened. + * + * This function may be called multiple times, so mark resources as freed + * as they are deallocated. + */ +static void vsoc_remove_device(struct pci_dev *pdev) +{ + int i; + /* + * pdev is the first thing to be set on probe and the last thing + * to be cleared here. If it's NULL then there is no cleanup. + */ + if (!pdev || !vsoc_dev.dev) + return; + dev_info(&pdev->dev, "remove_device\n"); + if (vsoc_dev.regions_data) { + for (i = 0; i < vsoc_dev.layout->region_count; ++i) { + if (vsoc_dev.regions_data[i].device_created) { + device_destroy(vsoc_dev.class, + MKDEV(vsoc_dev.major, i)); + vsoc_dev.regions_data[i].device_created = 0; + } + if (vsoc_dev.regions_data[i].irq_requested) + free_irq(vsoc_dev.msix_entries[i].vector, NULL); + vsoc_dev.regions_data[i].irq_requested = 0; + } + kfree(vsoc_dev.regions_data); + vsoc_dev.regions_data = 0; + } + if (vsoc_dev.msix_enabled) { + pci_disable_msix(pdev); + vsoc_dev.msix_enabled = 0; + } + kfree(vsoc_dev.msix_entries); + vsoc_dev.msix_entries = 0; + vsoc_dev.regions = 0; + if (vsoc_dev.class_added) { + class_destroy(vsoc_dev.class); + vsoc_dev.class_added = 0; + } + if (vsoc_dev.cdev_added) { + cdev_del(&vsoc_dev.cdev); + vsoc_dev.cdev_added = 0; + } + if (vsoc_dev.major && vsoc_dev.layout) { + unregister_chrdev_region(MKDEV(vsoc_dev.major, 0), + vsoc_dev.layout->region_count); + vsoc_dev.major = 0; + } + vsoc_dev.layout = 0; + if (vsoc_dev.kernel_mapped_shm) { + pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm); + vsoc_dev.kernel_mapped_shm = 0; + } + if (vsoc_dev.regs) { + pci_iounmap(pdev, vsoc_dev.regs); + vsoc_dev.regs = 0; + } + if (vsoc_dev.requested_regions) { + pci_release_regions(pdev); + vsoc_dev.requested_regions = 0; + } + if (vsoc_dev.enabled_device) { + pci_disable_device(pdev); + vsoc_dev.enabled_device = 0; + } + /* Do this last: it indicates that the device is not initialized. 
*/ + vsoc_dev.dev = NULL; +} + +static void __exit vsoc_cleanup_module(void) +{ + vsoc_remove_device(vsoc_dev.dev); + pci_unregister_driver(&vsoc_pci_driver); +} + +static int __init vsoc_init_module(void) +{ + int err = -ENOMEM; + + INIT_LIST_HEAD(&vsoc_dev.permissions); + mutex_init(&vsoc_dev.mtx); + + err = pci_register_driver(&vsoc_pci_driver); + if (err < 0) + return err; + return 0; +} + +static int vsoc_open(struct inode *inode, struct file *filp) +{ + /* Can't use vsoc_validate_filep because filp is still incomplete */ + int ret = vsoc_validate_inode(inode); + + if (ret) + return ret; + filp->private_data = + kzalloc(sizeof(struct vsoc_private_data), GFP_KERNEL); + if (!filp->private_data) + return -ENOMEM; + return 0; +} + +static int vsoc_release(struct inode *inode, struct file *filp) +{ + struct vsoc_private_data *private_data = NULL; + struct fd_scoped_permission_node *node = NULL; + struct vsoc_device_region *owner_region_p = NULL; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + private_data = (struct vsoc_private_data *)filp->private_data; + if (!private_data) + return 0; + + node = private_data->fd_scoped_permission_node; + if (node) { + owner_region_p = vsoc_region_from_inode(inode); + if (owner_region_p->managed_by != VSOC_REGION_WHOLE) { + owner_region_p = + &vsoc_dev.regions[owner_region_p->managed_by]; + } + do_destroy_fd_scoped_permission_node(owner_region_p, node); + private_data->fd_scoped_permission_node = NULL; + } + kfree(private_data); + filp->private_data = NULL; + + return 0; +} + +/* + * Returns the device relative offset and length of the area specified by the + * fd scoped permission. If there is no fd scoped permission set, a default + * permission covering the entire region is assumed, unless the region is owned + * by another one, in which case the default is a permission with zero size. + */ +static ssize_t vsoc_get_area(struct file *filp, __u32 *area_offset) +{ + __u32 off = 0; + ssize_t length = 0; + struct vsoc_device_region *region_p; + struct fd_scoped_permission *perm; + + region_p = vsoc_region_from_filep(filp); + off = region_p->region_begin_offset; + perm = &((struct vsoc_private_data *)filp->private_data)-> + fd_scoped_permission_node->permission; + if (perm) { + off += perm->begin_offset; + length = perm->end_offset - perm->begin_offset; + } else if (region_p->managed_by == VSOC_REGION_WHOLE) { + /* No permission set and the regions is not owned by another, + * default to full region access. + */ + length = vsoc_device_region_size(region_p); + } else { + /* return zero length, access is denied. 
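	 * The same area bounds vsoc_read(), vsoc_write() and vsoc_mmap():
	 * file offsets and vm_pgoff are applied relative to the start of the
	 * permission window, or to the start of the region when no
	 * permission is set.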
*/ + length = 0; + } + if (area_offset) + *area_offset = off; + return length; +} + +static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long len = vma->vm_end - vma->vm_start; + __u32 area_off; + phys_addr_t mem_off; + ssize_t area_len; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + area_len = vsoc_get_area(filp, &area_off); + /* Add the requested offset */ + area_off += (vma->vm_pgoff << PAGE_SHIFT); + area_len -= (vma->vm_pgoff << PAGE_SHIFT); + if (area_len < len) + return -EINVAL; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + mem_off = shm_off_to_phys_addr(area_off); + if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> PAGE_SHIFT, + len, vma->vm_page_prot)) + return -EAGAIN; + return 0; +} + +module_init(vsoc_init_module); +module_exit(vsoc_cleanup_module); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Greg Hartman <ghartman@google.com>"); +MODULE_DESCRIPTION("VSoC interpretation of QEmu's ivshmem device"); +MODULE_VERSION("1.0"); diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index b63dd2ef78b5..1f398d06f4ee 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -484,8 +484,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s, struct comedi_cmd *cmd = &async->cmd; if (cmd->stop_src == TRIG_COUNT) { - unsigned int nscans = nsamples / cmd->scan_end_arg; - unsigned int scans_left = __comedi_nscans_left(s, nscans); + unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg); unsigned int scan_pos = comedi_bytes_to_samples(s, async->scan_progress); unsigned long long samples_left = 0; diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index c975f6e8be49..8f181caffca3 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -1348,6 +1348,8 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status) ack |= NISTC_INTA_ACK_AI_START; if (a_status & NISTC_AI_STATUS1_STOP) ack |= NISTC_INTA_ACK_AI_STOP; + if (a_status & NISTC_AI_STATUS1_OVER) + ack |= NISTC_INTA_ACK_AI_ERR; if (ack) ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG); } diff --git a/drivers/staging/goldfish/Kconfig b/drivers/staging/goldfish/Kconfig index c579141a7bed..c8871d0c0776 100644 --- a/drivers/staging/goldfish/Kconfig +++ b/drivers/staging/goldfish/Kconfig @@ -10,10 +10,3 @@ config GOLDFISH_SYNC ---help--- Emulated sync fences for the Goldfish Android Virtual Device -config MTD_GOLDFISH_NAND - tristate "Goldfish NAND device" - depends on GOLDFISH - depends on MTD - help - Drives the emulated NAND flash device on the Google Goldfish - Android virtual device. diff --git a/drivers/staging/goldfish/Makefile b/drivers/staging/goldfish/Makefile index 0cf525588210..30db49141814 100644 --- a/drivers/staging/goldfish/Makefile +++ b/drivers/staging/goldfish/Makefile @@ -3,7 +3,6 @@ # obj-$(CONFIG_GOLDFISH_AUDIO) += goldfish_audio.o -obj-$(CONFIG_MTD_GOLDFISH_NAND) += goldfish_nand.o # and sync diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c deleted file mode 100644 index 623353db5a08..000000000000 --- a/drivers/staging/goldfish/goldfish_nand.c +++ /dev/null @@ -1,442 +0,0 @@ -/* - * drivers/mtd/devices/goldfish_nand.c - * - * Copyright (C) 2007 Google, Inc. - * Copyright (C) 2012 Intel, Inc. - * Copyright (C) 2013 Intel, Inc. 
- * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include <linux/io.h> -#include <linux/device.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/ioport.h> -#include <linux/vmalloc.h> -#include <linux/mtd/mtd.h> -#include <linux/platform_device.h> -#include <linux/mutex.h> -#include <linux/goldfish.h> -#include <asm/div64.h> - -#include "goldfish_nand_reg.h" - -struct goldfish_nand { - /* lock protects access to the device registers */ - struct mutex lock; - unsigned char __iomem *base; - struct cmd_params *cmd_params; - size_t mtd_count; - struct mtd_info mtd[0]; -}; - -static u32 goldfish_nand_cmd_with_params(struct mtd_info *mtd, - enum nand_cmd cmd, u64 addr, u32 len, - void *ptr, u32 *rv) -{ - u32 cmdp; - struct goldfish_nand *nand = mtd->priv; - struct cmd_params *cps = nand->cmd_params; - unsigned char __iomem *base = nand->base; - - if (!cps) - return -1; - - switch (cmd) { - case NAND_CMD_ERASE: - cmdp = NAND_CMD_ERASE_WITH_PARAMS; - break; - case NAND_CMD_READ: - cmdp = NAND_CMD_READ_WITH_PARAMS; - break; - case NAND_CMD_WRITE: - cmdp = NAND_CMD_WRITE_WITH_PARAMS; - break; - default: - return -1; - } - cps->dev = mtd - nand->mtd; - cps->addr_high = (u32)(addr >> 32); - cps->addr_low = (u32)addr; - cps->transfer_size = len; - cps->data = (unsigned long)ptr; - writel(cmdp, base + NAND_COMMAND); - *rv = cps->result; - return 0; -} - -static u32 goldfish_nand_cmd(struct mtd_info *mtd, enum nand_cmd cmd, - u64 addr, u32 len, void *ptr) -{ - struct goldfish_nand *nand = mtd->priv; - u32 rv; - unsigned char __iomem *base = nand->base; - - mutex_lock(&nand->lock); - if (goldfish_nand_cmd_with_params(mtd, cmd, addr, len, ptr, &rv)) { - writel(mtd - nand->mtd, base + NAND_DEV); - writel((u32)(addr >> 32), base + NAND_ADDR_HIGH); - writel((u32)addr, base + NAND_ADDR_LOW); - writel(len, base + NAND_TRANSFER_SIZE); - gf_write_ptr(ptr, base + NAND_DATA, base + NAND_DATA_HIGH); - writel(cmd, base + NAND_COMMAND); - rv = readl(base + NAND_RESULT); - } - mutex_unlock(&nand->lock); - return rv; -} - -static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr) -{ - loff_t ofs = instr->addr; - u32 len = instr->len; - u32 rem; - - if (ofs + len > mtd->size) - goto invalid_arg; - rem = do_div(ofs, mtd->writesize); - if (rem) - goto invalid_arg; - ofs *= (mtd->writesize + mtd->oobsize); - - if (len % mtd->writesize) - goto invalid_arg; - len = len / mtd->writesize * (mtd->writesize + mtd->oobsize); - - if (goldfish_nand_cmd(mtd, NAND_CMD_ERASE, ofs, len, NULL) != len) { - pr_err("goldfish_nand_erase: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n", - ofs, len, mtd->size, mtd->erasesize); - return -EIO; - } - - instr->state = MTD_ERASE_DONE; - mtd_erase_callback(instr); - - return 0; - -invalid_arg: - pr_err("goldfish_nand_erase: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n", - ofs, len, mtd->size, mtd->erasesize); - return -EINVAL; -} - -static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ - u32 rem; - - if (ofs + ops->len > mtd->size) - goto invalid_arg; - if 
(ops->datbuf && ops->len && ops->len != mtd->writesize) - goto invalid_arg; - if (ops->ooblen + ops->ooboffs > mtd->oobsize) - goto invalid_arg; - - rem = do_div(ofs, mtd->writesize); - if (rem) - goto invalid_arg; - ofs *= (mtd->writesize + mtd->oobsize); - - if (ops->datbuf) - ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs, - ops->len, ops->datbuf); - ofs += mtd->writesize + ops->ooboffs; - if (ops->oobbuf) - ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs, - ops->ooblen, ops->oobbuf); - return 0; - -invalid_arg: - pr_err("goldfish_nand_read_oob: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n", - ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize); - return -EINVAL; -} - -static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ - u32 rem; - - if (ofs + ops->len > mtd->size) - goto invalid_arg; - if (ops->len && ops->len != mtd->writesize) - goto invalid_arg; - if (ops->ooblen + ops->ooboffs > mtd->oobsize) - goto invalid_arg; - - rem = do_div(ofs, mtd->writesize); - if (rem) - goto invalid_arg; - ofs *= (mtd->writesize + mtd->oobsize); - - if (ops->datbuf) - ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs, - ops->len, ops->datbuf); - ofs += mtd->writesize + ops->ooboffs; - if (ops->oobbuf) - ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs, - ops->ooblen, ops->oobbuf); - return 0; - -invalid_arg: - pr_err("goldfish_nand_write_oob: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n", - ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize); - return -EINVAL; -} - -static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf) -{ - u32 rem; - - if (from + len > mtd->size) - goto invalid_arg; - - rem = do_div(from, mtd->writesize); - if (rem) - goto invalid_arg; - from *= (mtd->writesize + mtd->oobsize); - - *retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, from, len, buf); - return 0; - -invalid_arg: - pr_err("goldfish_nand_read: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n", - from, len, mtd->size, mtd->writesize); - return -EINVAL; -} - -static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf) -{ - u32 rem; - - if (to + len > mtd->size) - goto invalid_arg; - - rem = do_div(to, mtd->writesize); - if (rem) - goto invalid_arg; - to *= (mtd->writesize + mtd->oobsize); - - *retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, to, len, (void *)buf); - return 0; - -invalid_arg: - pr_err("goldfish_nand_write: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n", - to, len, mtd->size, mtd->writesize); - return -EINVAL; -} - -static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs) -{ - u32 rem; - - if (ofs >= mtd->size) - goto invalid_arg; - - rem = do_div(ofs, mtd->erasesize); - if (rem) - goto invalid_arg; - ofs *= mtd->erasesize / mtd->writesize; - ofs *= (mtd->writesize + mtd->oobsize); - - return goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_GET, ofs, 0, NULL); - -invalid_arg: - pr_err("goldfish_nand_block_isbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n", - ofs, mtd->size, mtd->writesize); - return -EINVAL; -} - -static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs) -{ - u32 rem; - - if (ofs >= mtd->size) - goto invalid_arg; - - rem = do_div(ofs, mtd->erasesize); - if (rem) - goto invalid_arg; - ofs *= mtd->erasesize / mtd->writesize; - ofs *= (mtd->writesize + mtd->oobsize); - - 
if (goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_SET, ofs, 0, NULL) != 1) - return -EIO; - return 0; - -invalid_arg: - pr_err("goldfish_nand_block_markbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n", - ofs, mtd->size, mtd->writesize); - return -EINVAL; -} - -static int nand_setup_cmd_params(struct platform_device *pdev, - struct goldfish_nand *nand) -{ - u64 paddr; - unsigned char __iomem *base = nand->base; - - nand->cmd_params = devm_kzalloc(&pdev->dev, - sizeof(struct cmd_params), GFP_KERNEL); - if (!nand->cmd_params) - return -1; - - paddr = __pa(nand->cmd_params); - writel((u32)(paddr >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH); - writel((u32)paddr, base + NAND_CMD_PARAMS_ADDR_LOW); - return 0; -} - -static int goldfish_nand_init_device(struct platform_device *pdev, - struct goldfish_nand *nand, int id) -{ - u32 name_len; - u32 result; - u32 flags; - unsigned char __iomem *base = nand->base; - struct mtd_info *mtd = &nand->mtd[id]; - char *name; - - mutex_lock(&nand->lock); - writel(id, base + NAND_DEV); - flags = readl(base + NAND_DEV_FLAGS); - name_len = readl(base + NAND_DEV_NAME_LEN); - mtd->writesize = readl(base + NAND_DEV_PAGE_SIZE); - mtd->size = readl(base + NAND_DEV_SIZE_LOW); - mtd->size |= (u64)readl(base + NAND_DEV_SIZE_HIGH) << 32; - mtd->oobsize = readl(base + NAND_DEV_EXTRA_SIZE); - mtd->oobavail = mtd->oobsize; - mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) / - (mtd->writesize + mtd->oobsize) * mtd->writesize; - do_div(mtd->size, mtd->writesize + mtd->oobsize); - mtd->size *= mtd->writesize; - dev_dbg(&pdev->dev, - "goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n", - id, mtd->size, mtd->writesize, - mtd->oobsize, mtd->erasesize); - mutex_unlock(&nand->lock); - - mtd->priv = nand; - - name = devm_kzalloc(&pdev->dev, name_len + 1, GFP_KERNEL); - if (!name) - return -ENOMEM; - mtd->name = name; - - result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len, - name); - if (result != name_len) { - dev_err(&pdev->dev, - "goldfish_nand_init_device failed to get dev name %d != %d\n", - result, name_len); - return -ENODEV; - } - ((char *)mtd->name)[name_len] = '\0'; - - /* Setup the MTD structure */ - mtd->type = MTD_NANDFLASH; - mtd->flags = MTD_CAP_NANDFLASH; - if (flags & NAND_DEV_FLAG_READ_ONLY) - mtd->flags &= ~MTD_WRITEABLE; - if (flags & NAND_DEV_FLAG_CMD_PARAMS_CAP) - nand_setup_cmd_params(pdev, nand); - - mtd->owner = THIS_MODULE; - mtd->_erase = goldfish_nand_erase; - mtd->_read = goldfish_nand_read; - mtd->_write = goldfish_nand_write; - mtd->_read_oob = goldfish_nand_read_oob; - mtd->_write_oob = goldfish_nand_write_oob; - mtd->_block_isbad = goldfish_nand_block_isbad; - mtd->_block_markbad = goldfish_nand_block_markbad; - - if (mtd_device_register(mtd, NULL, 0)) - return -EIO; - - return 0; -} - -static int goldfish_nand_probe(struct platform_device *pdev) -{ - u32 num_dev; - int i; - int err; - u32 num_dev_working; - u32 version; - struct resource *r; - struct goldfish_nand *nand; - unsigned char __iomem *base; - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) - return -ENODEV; - - base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE); - if (!base) - return -ENOMEM; - - version = readl(base + NAND_VERSION); - if (version != NAND_VERSION_CURRENT) { - dev_err(&pdev->dev, - "goldfish_nand_init: version mismatch, got %d, expected %d\n", - version, NAND_VERSION_CURRENT); - return -ENODEV; - } - num_dev = readl(base + NAND_NUM_DEV); - if (num_dev == 0) - return -ENODEV; - - nand = devm_kzalloc(&pdev->dev, sizeof(*nand) 
+ - sizeof(struct mtd_info) * num_dev, GFP_KERNEL); - if (!nand) - return -ENOMEM; - - mutex_init(&nand->lock); - nand->base = base; - nand->mtd_count = num_dev; - platform_set_drvdata(pdev, nand); - - num_dev_working = 0; - for (i = 0; i < num_dev; i++) { - err = goldfish_nand_init_device(pdev, nand, i); - if (err == 0) - num_dev_working++; - } - if (num_dev_working == 0) - return -ENODEV; - return 0; -} - -static int goldfish_nand_remove(struct platform_device *pdev) -{ - struct goldfish_nand *nand = platform_get_drvdata(pdev); - int i; - - for (i = 0; i < nand->mtd_count; i++) { - if (nand->mtd[i].name) - mtd_device_unregister(&nand->mtd[i]); - } - return 0; -} - -static struct platform_driver goldfish_nand_driver = { - .probe = goldfish_nand_probe, - .remove = goldfish_nand_remove, - .driver = { - .name = "goldfish_nand" - } -}; - -module_platform_driver(goldfish_nand_driver); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/goldfish/goldfish_nand_reg.h b/drivers/staging/goldfish/goldfish_nand_reg.h deleted file mode 100644 index 43aeba3a4c8f..000000000000 --- a/drivers/staging/goldfish/goldfish_nand_reg.h +++ /dev/null @@ -1,76 +0,0 @@ -/* - * drivers/mtd/devices/goldfish_nand_reg.h - * - * Copyright (C) 2007 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef GOLDFISH_NAND_REG_H -#define GOLDFISH_NAND_REG_H - -enum nand_cmd { - /* Write device name for NAND_DEV to NAND_DATA (vaddr) */ - NAND_CMD_GET_DEV_NAME, - NAND_CMD_READ, - NAND_CMD_WRITE, - NAND_CMD_ERASE, - /* NAND_RESULT is 1 if block is bad, 0 if it is not */ - NAND_CMD_BLOCK_BAD_GET, - NAND_CMD_BLOCK_BAD_SET, - NAND_CMD_READ_WITH_PARAMS, - NAND_CMD_WRITE_WITH_PARAMS, - NAND_CMD_ERASE_WITH_PARAMS -}; - -enum nand_dev_flags { - NAND_DEV_FLAG_READ_ONLY = 0x00000001, - NAND_DEV_FLAG_CMD_PARAMS_CAP = 0x00000002, -}; - -#define NAND_VERSION_CURRENT (1) - -enum nand_reg { - /* Global */ - NAND_VERSION = 0x000, - NAND_NUM_DEV = 0x004, - NAND_DEV = 0x008, - - /* Dev info */ - NAND_DEV_FLAGS = 0x010, - NAND_DEV_NAME_LEN = 0x014, - NAND_DEV_PAGE_SIZE = 0x018, - NAND_DEV_EXTRA_SIZE = 0x01c, - NAND_DEV_ERASE_SIZE = 0x020, - NAND_DEV_SIZE_LOW = 0x028, - NAND_DEV_SIZE_HIGH = 0x02c, - - /* Command */ - NAND_RESULT = 0x040, - NAND_COMMAND = 0x044, - NAND_DATA = 0x048, - NAND_DATA_HIGH = 0x100, - NAND_TRANSFER_SIZE = 0x04c, - NAND_ADDR_LOW = 0x050, - NAND_ADDR_HIGH = 0x054, - NAND_CMD_PARAMS_ADDR_LOW = 0x058, - NAND_CMD_PARAMS_ADDR_HIGH = 0x05c, -}; - -struct cmd_params { - u32 dev; - u32 addr_low; - u32 addr_high; - u32 transfer_size; - unsigned long data; - u32 result; -}; -#endif diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c index 39f5261c9854..5cf5b7334089 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c @@ -824,7 +824,7 @@ void sptlrpc_request_out_callback(struct ptlrpc_request *req) if (req->rq_pool || !req->rq_reqbuf) return; - kfree(req->rq_reqbuf); + kvfree(req->rq_reqbuf); req->rq_reqbuf = NULL; req->rq_reqbuf_len = 0; } diff --git a/drivers/staging/speakup/kobjects.c 
b/drivers/staging/speakup/kobjects.c index fdfeb42b2b8f..06ef26872462 100644 --- a/drivers/staging/speakup/kobjects.c +++ b/drivers/staging/speakup/kobjects.c @@ -831,7 +831,9 @@ static ssize_t message_show(struct kobject *kobj, struct msg_group_t *group = spk_find_msg_group(attr->attr.name); unsigned long flags; - BUG_ON(!group); + if (WARN_ON(!group)) + return -EINVAL; + spin_lock_irqsave(&speakup_info.spinlock, flags); retval = message_show_helper(buf, group->start, group->end); spin_unlock_irqrestore(&speakup_info.spinlock, flags); @@ -843,7 +845,9 @@ static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr, { struct msg_group_t *group = spk_find_msg_group(attr->attr.name); - BUG_ON(!group); + if (WARN_ON(!group)) + return -EINVAL; + return message_store_helper(buf, count, group); } diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c index c119f20dfd44..3f2ccf9d7358 100644 --- a/drivers/staging/unisys/visorhba/visorhba_main.c +++ b/drivers/staging/unisys/visorhba/visorhba_main.c @@ -792,7 +792,7 @@ static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) { struct scsi_device *scsidev; - unsigned char buf[36]; + unsigned char *buf; struct scatterlist *sg; unsigned int i; char *this_page; @@ -807,6 +807,10 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) if (cmdrsp->scsi.no_disk_result == 0) return; + buf = kzalloc(sizeof(char) * 36, GFP_KERNEL); + if (!buf) + return; + /* Linux scsi code wants a device at Lun 0 * to issue report luns, but we don't want * a disk there so we'll present a processor @@ -820,6 +824,7 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) if (scsi_sg_count(scsicmd) == 0) { memcpy(scsi_sglist(scsicmd), buf, cmdrsp->scsi.bufflen); + kfree(buf); return; } @@ -831,6 +836,7 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) memcpy(this_page, buf + bufind, sg[i].length); kunmap_atomic(this_page_orig); } + kfree(buf); } else { devdata = (struct visorhba_devdata *)scsidev->host->hostdata; for_each_vdisk_match(vdisk, devdata, scsidev) { diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c index dbbe72c7e255..f78353ddeea5 100644 --- a/drivers/staging/wilc1000/host_interface.c +++ b/drivers/staging/wilc1000/host_interface.c @@ -2179,6 +2179,8 @@ static s32 Handle_Get_InActiveTime(struct host_if_drv *hif_drv, wid.type = WID_STR; wid.size = ETH_ALEN; wid.val = kmalloc(wid.size, GFP_KERNEL); + if (!wid.val) + return -ENOMEM; stamac = wid.val; memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN); diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c index 450af1b77f99..b2092c5ec7f3 100644 --- a/drivers/staging/wilc1000/linux_mon.c +++ b/drivers/staging/wilc1000/linux_mon.c @@ -251,6 +251,8 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb, if (skb->data[0] == 0xc0 && (!(memcmp(broadcast, &skb->data[4], 6)))) { skb2 = dev_alloc_skb(skb->len + sizeof(struct wilc_wfi_radiotap_cb_hdr)); + if (!skb2) + return -ENOMEM; memcpy(skb_put(skb2, skb->len), skb->data, skb->len); diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c index 013a6240f193..c1ad0aea23b9 100644 --- a/drivers/staging/wlan-ng/prism2mgmt.c +++ b/drivers/staging/wlan-ng/prism2mgmt.c @@ -169,7 +169,7 @@ int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp) hw->ident_sta_fw.variant) > HFA384x_FIRMWARE_VERSION(1, 
5, 0)) { if (msg->scantype.data != P80211ENUM_scantype_active) - word = cpu_to_le16(msg->maxchanneltime.data); + word = msg->maxchanneltime.data; else word = 0; diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 2e35db7f4aac..c15af2fcf2ba 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -276,12 +276,11 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd, else ret = vfs_iter_read(fd, &iter, &pos); - kfree(bvec); - if (is_write) { if (ret < 0 || ret != data_length) { pr_err("%s() write returned %d\n", __func__, ret); - return (ret < 0 ? ret : -EINVAL); + if (ret >= 0) + ret = -EINVAL; } } else { /* @@ -294,17 +293,29 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd, pr_err("%s() returned %d, expecting %u for " "S_ISBLK\n", __func__, ret, data_length); - return (ret < 0 ? ret : -EINVAL); + if (ret >= 0) + ret = -EINVAL; } } else { if (ret < 0) { pr_err("%s() returned %d for non S_ISBLK\n", __func__, ret); - return ret; + } else if (ret != data_length) { + /* + * Short read case: + * Probably some one truncate file under us. + * We must explicitly zero sg-pages to prevent + * expose uninizialized pages to userspace. + */ + if (ret < data_length) + ret += iov_iter_zero(data_length - ret, &iter); + else + ret = -EINVAL; } } } - return 1; + kfree(bvec); + return ret; } static sense_reason_t diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c index 5f955af4671a..db49c33264d5 100644 --- a/drivers/thermal/msm-tsens.c +++ b/drivers/thermal/msm-tsens.c @@ -89,6 +89,7 @@ #define TSENS_TM_CRITICAL_INT_EN BIT(2) #define TSENS_TM_UPPER_INT_EN BIT(1) #define TSENS_TM_LOWER_INT_EN BIT(0) +#define TSENS_TM_UPPER_LOWER_INT_DISABLE 0xffffffff #define TSENS_TM_UPPER_INT_MASK(n) (((n) & 0xffff0000) >> 16) #define TSENS_TM_LOWER_INT_MASK(n) ((n) & 0xffff) @@ -269,8 +270,8 @@ struct tsens_tm_device { uint32_t wd_bark_val; int tsens_irq; int tsens_critical_irq; - void *tsens_addr; - void *tsens_calib_addr; + void __iomem *tsens_addr; + void __iomem *tsens_calib_addr; int tsens_len; int calib_len; struct resource *res_tsens_mem; @@ -2079,6 +2080,7 @@ static int tsens_hw_init(struct tsens_tm_device *tmdev) void __iomem *sensor_int_mask_addr; unsigned int srot_val; int crit_mask; + void __iomem *int_mask_addr; if (!tmdev) { pr_err("Invalid tsens device\n"); @@ -2104,6 +2106,10 @@ static int tsens_hw_init(struct tsens_tm_device *tmdev) /*Update critical cycle monitoring*/ mb(); } + int_mask_addr = TSENS_TM_UPPER_LOWER_INT_MASK + (tmdev->tsens_addr); + writel_relaxed(TSENS_TM_UPPER_LOWER_INT_DISABLE, + int_mask_addr); writel_relaxed(TSENS_TM_CRITICAL_INT_EN | TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN, TSENS_TM_INT_EN(tmdev->tsens_addr)); diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 1246aa6fcab0..737635f0bec0 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -523,6 +523,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz) struct thermal_instance *instance; struct power_allocator_params *params = tz->governor_data; + mutex_lock(&tz->lock); list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if ((instance->trip != params->trip_max_desired_temperature) || (!cdev_is_power_actor(instance->cdev))) @@ -532,6 +533,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz) instance->cdev->updated = false; thermal_cdev_update(instance->cdev); } + mutex_unlock(&tz->lock); } /** diff --git 
a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c index c6ec9e64451b..4686e93aaf94 100644 --- a/drivers/tty/goldfish.c +++ b/drivers/tty/goldfish.c @@ -26,20 +26,25 @@ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/serial_core.h> +#include <linux/of.h> /* Goldfish tty register's offsets */ -#define GOLDFISH_TTY_REG_BYTES_READY 0x04 -#define GOLDFISH_TTY_REG_CMD 0x08 -#define GOLDFISH_TTY_REG_DATA_PTR 0x10 -#define GOLDFISH_TTY_REG_DATA_LEN 0x14 -#define GOLDFISH_TTY_REG_DATA_PTR_HIGH 0x18 -#define GOLDFISH_TTY_REG_VERSION 0x20 +enum { + GOLDFISH_TTY_REG_BYTES_READY = 0x04, + GOLDFISH_TTY_REG_CMD = 0x08, + GOLDFISH_TTY_REG_DATA_PTR = 0x10, + GOLDFISH_TTY_REG_DATA_LEN = 0x14, + GOLDFISH_TTY_REG_DATA_PTR_HIGH = 0x18, + GOLDFISH_TTY_REG_VERSION = 0x20, +}; /* Goldfish tty commands */ -#define GOLDFISH_TTY_CMD_INT_DISABLE 0 -#define GOLDFISH_TTY_CMD_INT_ENABLE 1 -#define GOLDFISH_TTY_CMD_WRITE_BUFFER 2 -#define GOLDFISH_TTY_CMD_READ_BUFFER 3 +enum { + GOLDFISH_TTY_CMD_INT_DISABLE = 0, + GOLDFISH_TTY_CMD_INT_ENABLE = 1, + GOLDFISH_TTY_CMD_WRITE_BUFFER = 2, + GOLDFISH_TTY_CMD_READ_BUFFER = 3, +}; struct goldfish_tty { struct tty_port port; @@ -82,32 +87,35 @@ static void do_rw_io(struct goldfish_tty *qtty, } static void goldfish_tty_rw(struct goldfish_tty *qtty, - unsigned long addr, + const void *address_ptr, unsigned int count, int is_write) { dma_addr_t dma_handle; enum dma_data_direction dma_dir; + uintptr_t address; + address = (uintptr_t)address_ptr; dma_dir = (is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + if (qtty->version > 0) { /* * Goldfish TTY for Ranchu platform uses * physical addresses and DMA for read/write operations */ - unsigned long addr_end = addr + count; + uintptr_t address_end = address + count; - while (addr < addr_end) { - unsigned long pg_end = (addr & PAGE_MASK) + PAGE_SIZE; - unsigned long next = - pg_end < addr_end ? pg_end : addr_end; - unsigned long avail = next - addr; + while (address < address_end) { + uintptr_t page_end = (address & PAGE_MASK) + PAGE_SIZE; + uintptr_t next = page_end < address_end ? + page_end : address_end; + uintptr_t avail = next - address; /* * Map the buffer's virtual address to the DMA address * so the buffer can be accessed by the device. */ - dma_handle = dma_map_single(qtty->dev, (void *)addr, + dma_handle = dma_map_single(qtty->dev, (void *)address, avail, dma_dir); if (dma_mapping_error(qtty->dev, dma_handle)) { @@ -122,31 +130,30 @@ static void goldfish_tty_rw(struct goldfish_tty *qtty, */ dma_unmap_single(qtty->dev, dma_handle, avail, dma_dir); - addr += avail; + address += avail; } } else { /* * Old style Goldfish TTY used on the Goldfish platform * uses virtual addresses. 
*/ - do_rw_io(qtty, addr, count, is_write); + do_rw_io(qtty, address, count, is_write); } + } static void goldfish_tty_do_write(int line, const char *buf, unsigned int count) { struct goldfish_tty *qtty = &goldfish_ttys[line]; - unsigned long address = (unsigned long)(void *)buf; - goldfish_tty_rw(qtty, address, count, 1); + goldfish_tty_rw(qtty, buf, count, 1); } static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id) { struct goldfish_tty *qtty = dev_id; void __iomem *base = qtty->base; - unsigned long address; unsigned char *buf; u32 count; @@ -155,9 +162,7 @@ static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id) return IRQ_NONE; count = tty_prepare_flip_string(&qtty->port, &buf, count); - - address = (unsigned long)(void *)buf; - goldfish_tty_rw(qtty, address, count, 0); + goldfish_tty_rw(qtty, buf, count, 0); tty_schedule_flip(&qtty->port); return IRQ_HANDLED; @@ -181,6 +186,7 @@ static void goldfish_tty_shutdown(struct tty_port *port) static int goldfish_tty_open(struct tty_struct *tty, struct file *filp) { struct goldfish_tty *qtty = &goldfish_ttys[tty->index]; + return tty_port_open(&qtty->port, tty, filp); } @@ -210,6 +216,7 @@ static int goldfish_tty_chars_in_buffer(struct tty_struct *tty) { struct goldfish_tty *qtty = &goldfish_ttys[tty->index]; void __iomem *base = qtty->base; + return readl(base + GOLDFISH_TTY_REG_BYTES_READY); } diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 9aff37186246..78bd121ecede 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -1467,6 +1467,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci) * in which case an opening port goes back to closed and a closing port * is simply put into closed state (any further frames from the other * end will get a DM response) + * + * Some control dlci can stay in ADM mode with other dlci working just + * fine. In that case we can just keep the control dlci open after the + * DLCI_OPENING retries time out. */ static void gsm_dlci_t1(unsigned long data) @@ -1480,8 +1484,15 @@ static void gsm_dlci_t1(unsigned long data) if (dlci->retries) { gsm_command(dlci->gsm, dlci->addr, SABM|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); - } else + } else if (!dlci->addr && gsm->control == (DM | PF)) { + if (debug & 8) + pr_info("DLCI %d opening in ADM mode.\n", + dlci->addr); + gsm_dlci_open(dlci); + } else { gsm_dlci_close(dlci); + } + break; case DLCI_CLOSING: dlci->retries--; @@ -1499,8 +1510,8 @@ static void gsm_dlci_t1(unsigned long data) * @dlci: DLCI to open * * Commence opening a DLCI from the Linux side. We issue SABM messages - * to the modem which should then reply with a UA, at which point we - * will move into open state. Opening is done asynchronously with retry + * to the modem which should then reply with a UA or ADM, at which point + * we will move into open state. Opening is done asynchronously with retry * running off timers and the responses. 
*/ diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index e8dd296fb25b..c4383573cf66 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -608,6 +608,10 @@ static int omap_8250_startup(struct uart_port *port) up->lsr_saved_flags = 0; up->msr_saved_flags = 0; + /* Disable DMA for console UART */ + if (uart_console(port)) + up->dma = NULL; + if (up->dma) { ret = serial8250_request_dma(up); if (ret) { diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 7025f47fa284..746c76b358a0 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -5300,6 +5300,17 @@ static struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ pbn_b2_4_115200 }, /* + * BrainBoxes UC-260 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0D21, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + pbn_b2_4_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0E34, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + pbn_b2_4_115200 }, + /* * Perle PCI-RAS cards */ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 53e4d5056db7..e0277cf0bf58 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1783,6 +1783,7 @@ static void atmel_get_ip_name(struct uart_port *port) switch (version) { case 0x302: case 0x10213: + case 0x10302: dev_dbg(port->dev, "This version is usart\n"); atmel_port->is_usart = true; break; diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c index fcf803ffad19..cdd2f942317c 100644 --- a/drivers/tty/serial/sccnxp.c +++ b/drivers/tty/serial/sccnxp.c @@ -884,14 +884,19 @@ static int sccnxp_probe(struct platform_device *pdev) clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { - if (PTR_ERR(clk) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; + ret = PTR_ERR(clk); + if (ret == -EPROBE_DEFER) goto err_out; - } + uartclk = 0; + } else { + clk_prepare_enable(clk); + uartclk = clk_get_rate(clk); + } + + if (!uartclk) { dev_notice(&pdev->dev, "Using default clock frequency\n"); uartclk = s->chip->freq_std; - } else - uartclk = clk_get_rate(clk); + } /* Check input frequency */ if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) { diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 80d0ffe7abc1..8dd822feb972 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -847,6 +847,8 @@ static void sci_receive_chars(struct uart_port *port) /* Tell the rest of the system the news. New characters! */ tty_flip_buffer_push(tport); } else { + /* TTY buffers full; read from RX reg to prevent lockup */ + serial_port_in(port, SCxRDR); serial_port_in(port, SCxSR); /* dummy read */ sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); } @@ -1455,7 +1457,16 @@ static void sci_free_dma(struct uart_port *port) if (s->chan_rx) sci_rx_dma_release(s, false); } -#else + +static void sci_flush_buffer(struct uart_port *port) +{ + /* + * In uart_flush_buffer(), the xmit circular buffer has just been + * cleared, so we have to reset tx_dma_len accordingly. 
+ */ + to_sci_port(port)->tx_dma_len = 0; +} +#else /* !CONFIG_SERIAL_SH_SCI_DMA */ static inline void sci_request_dma(struct uart_port *port) { } @@ -1463,7 +1474,9 @@ static inline void sci_request_dma(struct uart_port *port) static inline void sci_free_dma(struct uart_port *port) { } -#endif + +#define sci_flush_buffer NULL +#endif /* !CONFIG_SERIAL_SH_SCI_DMA */ static irqreturn_t sci_rx_interrupt(int irq, void *ptr) { @@ -2203,6 +2216,7 @@ static struct uart_ops sci_uart_ops = { .break_ctl = sci_break_ctl, .startup = sci_startup, .shutdown = sci_shutdown, + .flush_buffer = sci_flush_buffer, .set_termios = sci_set_termios, .pm = sci_pm, .type = sci_type, diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 1bb629ab8ecc..a638c1738547 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1694,6 +1694,8 @@ static void release_tty(struct tty_struct *tty, int idx) if (tty->link) tty->link->port->itty = NULL; tty_buffer_cancel_work(tty->port); + if (tty->link) + tty_buffer_cancel_work(tty->link->port); tty_kref_put(tty->link); tty_kref_put(tty); diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index e4f69bddcfb1..ff3286fc22d8 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1312,6 +1312,11 @@ static void csi_m(struct vc_data *vc) case 3: vc->vc_italic = 1; break; + case 21: + /* + * No console drivers support double underline, so + * convert it to a single underline. + */ case 4: vc->vc_underline = 1; break; @@ -1348,7 +1353,6 @@ static void csi_m(struct vc_data *vc) vc->vc_disp_ctrl = 1; vc->vc_toggle_meta = 1; break; - case 21: case 22: vc->vc_intensity = 1; break; @@ -1725,7 +1729,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear) default_attr(vc); update_attr(vc); - vc->vc_tab_stop[0] = 0x01010100; + vc->vc_tab_stop[0] = vc->vc_tab_stop[1] = vc->vc_tab_stop[2] = vc->vc_tab_stop[3] = @@ -1769,7 +1773,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) vc->vc_pos -= (vc->vc_x << 1); while (vc->vc_x < vc->vc_cols - 1) { vc->vc_x++; - if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31))) + if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31))) break; } vc->vc_pos += (vc->vc_x << 1); @@ -1829,7 +1833,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) lf(vc); return; case 'H': - vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31)); + vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31)); return; case 'Z': respond_ID(tty); @@ -2022,7 +2026,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; case 'g': if (!vc->vc_par[0]) - vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31)); + vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31)); else if (vc->vc_par[0] == 3) { vc->vc_tab_stop[0] = vc->vc_tab_stop[1] = diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 939c6ad71068..57ee43512992 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -851,7 +851,7 @@ static inline void ci_role_destroy(struct ci_hdrc *ci) { ci_hdrc_gadget_destroy(ci); ci_hdrc_host_destroy(ci); - if (ci->is_otg) + if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) ci_hdrc_otg_destroy(ci); } @@ -951,27 +951,35 @@ static int ci_hdrc_probe(struct platform_device *pdev) /* initialize role(s) before the interrupt is requested */ if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) { ret = ci_hdrc_host_init(ci); - if (ret) - dev_info(dev, "doesn't support host\n"); + if (ret) { + if (ret == 
-ENXIO) + dev_info(dev, "doesn't support host\n"); + else + goto deinit_phy; + } } if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) { ret = ci_hdrc_gadget_init(ci); - if (ret) - dev_info(dev, "doesn't support gadget\n"); + if (ret) { + if (ret == -ENXIO) + dev_info(dev, "doesn't support gadget\n"); + else + goto deinit_host; + } } if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) { dev_err(dev, "no supported roles\n"); ret = -ENODEV; - goto deinit_phy; + goto deinit_gadget; } if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) { ret = ci_hdrc_otg_init(ci); if (ret) { dev_err(dev, "init otg fails, ret = %d\n", ret); - goto stop; + goto deinit_gadget; } } @@ -1036,7 +1044,12 @@ static int ci_hdrc_probe(struct platform_device *pdev) ci_extcon_unregister(ci); stop: - ci_role_destroy(ci); + if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) + ci_hdrc_otg_destroy(ci); +deinit_gadget: + ci_hdrc_gadget_destroy(ci); +deinit_host: + ci_hdrc_host_destroy(ci); deinit_phy: ci_usb_phy_exit(ci); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 8e641b5893ed..29adabdb305f 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -147,6 +147,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); + /* Linger a bit, prior to the next control message. */ + if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) + msleep(200); + kfree(dr); return ret; diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 774c97bb1c08..4f1c6f8d4352 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -229,7 +229,8 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, /* Corsair Strafe RGB */ - { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, /* Corsair K70 LUX */ { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 571c21727ff9..85fb6226770c 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -1402,8 +1402,12 @@ static void dwc2_conn_id_status_change(struct work_struct *work) if (count > 250) dev_err(hsotg->dev, "Connection id status change timed out\n"); - hsotg->op_state = OTG_STATE_A_HOST; + spin_lock_irqsave(&hsotg->lock, flags); + dwc2_hsotg_disconnect(hsotg); + spin_unlock_irqrestore(&hsotg->lock, flags); + + hsotg->op_state = OTG_STATE_A_HOST; /* Initialize the Core for Host mode */ dwc2_core_init(hsotg, false, -1); dwc2_enable_global_interrupts(hsotg); diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c index 2be268d2423d..03a926ebf34b 100644 --- a/drivers/usb/dwc3/dwc3-keystone.c +++ b/drivers/usb/dwc3/dwc3-keystone.c @@ -112,6 +112,10 @@ static int kdwc3_probe(struct platform_device *pdev) dev->dma_mask = &kdwc3_dma_mask; kdwc->clk = devm_clk_get(kdwc->dev, "usb"); + if (IS_ERR(kdwc->clk)) { + dev_err(kdwc->dev, "unable to get usb clock\n"); + return PTR_ERR(kdwc->clk); + } error = clk_prepare_enable(kdwc->clk); if (error < 0) { diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index 2cd600a58fd7..3f59a2f8b84f 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -3487,6 +3487,7 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on) if (on) { dev_dbg(mdwc->dev, "%s: 
turn on host\n", __func__); + pm_runtime_get_sync(mdwc->dev); mdwc->hs_phy->flags |= PHY_HOST_MODE; if (dwc->maximum_speed == USB_SPEED_SUPER) { mdwc->ss_phy->flags |= PHY_HOST_MODE; @@ -3495,7 +3496,6 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on) } usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH); - pm_runtime_get_sync(mdwc->dev); dbg_event(0xFF, "StrtHost gync", atomic_read(&mdwc->dev->power.usage_count)); if (!IS_ERR(mdwc->vbus_reg)) diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c index a412f024d834..61dfceb336d6 100644 --- a/drivers/usb/gadget/function/f_accessory.c +++ b/drivers/usb/gadget/function/f_accessory.c @@ -887,6 +887,12 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev, u16 w_length = le16_to_cpu(ctrl->wLength); unsigned long flags; + /* + * If instance is not created which is the case in power off charging + * mode, dev will be NULL. Hence return error if it is the case. + */ + if (!dev) + return -ENODEV; /* * printk(KERN_INFO "acc_ctrlrequest " * "%02x.%02x v%04x i%04x l%u\n", diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 1ffde9c5408c..9edc01692142 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -68,18 +68,27 @@ __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len); static int __must_check __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len); +static LIST_HEAD(inst_list); + /* ffs instance status */ -static DEFINE_MUTEX(ffs_ep_lock); -static bool ffs_inst_exist; -static struct f_fs_opts *g_opts; +#define INST_NAME_SIZE 16 -/* Free instance structures */ -static void ffs_inst_clean(struct f_fs_opts *opts); -static void ffs_inst_clean_delay(void); -static int ffs_inst_exist_check(void); +struct ffs_inst_status { + char inst_name[INST_NAME_SIZE]; + struct list_head list; + struct mutex ffs_lock; + bool inst_exist; + struct f_fs_opts *opts; + struct ffs_data *ffs_data; +}; -/* Global ffs_data pointer */ -static struct ffs_data *g_ffs_data; +/* Free instance structures */ +static void ffs_inst_clean(struct f_fs_opts *opts, + const char *inst_name); +static void ffs_inst_clean_delay(const char *inst_name); +static int ffs_inst_exist_check(const char *inst_name); +static struct ffs_inst_status *name_to_inst_status( + const char *inst_name, bool create_inst); /* The function structure ***************************************************/ @@ -300,7 +309,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, ffs_log("enter:len %zu state %d setup_state %d flags %lu", len, ffs->state, ffs->setup_state, ffs->flags); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(ffs->dev_name); if (ret < 0) return ret; @@ -490,7 +499,7 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf, ffs_log("enter:len %zu state %d setup_state %d flags %lu", len, ffs->state, ffs->setup_state, ffs->flags); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(ffs->dev_name); if (ret < 0) return ret; @@ -601,7 +610,7 @@ static int ffs_ep0_open(struct inode *inode, struct file *file) ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state, ffs->setup_state, ffs->flags, atomic_read(&ffs->opened)); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(ffs->dev_name); if (ret < 0) return ret; @@ -643,7 +652,7 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value) ffs_log("state %d setup_state %d flags %lu opened %d", 
ffs->state, ffs->setup_state, ffs->flags, atomic_read(&ffs->opened)); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(ffs->dev_name); if (ret < 0) return ret; @@ -668,7 +677,7 @@ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait) ffs_log("enter:state %d setup_state %d flags %lu opened %d", ffs->state, ffs->setup_state, ffs->flags, atomic_read(&ffs->opened)); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(ffs->dev_name); if (ret < 0) return ret; @@ -799,6 +808,10 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) ffs_log("enter: epfile name %s epfile err %d (%s)", epfile->name, atomic_read(&epfile->error), io_data->read ? "READ" : "WRITE"); + ret = ffs_inst_exist_check(epfile->ffs->dev_name); + if (ret < 0) + return ret; + smp_mb__before_atomic(); retry: if (atomic_read(&epfile->error)) @@ -1085,7 +1098,7 @@ ffs_epfile_open(struct inode *inode, struct file *file) ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state, epfile->ffs->setup_state, epfile->ffs->flags); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(epfile->ffs->dev_name); if (ret < 0) return ret; @@ -1143,16 +1156,11 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) { struct ffs_io_data io_data, *p = &io_data; ssize_t res; - int ret; ENTER(); ffs_log("enter"); - ret = ffs_inst_exist_check(); - if (ret < 0) - return ret; - if (!is_sync_kiocb(kiocb)) { p = kmalloc(sizeof(io_data), GFP_KERNEL); if (unlikely(!p)) @@ -1189,16 +1197,11 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) { struct ffs_io_data io_data, *p = &io_data; ssize_t res; - int ret; ENTER(); ffs_log("enter"); - ret = ffs_inst_exist_check(); - if (ret < 0) - return ret; - if (!is_sync_kiocb(kiocb)) { p = kmalloc(sizeof(io_data), GFP_KERNEL); if (unlikely(!p)) @@ -1275,7 +1278,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state, epfile->ffs->setup_state, epfile->ffs->flags); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(epfile->ffs->dev_name); if (ret < 0) return ret; @@ -1583,6 +1586,7 @@ ffs_fs_mount(struct file_system_type *t, int flags, int ret; void *ffs_dev; struct ffs_data *ffs; + struct ffs_inst_status *inst_status; ENTER(); @@ -1612,6 +1616,18 @@ ffs_fs_mount(struct file_system_type *t, int flags, ffs->private_data = ffs_dev; data.ffs_data = ffs; + inst_status = name_to_inst_status(ffs->dev_name, false); + if (IS_ERR(inst_status)) { + ffs_log("failed to find instance (%s)\n", + ffs->dev_name); + return ERR_PTR(-EINVAL); + } + + /* Store ffs to global status structure */ + ffs_dev_lock(); + inst_status->ffs_data = ffs; + ffs_dev_unlock(); + rv = mount_nodev(t, flags, &data, ffs_sb_fill); if (IS_ERR(rv) && data.ffs_data) { ffs_release_dev(data.ffs_data); @@ -1711,6 +1727,9 @@ static void ffs_data_opened(struct ffs_data *ffs) static void ffs_data_put(struct ffs_data *ffs) { + struct ffs_inst_status *inst_status; + const char *dev_name; + ENTER(); ffs_log("enter"); @@ -1718,16 +1737,20 @@ static void ffs_data_put(struct ffs_data *ffs) smp_mb__before_atomic(); if (unlikely(atomic_dec_and_test(&ffs->ref))) { pr_info("%s(): freeing\n", __func__); - /* Clear g_ffs_data */ - ffs_dev_lock(); - g_ffs_data = NULL; - ffs_dev_unlock(); + /* Clear ffs from global structure */ + inst_status = name_to_inst_status(ffs->dev_name, false); + if (!IS_ERR(inst_status)) { + ffs_dev_lock(); + inst_status->ffs_data = 
NULL; + ffs_dev_unlock(); + } ffs_data_clear(ffs); BUG_ON(waitqueue_active(&ffs->ev.waitq) || waitqueue_active(&ffs->ep0req_completion.wait)); - kfree(ffs->dev_name); + dev_name = ffs->dev_name; kfree(ffs); - ffs_inst_clean_delay(); + ffs_inst_clean_delay(dev_name); + kfree(dev_name); } ffs_log("exit"); @@ -1792,11 +1815,6 @@ static struct ffs_data *ffs_data_new(void) /* XXX REVISIT need to update it in some places, or do we? */ ffs->ev.can_stall = 1; - /* Store ffs to g_ffs_data */ - ffs_dev_lock(); - g_ffs_data = ffs; - ffs_dev_unlock(); - ffs_log("exit"); return ffs; @@ -3684,79 +3702,146 @@ static struct config_item_type ffs_func_type = { /* Function registration interface ******************************************/ -static int ffs_inst_exist_check(void) +static struct ffs_inst_status *name_to_inst_status( + const char *inst_name, bool create_inst) +{ + struct ffs_inst_status *inst_status; + + list_for_each_entry(inst_status, &inst_list, list) { + if (!strncasecmp(inst_status->inst_name, + inst_name, strlen(inst_name))) + return inst_status; + } + + if (!create_inst) + return ERR_PTR(-ENODEV); + + inst_status = kzalloc(sizeof(struct ffs_inst_status), + GFP_KERNEL); + if (!inst_status) + return ERR_PTR(-ENOMEM); + + mutex_init(&inst_status->ffs_lock); + snprintf(inst_status->inst_name, INST_NAME_SIZE, inst_name); + list_add_tail(&inst_status->list, &inst_list); + + return inst_status; +} + +static int ffs_inst_exist_check(const char *inst_name) { - mutex_lock(&ffs_ep_lock); + struct ffs_inst_status *inst_status; - if (unlikely(ffs_inst_exist == false)) { - mutex_unlock(&ffs_ep_lock); + inst_status = name_to_inst_status(inst_name, false); + if (IS_ERR(inst_status)) { pr_err_ratelimited( - "%s: f_fs instance freed already.\n", - __func__); + "%s: failed to find instance (%s)\n", + __func__, inst_name); + return -ENODEV; + } + + mutex_lock(&inst_status->ffs_lock); + + if (unlikely(inst_status->inst_exist == false)) { + mutex_unlock(&inst_status->ffs_lock); + pr_err_ratelimited( + "%s: f_fs instance (%s) has been freed already.\n", + __func__, inst_name); return -ENODEV; } - mutex_unlock(&ffs_ep_lock); + mutex_unlock(&inst_status->ffs_lock); return 0; } -static void ffs_inst_clean(struct f_fs_opts *opts) +static void ffs_inst_clean(struct f_fs_opts *opts, + const char *inst_name) { - g_opts = NULL; + struct ffs_inst_status *inst_status; + + inst_status = name_to_inst_status(inst_name, false); + if (IS_ERR(inst_status)) { + pr_err_ratelimited( + "%s: failed to find instance (%s)\n", + __func__, inst_name); + return; + } + + inst_status->opts = NULL; + ffs_dev_lock(); _ffs_free_dev(opts->dev); ffs_dev_unlock(); kfree(opts); } -static void ffs_inst_clean_delay(void) +static void ffs_inst_clean_delay(const char *inst_name) { - mutex_lock(&ffs_ep_lock); + struct ffs_inst_status *inst_status; - if (unlikely(ffs_inst_exist == false)) { - if (g_opts) { - ffs_inst_clean(g_opts); + inst_status = name_to_inst_status(inst_name, false); + if (IS_ERR(inst_status)) { + pr_err_ratelimited( + "%s: failed to find (%s) instance\n", + __func__, inst_name); + return; + } + + mutex_lock(&inst_status->ffs_lock); + + if (unlikely(inst_status->inst_exist == false)) { + if (inst_status->opts) { + ffs_inst_clean(inst_status->opts, inst_name); pr_err_ratelimited("%s: Delayed free memory\n", __func__); } - mutex_unlock(&ffs_ep_lock); + mutex_unlock(&inst_status->ffs_lock); return; } - mutex_unlock(&ffs_ep_lock); + mutex_unlock(&inst_status->ffs_lock); } static void ffs_free_inst(struct usb_function_instance *f) { 
struct f_fs_opts *opts; + struct ffs_inst_status *inst_status; opts = to_f_fs_opts(f); - mutex_lock(&ffs_ep_lock); + inst_status = name_to_inst_status(opts->dev->name, false); + if (IS_ERR(inst_status)) { + ffs_log("failed to find (%s) instance\n", + opts->dev->name); + return; + } + + mutex_lock(&inst_status->ffs_lock); if (opts->dev->ffs_data && atomic_read(&opts->dev->ffs_data->opened)) { - ffs_inst_exist = false; - mutex_unlock(&ffs_ep_lock); - ffs_log("%s: Dev is open, free mem when dev close\n", - __func__); + inst_status->inst_exist = false; + mutex_unlock(&inst_status->ffs_lock); + ffs_log("Dev is open, free mem when dev (%s) close\n", + opts->dev->name); return; } - ffs_inst_clean(opts); - ffs_inst_exist = false; - g_opts = NULL; - mutex_unlock(&ffs_ep_lock); + ffs_inst_clean(opts, opts->dev->name); + inst_status->inst_exist = false; + mutex_unlock(&inst_status->ffs_lock); } #define MAX_INST_NAME_LEN 40 static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name) { - struct f_fs_opts *opts; + struct f_fs_opts *opts, *opts_prev; + struct ffs_data *ffs_data_tmp; char *ptr; const char *tmp; int name_len, ret; + struct ffs_inst_status *inst_status; name_len = strlen(name) + 1; if (name_len > MAX_INST_NAME_LEN) @@ -3766,13 +3851,22 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name) if (!ptr) return -ENOMEM; - mutex_lock(&ffs_ep_lock); - if (g_opts) { - mutex_unlock(&ffs_ep_lock); - ffs_log("%s: prev inst do not freed yet\n", __func__); + inst_status = name_to_inst_status(ptr, true); + if (IS_ERR(inst_status)) { + ffs_log("failed to create status struct for (%s) instance\n", + ptr); + return -EINVAL; + } + + mutex_lock(&inst_status->ffs_lock); + opts_prev = inst_status->opts; + if (opts_prev) { + mutex_unlock(&inst_status->ffs_lock); + ffs_log("instance (%s): prev inst do not freed yet\n", + inst_status->inst_name); return -EBUSY; } - mutex_unlock(&ffs_ep_lock); + mutex_unlock(&inst_status->ffs_lock); opts = to_f_fs_opts(fi); tmp = NULL; @@ -3794,8 +3888,9 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name) * ffs_private_data also need to update new allocated opts->dev * address. 
*/ - if (g_ffs_data) - opts->dev->ffs_data = g_ffs_data; + ffs_data_tmp = inst_status->ffs_data; + if (ffs_data_tmp) + opts->dev->ffs_data = ffs_data_tmp; if (opts->dev->ffs_data) opts->dev->ffs_data->private_data = opts->dev; @@ -3804,10 +3899,10 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name) kfree(tmp); - mutex_lock(&ffs_ep_lock); - ffs_inst_exist = true; - g_opts = opts; - mutex_unlock(&ffs_ep_lock); + mutex_lock(&inst_status->ffs_lock); + inst_status->inst_exist = true; + inst_status->opts = opts; + mutex_unlock(&inst_status->ffs_lock); return 0; } @@ -4212,6 +4307,20 @@ module_init(ffs_init); static void __exit ffs_exit(void) { + struct ffs_inst_status *inst_status, *inst_status_tmp = NULL; + + list_for_each_entry(inst_status, &inst_list, list) { + if (inst_status_tmp) { + list_del(&inst_status_tmp->list); + kfree(inst_status_tmp); + } + inst_status_tmp = inst_status; + } + if (inst_status_tmp) { + list_del(&inst_status_tmp->list); + kfree(inst_status_tmp); + } + if (ffs_ipc_log) { ipc_log_context_destroy(ffs_ipc_log); ffs_ipc_log = NULL; diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index ee579ba2b59e..a5dae5bb62ab 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -223,6 +223,13 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer, /* pick the first one */ list = list_first_entry(&hidg->completed_out_req, struct f_hidg_req_list, list); + + /* + * Remove this from list to protect it from beign free() + * while host disables our function + */ + list_del(&list->list); + req = list->req; count = min_t(unsigned int, count, req->actual - list->pos); spin_unlock_irqrestore(&hidg->spinlock, flags); @@ -238,15 +245,20 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer, * call, taking into account its current read position. 
*/ if (list->pos == req->actual) { - spin_lock_irqsave(&hidg->spinlock, flags); - list_del(&list->list); kfree(list); - spin_unlock_irqrestore(&hidg->spinlock, flags); req->length = hidg->report_length; ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL); - if (ret < 0) + if (ret < 0) { + free_ep_req(hidg->out_ep, req); return ret; + } + } else { + spin_lock_irqsave(&hidg->spinlock, flags); + list_add(&list->list, &hidg->completed_out_req); + spin_unlock_irqrestore(&hidg->spinlock, flags); + + wake_up(&hidg->read_queue); } return count; @@ -490,14 +502,18 @@ static void hidg_disable(struct usb_function *f) { struct f_hidg *hidg = func_to_hidg(f); struct f_hidg_req_list *list, *next; + unsigned long flags; usb_ep_disable(hidg->in_ep); usb_ep_disable(hidg->out_ep); + spin_lock_irqsave(&hidg->spinlock, flags); list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) { + free_ep_req(hidg->out_ep, list->req); list_del(&list->list); kfree(list); } + spin_unlock_irqrestore(&hidg->spinlock, flags); } static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 79f554f1fb23..1fcdbdc35cd1 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -210,12 +210,6 @@ static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep, return alloc_ep_req(ep, length, length); } -static void free_ep_req(struct usb_ep *ep, struct usb_request *req) -{ - kfree(req->buf); - usb_ep_free_request(ep, req); -} - static const uint8_t f_midi_cin_length[] = { 0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1 }; diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index 9f3ced62d916..67b243989938 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c @@ -303,12 +303,6 @@ static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) return alloc_ep_req(ep, len, ss->buflen); } -void free_ep_req(struct usb_ep *ep, struct usb_request *req) -{ - kfree(req->buf); - usb_ep_free_request(ep, req); -} - static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep) { int value; diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h index 15f180904f8a..5ed90b437f18 100644 --- a/drivers/usb/gadget/function/g_zero.h +++ b/drivers/usb/gadget/function/g_zero.h @@ -59,7 +59,6 @@ void lb_modexit(void); int lb_modinit(void); /* common utilities */ -void free_ep_req(struct usb_ep *ep, struct usb_request *req); void disable_endpoints(struct usb_composite_dev *cdev, struct usb_ep *in, struct usb_ep *out, struct usb_ep *iso_in, struct usb_ep *iso_out); diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c index c6276f0268ae..907f8144813c 100644 --- a/drivers/usb/gadget/u_f.c +++ b/drivers/usb/gadget/u_f.c @@ -11,16 +11,18 @@ * published by the Free Software Foundation. 
*/ -#include <linux/usb/gadget.h> #include "u_f.h" +#include <linux/usb/ch9.h> -struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len) +struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len, int default_len) { struct usb_request *req; req = usb_ep_alloc_request(ep, GFP_ATOMIC); if (req) { req->length = len ?: default_len; + if (usb_endpoint_dir_out(ep->desc)) + req->length = usb_ep_align(ep, req->length); req->buf = kmalloc(req->length, GFP_ATOMIC); if (!req->buf) { usb_ep_free_request(ep, req); diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h index 1d5f0eb68552..69a1d10df04f 100644 --- a/drivers/usb/gadget/u_f.h +++ b/drivers/usb/gadget/u_f.h @@ -16,6 +16,8 @@ #ifndef __U_F_H__ #define __U_F_H__ +#include <linux/usb/gadget.h> + /* Variable Length Array Macros **********************************************/ #define vla_group(groupname) size_t groupname##__next = 0 #define vla_group_size(groupname) groupname##__next @@ -45,8 +47,26 @@ struct usb_ep; struct usb_request; -struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len); - -#endif /* __U_F_H__ */ +/** + * alloc_ep_req - returns a usb_request allocated by the gadget driver and + * allocates the request's buffer. + * + * @ep: the endpoint to allocate a usb_request + * @len: usb_requests's buffer suggested size + * @default_len: used if @len is not provided, ie, is 0 + * + * In case @ep direction is OUT, the @len will be aligned to ep's + * wMaxPacketSize. In order to avoid memory leaks or drops, *always* use + * usb_requests's length (req->length) to refer to the allocated buffer size. + * Requests allocated via alloc_ep_req() *must* be freed by free_ep_req(). + */ +struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len, int default_len); +/* Frees a usb_request previously allocated by alloc_ep_req() */ +static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} +#endif /* __U_F_H__ */ diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index ccb9c213cc9f..e9bd8d4abca0 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c @@ -475,7 +475,7 @@ static int bdc_probe(struct platform_device *pdev) bdc->dev = dev; dev_dbg(bdc->dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq); - temp = bdc_readl(bdc->regs, BDC_BDCSC); + temp = bdc_readl(bdc->regs, BDC_BDCCAP1); if ((temp & BDC_P64) && !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) { dev_dbg(bdc->dev, "Using 64-bit address\n"); diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c index 02968842b359..708e36f530d8 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_pci.c +++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c @@ -82,6 +82,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) if (ret) { dev_err(&pci->dev, "couldn't add resources to bdc device\n"); + platform_device_put(bdc); return ret; } diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 8080a11947b7..eb876ed96861 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -2105,16 +2105,13 @@ static int dummy_hub_control( } break; case USB_PORT_FEAT_POWER: - if (hcd->speed == HCD_USB3) { - if (dum_hcd->port_status & USB_PORT_STAT_POWER) - dev_dbg(dummy_dev(dum_hcd), - "power-off\n"); - } else - if (dum_hcd->port_status & - USB_SS_PORT_STAT_POWER) - dev_dbg(dummy_dev(dum_hcd), - 
"power-off\n"); - /* FALLS THROUGH */ + dev_dbg(dummy_dev(dum_hcd), "power-off\n"); + if (hcd->speed == HCD_USB3) + dum_hcd->port_status &= ~USB_SS_PORT_STAT_POWER; + else + dum_hcd->port_status &= ~USB_PORT_STAT_POWER; + set_link_state(dum_hcd); + break; default: dum_hcd->port_status &= ~(1 << wValue); set_link_state(dum_hcd); @@ -2285,14 +2282,13 @@ static int dummy_hub_control( if ((dum_hcd->port_status & USB_SS_PORT_STAT_POWER) != 0) { dum_hcd->port_status |= (1 << wValue); - set_link_state(dum_hcd); } } else if ((dum_hcd->port_status & USB_PORT_STAT_POWER) != 0) { dum_hcd->port_status |= (1 << wValue); - set_link_state(dum_hcd); } + set_link_state(dum_hcd); } break; case GetPortErrorCount: diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 408c8eca2bbe..61fb325e4267 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -437,7 +437,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); static struct platform_driver usb_xhci_driver = { .probe = xhci_plat_probe, .remove = xhci_plat_remove, - .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "xhci-hcd", .pm = DEV_PM_OPS, diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index 060d78d53118..d0c7f4949f6f 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -82,6 +82,8 @@ struct mon_reader_text { wait_queue_head_t wait; int printf_size; + size_t printf_offset; + size_t printf_togo; char *printf_buf; struct mutex printf_lock; @@ -373,73 +375,103 @@ err_alloc: return rc; } -/* - * For simplicity, we read one record in one system call and throw out - * what does not fit. This means that the following does not work: - * dd if=/dbg/usbmon/0t bs=10 - * Also, we do not allow seeks and do not bother advancing the offset. - */ +static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp, + char __user * const buf, const size_t nbytes) +{ + const size_t togo = min(nbytes, rp->printf_togo); + + if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo)) + return -EFAULT; + rp->printf_togo -= togo; + rp->printf_offset += togo; + return togo; +} + +/* ppos is not advanced since the llseek operation is not permitted. 
*/ static ssize_t mon_text_read_t(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) + size_t nbytes, loff_t *ppos) { struct mon_reader_text *rp = file->private_data; struct mon_event_text *ep; struct mon_text_ptr ptr; + ssize_t ret; - if (IS_ERR(ep = mon_text_read_wait(rp, file))) - return PTR_ERR(ep); mutex_lock(&rp->printf_lock); - ptr.cnt = 0; - ptr.pbuf = rp->printf_buf; - ptr.limit = rp->printf_size; - - mon_text_read_head_t(rp, &ptr, ep); - mon_text_read_statset(rp, &ptr, ep); - ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, - " %d", ep->length); - mon_text_read_data(rp, &ptr, ep); - - if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) - ptr.cnt = -EFAULT; + + if (rp->printf_togo == 0) { + + ep = mon_text_read_wait(rp, file); + if (IS_ERR(ep)) { + mutex_unlock(&rp->printf_lock); + return PTR_ERR(ep); + } + ptr.cnt = 0; + ptr.pbuf = rp->printf_buf; + ptr.limit = rp->printf_size; + + mon_text_read_head_t(rp, &ptr, ep); + mon_text_read_statset(rp, &ptr, ep); + ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + " %d", ep->length); + mon_text_read_data(rp, &ptr, ep); + + rp->printf_togo = ptr.cnt; + rp->printf_offset = 0; + + kmem_cache_free(rp->e_slab, ep); + } + + ret = mon_text_copy_to_user(rp, buf, nbytes); mutex_unlock(&rp->printf_lock); - kmem_cache_free(rp->e_slab, ep); - return ptr.cnt; + return ret; } +/* ppos is not advanced since the llseek operation is not permitted. */ static ssize_t mon_text_read_u(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) + size_t nbytes, loff_t *ppos) { struct mon_reader_text *rp = file->private_data; struct mon_event_text *ep; struct mon_text_ptr ptr; + ssize_t ret; - if (IS_ERR(ep = mon_text_read_wait(rp, file))) - return PTR_ERR(ep); mutex_lock(&rp->printf_lock); - ptr.cnt = 0; - ptr.pbuf = rp->printf_buf; - ptr.limit = rp->printf_size; - mon_text_read_head_u(rp, &ptr, ep); - if (ep->type == 'E') { - mon_text_read_statset(rp, &ptr, ep); - } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { - mon_text_read_isostat(rp, &ptr, ep); - mon_text_read_isodesc(rp, &ptr, ep); - } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { - mon_text_read_intstat(rp, &ptr, ep); - } else { - mon_text_read_statset(rp, &ptr, ep); + if (rp->printf_togo == 0) { + + ep = mon_text_read_wait(rp, file); + if (IS_ERR(ep)) { + mutex_unlock(&rp->printf_lock); + return PTR_ERR(ep); + } + ptr.cnt = 0; + ptr.pbuf = rp->printf_buf; + ptr.limit = rp->printf_size; + + mon_text_read_head_u(rp, &ptr, ep); + if (ep->type == 'E') { + mon_text_read_statset(rp, &ptr, ep); + } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { + mon_text_read_isostat(rp, &ptr, ep); + mon_text_read_isodesc(rp, &ptr, ep); + } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { + mon_text_read_intstat(rp, &ptr, ep); + } else { + mon_text_read_statset(rp, &ptr, ep); + } + ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + " %d", ep->length); + mon_text_read_data(rp, &ptr, ep); + + rp->printf_togo = ptr.cnt; + rp->printf_offset = 0; + + kmem_cache_free(rp->e_slab, ep); } - ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, - " %d", ep->length); - mon_text_read_data(rp, &ptr, ep); - if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) - ptr.cnt = -EFAULT; + ret = mon_text_copy_to_user(rp, buf, nbytes); mutex_unlock(&rp->printf_lock); - kmem_cache_free(rp->e_slab, ep); - return ptr.cnt; + return ret; } static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c 
b/drivers/usb/phy/phy-msm-ssusb-qmp.c index 2bc3c6fa417a..a76a6577ee98 100644 --- a/drivers/usb/phy/phy-msm-ssusb-qmp.c +++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c @@ -473,10 +473,7 @@ static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend) } if (suspend) { - if (!phy->cable_connected) - writel_relaxed(0x00, - phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); - else + if (phy->cable_connected) msm_ssusb_qmp_enable_autonomous(phy, 1); /* Make sure above write completed with PHY */ @@ -540,6 +537,10 @@ static int msm_ssphy_qmp_notify_disconnect(struct usb_phy *uphy, struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp, phy); + writel_relaxed(0x00, + phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); + readl_relaxed(phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); + dev_dbg(uphy->dev, "QMP phy disconnect notification\n"); dev_dbg(uphy->dev, " cable_connected=%d\n", phy->cable_connected); phy->cable_connected = false; diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index a4ab4fdf5ba3..64a4427678b0 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -151,6 +151,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ + { USB_DEVICE(0x155A, 0x1006) }, /* ELDAT Easywave RX09 */ { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */ { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */ { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 64fe9dc25ed4..a224c7a3ce09 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -773,6 +773,7 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) }, + { USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) }, @@ -935,6 +936,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_FHE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 543d2801632b..76a10b222ff9 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -922,6 +922,9 @@ /* * RT Systems programming cables for various ham radios */ +/* This device uses the VID of FTDI */ +#define RTSYSTEMS_USB_VX8_PID 0x9e50 /* USB-VX8 USB to 7 pin modular plug for Yaesu VX-8 radio */ + #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ #define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */ #define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */ @@ -1441,6 +1444,12 @@ #define FTDI_CINTERION_MC55I_PID 0xA951 /* + * Product: FirmwareHubEmulator + * Manufacturer: Harman Becker Automotive Systems + */ 
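Several of the serial hunks nearby (cp210x gaining the ELDAT Easywave RX09 entry, ftdi_sio gaining the RT Systems and FHE entries) follow the same two-step recipe for supporting a new dongle: define its VID/PID, then append a matching entry to the driver's usb_device_id table so the USB core and modprobe aliasing can bind the device. A minimal, generic sketch of that pattern — the EXAMPLE_* names and IDs below are made up for illustration and are not taken from these drivers:

#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VID		0x1234	/* hypothetical vendor ID */
#define EXAMPLE_NEW_PID		0xabcd	/* hypothetical product ID being added */

static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(EXAMPLE_VID, 0x0001) },		/* previously supported device */
	{ USB_DEVICE(EXAMPLE_VID, EXAMPLE_NEW_PID) },	/* newly supported device */
	{ }						/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);

The terminating empty entry and the MODULE_DEVICE_TABLE() export are what let hotplug tooling auto-load the driver when a matching VID/PID appears, which is why these one-line ID additions are complete fixes on their own.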
+#define FTDI_FHE_PID 0xA9A0 + +/* * Product: Comet Caller ID decoder * Manufacturer: Crucible Technologies */ diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c index 091e8ec7a6c0..962bb6376b0c 100644 --- a/drivers/usb/storage/ene_ub6250.c +++ b/drivers/usb/storage/ene_ub6250.c @@ -1953,6 +1953,8 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag) bcb->CDB[0] = 0xEF; result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0); + if (us->srb != NULL) + scsi_set_resid(us->srb, 0); info->BIN_FLAG = flag; kfree(buf); @@ -2306,21 +2308,22 @@ static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb) static int ene_transport(struct scsi_cmnd *srb, struct us_data *us) { - int result = 0; + int result = USB_STOR_XFER_GOOD; struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); /*US_DEBUG(usb_stor_show_command(us, srb)); */ scsi_set_resid(srb, 0); - if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) { + if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) result = ene_init(us); - } else { + if (result == USB_STOR_XFER_GOOD) { + result = USB_STOR_TRANSPORT_ERROR; if (info->SD_Status.Ready) result = sd_scsi_irp(us, srb); if (info->MS_Status.Ready) result = ms_scsi_irp(us, srb); } - return 0; + return result; } static struct scsi_host_template ene_ub6250_host_template; diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index de7214ae4fed..6cac8f26b97a 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -1052,7 +1052,7 @@ static int uas_post_reset(struct usb_interface *intf) return 0; err = uas_configure_endpoints(devinfo); - if (err && err != ENODEV) + if (err && err != -ENODEV) shost_printk(KERN_ERR, shost, "%s: alloc streams error %d after reset", __func__, err); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index c10eceb76c39..1a34d2a89de6 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2142,6 +2142,13 @@ UNUSUAL_DEV( 0x22b8, 0x3010, 0x0001, 0x0001, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ), +/* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */ +UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA ), + /* Reported-by George Cherian <george.cherian@cavium.com> */ UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999, "JMicron", diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index ad2146a9ab2d..675819a1af37 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -173,8 +173,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file) if (mask) vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); if (mask & POLLERR) { - if (poll->wqh) - remove_wait_queue(poll->wqh, &poll->wait); + vhost_poll_stop(poll); ret = -EINVAL; } diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 517f565b65d7..598ec7545e84 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -409,7 +409,10 @@ static const char *vgacon_startup(void) vga_video_port_val = VGA_CRT_DM; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { static struct resource ega_console_resource = - { .name = "ega", .start = 0x3B0, .end = 0x3BF }; + { .name = "ega", + .flags = IORESOURCE_IO, + .start = 0x3B0, + .end = 0x3BF }; vga_video_type = VIDEO_TYPE_EGAM; vga_vram_size = 0x8000; display_desc = "EGA+"; @@ -417,9 +420,15 @@ static const 
char *vgacon_startup(void) &ega_console_resource); } else { static struct resource mda1_console_resource = - { .name = "mda", .start = 0x3B0, .end = 0x3BB }; + { .name = "mda", + .flags = IORESOURCE_IO, + .start = 0x3B0, + .end = 0x3BB }; static struct resource mda2_console_resource = - { .name = "mda", .start = 0x3BF, .end = 0x3BF }; + { .name = "mda", + .flags = IORESOURCE_IO, + .start = 0x3BF, + .end = 0x3BF }; vga_video_type = VIDEO_TYPE_MDA; vga_vram_size = 0x2000; display_desc = "*MDA"; @@ -441,15 +450,21 @@ static const char *vgacon_startup(void) vga_vram_size = 0x8000; if (!screen_info.orig_video_isVGA) { - static struct resource ega_console_resource - = { .name = "ega", .start = 0x3C0, .end = 0x3DF }; + static struct resource ega_console_resource = + { .name = "ega", + .flags = IORESOURCE_IO, + .start = 0x3C0, + .end = 0x3DF }; vga_video_type = VIDEO_TYPE_EGAC; display_desc = "EGA"; request_resource(&ioport_resource, &ega_console_resource); } else { - static struct resource vga_console_resource - = { .name = "vga+", .start = 0x3C0, .end = 0x3DF }; + static struct resource vga_console_resource = + { .name = "vga+", + .flags = IORESOURCE_IO, + .start = 0x3C0, + .end = 0x3DF }; vga_video_type = VIDEO_TYPE_VGAC; display_desc = "VGA+"; request_resource(&ioport_resource, @@ -493,7 +508,10 @@ static const char *vgacon_startup(void) } } else { static struct resource cga_console_resource = - { .name = "cga", .start = 0x3D4, .end = 0x3D5 }; + { .name = "cga", + .flags = IORESOURCE_IO, + .start = 0x3D4, + .end = 0x3D5 }; vga_video_type = VIDEO_TYPE_CGA; vga_vram_size = 0x2000; display_desc = "*CGA"; diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 9362424c2340..924b3d6c3e9b 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -759,8 +759,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb) if (err) return err; - framesize = fb->panel->mode.xres * fb->panel->mode.yres * - fb->panel->bpp / 8; + framesize = PAGE_ALIGN(fb->panel->mode.xres * fb->panel->mode.yres * + fb->panel->bpp / 8); fb->fb.screen_base = dma_alloc_coherent(&fb->dev->dev, framesize, &dma, GFP_KERNEL); if (!fb->fb.screen_base) diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c index 1e56b50e4082..88adb2970b44 100644 --- a/drivers/video/fbdev/goldfishfb.c +++ b/drivers/video/fbdev/goldfishfb.c @@ -38,11 +38,58 @@ enum { FB_SET_BLANK = 0x18, FB_GET_PHYS_WIDTH = 0x1c, FB_GET_PHYS_HEIGHT = 0x20, + FB_GET_FORMAT = 0x24, FB_INT_VSYNC = 1U << 0, FB_INT_BASE_UPDATE_DONE = 1U << 1 }; +/* These values *must* match the platform definitions found under + * <system/graphics.h> + */ +enum { + HAL_PIXEL_FORMAT_RGBA_8888 = 1, + HAL_PIXEL_FORMAT_RGBX_8888 = 2, + HAL_PIXEL_FORMAT_RGB_888 = 3, + HAL_PIXEL_FORMAT_RGB_565 = 4, + HAL_PIXEL_FORMAT_BGRA_8888 = 5, +}; + +struct framebuffer_config { + u8 bytes_per_pixel; + u8 red_offset; + u8 red_length; + u8 green_offset; + u8 green_length; + u8 blue_offset; + u8 blue_length; + u8 transp_offset; + u8 transp_length; +}; + +enum { + CHAR_BIT = 8 +}; + +static const struct framebuffer_config *get_fb_config_from_format(int format) +{ + static const struct framebuffer_config fb_configs[] = { + { 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* Invalid, assume RGB_565 */ + { 4, 0, 8, 8, 8, 16, 8, 24, 8 }, /* HAL_PIXEL_FORMAT_RGBA_8888 */ + { 4, 0, 8, 8, 8, 16, 8, 0, 0 }, /* HAL_PIXEL_FORMAT_RGBX_8888 */ + { 3, 0, 8, 8, 8, 16, 8, 0, 0 }, /* HAL_PIXEL_FORMAT_RGB_888 */ + { 2, 11, 5, 5, 6, 0, 5, 0, 0 }, /* 
HAL_PIXEL_FORMAT_RGB_565 */ + { 4, 16, 8, 8, 8, 0, 8, 24, 8 }, /* HAL_PIXEL_FORMAT_BGRA_8888 */ + }; + + if (format > 0 && + format < sizeof(fb_configs) / sizeof(struct framebuffer_config)) { + return &fb_configs[format]; + } + + return &fb_configs[HAL_PIXEL_FORMAT_RGB_565]; /* legacy default */ +} + struct goldfish_fb { void __iomem *reg_base; int irq; @@ -125,8 +172,10 @@ static int goldfish_fb_check_var(struct fb_var_screeninfo *var, static int goldfish_fb_set_par(struct fb_info *info) { struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb); + if (fb->rotation != fb->fb.var.rotate) { - info->fix.line_length = info->var.xres * 2; + info->fix.line_length = info->var.xres * + (fb->fb.var.bits_per_pixel / CHAR_BIT); fb->rotation = fb->fb.var.rotate; writel(fb->rotation, fb->reg_base + FB_SET_ROTATION); } @@ -143,19 +192,24 @@ static int goldfish_fb_pan_display(struct fb_var_screeninfo *var, spin_lock_irqsave(&fb->lock, irq_flags); base_update_count = fb->base_update_count; - writel(fb->fb.fix.smem_start + fb->fb.var.xres * 2 * var->yoffset, - fb->reg_base + FB_SET_BASE); + writel(fb->fb.fix.smem_start + + fb->fb.var.xres * + (fb->fb.var.bits_per_pixel / CHAR_BIT) * + var->yoffset, + fb->reg_base + FB_SET_BASE); spin_unlock_irqrestore(&fb->lock, irq_flags); wait_event_timeout(fb->wait, fb->base_update_count != base_update_count, HZ / 15); if (fb->base_update_count == base_update_count) - pr_err("goldfish_fb_pan_display: timeout waiting for base update\n"); + pr_err("goldfish_fb_pan_display: timeout waiting for " + "base update\n"); return 0; } static int goldfish_fb_blank(int blank, struct fb_info *info) { struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb); + switch (blank) { case FB_BLANK_NORMAL: writel(1, fb->reg_base + FB_SET_BLANK); @@ -186,8 +240,10 @@ static int goldfish_fb_probe(struct platform_device *pdev) struct resource *r; struct goldfish_fb *fb; size_t framesize; - u32 width, height; + u32 width, height, format; + int bytes_per_pixel; dma_addr_t fbpaddr; + const struct framebuffer_config *fb_config; fb = kzalloc(sizeof(*fb), GFP_KERNEL); if (fb == NULL) { @@ -217,13 +273,20 @@ static int goldfish_fb_probe(struct platform_device *pdev) width = readl(fb->reg_base + FB_GET_WIDTH); height = readl(fb->reg_base + FB_GET_HEIGHT); + format = readl(fb->reg_base + FB_GET_FORMAT); + fb_config = get_fb_config_from_format(format); + if (!fb_config) { + ret = -EINVAL; + goto err_no_irq; + } + bytes_per_pixel = fb_config->bytes_per_pixel; fb->fb.fbops = &goldfish_fb_ops; fb->fb.flags = FBINFO_FLAG_DEFAULT; fb->fb.pseudo_palette = fb->cmap; fb->fb.fix.type = FB_TYPE_PACKED_PIXELS; fb->fb.fix.visual = FB_VISUAL_TRUECOLOR; - fb->fb.fix.line_length = width * 2; + fb->fb.fix.line_length = width * bytes_per_pixel; fb->fb.fix.accel = FB_ACCEL_NONE; fb->fb.fix.ypanstep = 1; @@ -231,20 +294,22 @@ static int goldfish_fb_probe(struct platform_device *pdev) fb->fb.var.yres = height; fb->fb.var.xres_virtual = width; fb->fb.var.yres_virtual = height * 2; - fb->fb.var.bits_per_pixel = 16; + fb->fb.var.bits_per_pixel = bytes_per_pixel * CHAR_BIT; fb->fb.var.activate = FB_ACTIVATE_NOW; fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT); fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH); fb->fb.var.pixclock = 0; - fb->fb.var.red.offset = 11; - fb->fb.var.red.length = 5; - fb->fb.var.green.offset = 5; - fb->fb.var.green.length = 6; - fb->fb.var.blue.offset = 0; - fb->fb.var.blue.length = 5; + fb->fb.var.red.offset = fb_config->red_offset; + fb->fb.var.red.length 
= fb_config->red_length; + fb->fb.var.green.offset = fb_config->green_offset; + fb->fb.var.green.length = fb_config->green_length; + fb->fb.var.blue.offset = fb_config->blue_offset; + fb->fb.var.blue.length = fb_config->blue_length; + fb->fb.var.transp.offset = fb_config->transp_offset; + fb->fb.var.transp.length = fb_config->transp_length; - framesize = width * height * 2 * 2; + framesize = width * height * 2 * bytes_per_pixel; fb->fb.screen_base = (char __force __iomem *)dma_alloc_coherent( &pdev->dev, framesize, &fbpaddr, GFP_KERNEL); @@ -295,7 +360,8 @@ static int goldfish_fb_remove(struct platform_device *pdev) size_t framesize; struct goldfish_fb *fb = platform_get_drvdata(pdev); - framesize = fb->fb.var.xres_virtual * fb->fb.var.yres_virtual * 2; + framesize = fb->fb.var.xres_virtual * fb->fb.var.yres_virtual * + (fb->fb.var.bits_per_pixel / CHAR_BIT); unregister_framebuffer(&fb->fb); free_irq(fb->irq, fb); diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c index d357a616b05e..dff8b63eea64 100644 --- a/drivers/video/fbdev/msm/mdp3_ctrl.c +++ b/drivers/video/fbdev/msm/mdp3_ctrl.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1563,9 +1563,10 @@ static int mdp3_get_metadata(struct msm_fb_data_type *mfd, } break; case metadata_op_get_ion_fd: - if (mfd->fb_ion_handle) { + if (mfd->fb_ion_handle && mfd->fb_ion_client) { metadata->data.fbmem_ionfd = - dma_buf_fd(mfd->fbmem_buf, 0); + ion_share_dma_buf_fd(mfd->fb_ion_client, + mfd->fb_ion_handle); if (metadata->data.fbmem_ionfd < 0) pr_err("fd allocation failed. 
fd = %d\n", metadata->data.fbmem_ionfd); diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index d0a4e2f79a57..d215faacce04 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c @@ -1600,6 +1600,7 @@ static int sm501fb_start(struct sm501fb_info *info, info->fbmem = ioremap(res->start, resource_size(res)); if (info->fbmem == NULL) { dev_err(dev, "cannot remap framebuffer\n"); + ret = -ENXIO; goto err_mem_res; } diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index 53326badfb61..2add8def83be 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -1487,15 +1487,25 @@ static struct device_attribute fb_device_attrs[] = { static int dlfb_select_std_channel(struct dlfb_data *dev) { int ret; - u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7, + void *buf; + static const u8 set_def_chn[] = { + 0x57, 0xCD, 0xDC, 0xA7, 0x1C, 0x88, 0x5E, 0x15, 0x60, 0xFE, 0xC6, 0x97, 0x16, 0x3D, 0x47, 0xF2 }; + buf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL); + + if (!buf) + return -ENOMEM; + ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), NR_USB_REQUEST_CHANNEL, (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, - set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT); + buf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT); + + kfree(buf); + return ret; } diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c index 728cb6b23c42..7d8dfc7f1269 100644 --- a/drivers/video/fbdev/vfb.c +++ b/drivers/video/fbdev/vfb.c @@ -298,8 +298,23 @@ static int vfb_check_var(struct fb_var_screeninfo *var, */ static int vfb_set_par(struct fb_info *info) { + switch (info->var.bits_per_pixel) { + case 1: + info->fix.visual = FB_VISUAL_MONO01; + break; + case 8: + info->fix.visual = FB_VISUAL_PSEUDOCOLOR; + break; + case 16: + case 24: + case 32: + info->fix.visual = FB_VISUAL_TRUECOLOR; + break; + } + info->fix.line_length = get_line_length(info->var.xres_virtual, info->var.bits_per_pixel); + return 0; } @@ -540,6 +555,8 @@ static int vfb_probe(struct platform_device *dev) goto err2; platform_set_drvdata(dev, info); + vfb_set_par(info); + fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n", videomemorysize >> 10); return 0; diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index 1cf907ecded4..111a0ab6280a 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -321,6 +321,17 @@ int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame) } EXPORT_SYMBOL(hdmi_vendor_infoframe_init); +static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *frame) +{ + /* for side by side (half) we also need to provide 3D_Ext_Data */ + if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) + return 6; + else if (frame->vic != 0 || frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) + return 5; + else + return 4; +} + /** * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer * @frame: HDMI infoframe @@ -341,19 +352,11 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, u8 *ptr = buffer; size_t length; - /* empty info frame */ - if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID) - return -EINVAL; - /* only one of those can be supplied */ if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) return -EINVAL; - /* for side by side (half) we also need to provide 3D_Ext_Data */ - if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) - frame->length = 6; - else - frame->length = 5; + 
frame->length = hdmi_vendor_infoframe_length(frame); length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; @@ -372,14 +375,16 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, ptr[5] = 0x0c; ptr[6] = 0x00; - if (frame->vic) { - ptr[7] = 0x1 << 5; /* video format */ - ptr[8] = frame->vic; - } else { + if (frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) { ptr[7] = 0x2 << 5; /* video format */ ptr[8] = (frame->s3d_struct & 0xf) << 4; if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) ptr[9] = (frame->s3d_ext_data & 0xf) << 4; + } else if (frame->vic) { + ptr[7] = 0x1 << 5; /* video format */ + ptr[8] = frame->vic; + } else { + ptr[7] = 0x0 << 5; /* video format */ } hdmi_infoframe_set_checksum(buffer, length); @@ -1165,7 +1170,7 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame, if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR || ptr[1] != 1 || - (ptr[2] != 5 && ptr[2] != 6)) + (ptr[2] != 4 && ptr[2] != 5 && ptr[2] != 6)) return -EINVAL; length = ptr[2]; @@ -1193,16 +1198,22 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame, hvf->length = length; - if (hdmi_video_format == 0x1) { - hvf->vic = ptr[4]; - } else if (hdmi_video_format == 0x2) { + if (hdmi_video_format == 0x2) { + if (length != 5 && length != 6) + return -EINVAL; hvf->s3d_struct = ptr[4] >> 4; if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) { - if (length == 6) - hvf->s3d_ext_data = ptr[5] >> 4; - else + if (length != 6) return -EINVAL; + hvf->s3d_ext_data = ptr[5] >> 4; } + } else if (hdmi_video_format == 0x1) { + if (length != 5) + return -EINVAL; + hvf->vic = ptr[4]; + } else { + if (length != 4) + return -EINVAL; } return 0; diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 286369d4f0f5..be99112fad00 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -51,6 +51,7 @@ static char expect_release; static unsigned long hpwdt_is_open; static void __iomem *pci_mem_addr; /* the PCI-memory address */ +static unsigned long __iomem *hpwdt_nmistat; static unsigned long __iomem *hpwdt_timer_reg; static unsigned long __iomem *hpwdt_timer_con; @@ -474,6 +475,11 @@ static int hpwdt_time_left(void) } #ifdef CONFIG_HPWDT_NMI_DECODING +static int hpwdt_my_nmi(void) +{ + return ioread8(hpwdt_nmistat) & 0x6; +} + /* * NMI Handler */ @@ -485,6 +491,9 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs) if (!hpwdt_nmi_decoding) goto out; + if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi()) + return NMI_DONE; + spin_lock_irqsave(&rom_lock, rom_pl); if (!die_nmi_called && !is_icru && !is_uefi) asminline_call(&cmn_regs, cru_rom_addr); @@ -700,7 +709,7 @@ static void dmi_find_icru(const struct dmi_header *dm, void *dummy) smbios_proliant_ptr = (struct smbios_proliant_info *) dm; if (smbios_proliant_ptr->misc_features & 0x01) is_icru = 1; - if (smbios_proliant_ptr->misc_features & 0x408) + if (smbios_proliant_ptr->misc_features & 0x1400) is_uefi = 1; } } @@ -840,6 +849,7 @@ static int hpwdt_init_one(struct pci_dev *dev, retval = -ENOMEM; goto error_pci_iomap; } + hpwdt_nmistat = pci_mem_addr + 0x6e; hpwdt_timer_reg = pci_mem_addr + 0x70; hpwdt_timer_con = pci_mem_addr + 0x72; @@ -68,9 +68,9 @@ struct aio_ring { #define AIO_RING_PAGES 8 struct kioctx_table { - struct rcu_head rcu; - unsigned nr; - struct kioctx *table[]; + struct rcu_head rcu; + unsigned nr; + struct kioctx __rcu *table[]; }; struct kioctx_cpu { @@ -115,7 +115,8 @@ struct kioctx { struct page **ring_pages; long nr_pages; - struct 
work_struct free_work; + struct rcu_head free_rcu; + struct work_struct free_work; /* see free_ioctx() */ /* * signals when all in-flight requests are done @@ -327,7 +328,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma) for (i = 0; i < table->nr; i++) { struct kioctx *ctx; - ctx = table->table[i]; + ctx = rcu_dereference(table->table[i]); if (ctx && ctx->aio_ring_file == file) { if (!atomic_read(&ctx->dead)) { ctx->user_id = ctx->mmap_base = vma->vm_start; @@ -574,6 +575,12 @@ static int kiocb_cancel(struct aio_kiocb *kiocb) return cancel(&kiocb->common); } +/* + * free_ioctx() should be RCU delayed to synchronize against the RCU + * protected lookup_ioctx() and also needs process context to call + * aio_free_ring(), so the double bouncing through kioctx->free_rcu and + * ->free_work. + */ static void free_ioctx(struct work_struct *work) { struct kioctx *ctx = container_of(work, struct kioctx, free_work); @@ -587,6 +594,14 @@ static void free_ioctx(struct work_struct *work) kmem_cache_free(kioctx_cachep, ctx); } +static void free_ioctx_rcufn(struct rcu_head *head) +{ + struct kioctx *ctx = container_of(head, struct kioctx, free_rcu); + + INIT_WORK(&ctx->free_work, free_ioctx); + schedule_work(&ctx->free_work); +} + static void free_ioctx_reqs(struct percpu_ref *ref) { struct kioctx *ctx = container_of(ref, struct kioctx, reqs); @@ -595,8 +610,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref) if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) complete(&ctx->rq_wait->comp); - INIT_WORK(&ctx->free_work, free_ioctx); - schedule_work(&ctx->free_work); + /* Synchronize against RCU protected table->table[] dereferences */ + call_rcu(&ctx->free_rcu, free_ioctx_rcufn); } /* @@ -637,9 +652,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) while (1) { if (table) for (i = 0; i < table->nr; i++) - if (!table->table[i]) { + if (!rcu_access_pointer(table->table[i])) { ctx->id = i; - table->table[i] = ctx; + rcu_assign_pointer(table->table[i], ctx); spin_unlock(&mm->ioctx_lock); /* While kioctx setup is in progress, @@ -814,11 +829,11 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, } table = rcu_dereference_raw(mm->ioctx_table); - WARN_ON(ctx != table->table[ctx->id]); - table->table[ctx->id] = NULL; + WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id])); + RCU_INIT_POINTER(table->table[ctx->id], NULL); spin_unlock(&mm->ioctx_lock); - /* percpu_ref_kill() will do the necessary call_rcu() */ + /* free_ioctx_reqs() will do the necessary RCU synchronization */ wake_up_all(&ctx->wait); /* @@ -860,7 +875,8 @@ void exit_aio(struct mm_struct *mm) skipped = 0; for (i = 0; i < table->nr; ++i) { - struct kioctx *ctx = table->table[i]; + struct kioctx *ctx = + rcu_dereference_protected(table->table[i], true); if (!ctx) { skipped++; @@ -1049,7 +1065,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) if (!table || id >= table->nr) goto out; - ctx = table->table[id]; + ctx = rcu_dereference(table->table[id]); if (ctx && ctx->user_id == ctx_id) { percpu_ref_get(&ctx->users); ret = ctx; diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index fb3e64d37cb4..6b16b8653d98 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -82,12 +82,6 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans, switch (type) { case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; - if (acl) { - ret = posix_acl_update_mode(inode, &inode->i_mode, &acl); - if (ret) - return ret; - } - ret = 0; break; case ACL_TYPE_DEFAULT: if (!S_ISDIR(inode->i_mode)) @@ 
-123,7 +117,18 @@ out: int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) { - return __btrfs_set_acl(NULL, inode, acl, type); + int ret; + umode_t old_mode = inode->i_mode; + + if (type == ACL_TYPE_ACCESS && acl) { + ret = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (ret) + return ret; + } + ret = __btrfs_set_acl(NULL, inode, acl, type); + if (ret) + inode->i_mode = old_mode; + return ret; } /* diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e767f347f2b1..88bee6703cc0 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2534,7 +2534,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end) if (!uptodate) { ClearPageUptodate(page); SetPageError(page); - ret = ret < 0 ? ret : -EIO; + ret = err < 0 ? err : -EIO; mapping_set_error(page->mapping, ret); } return 0; diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index c5bbb5300658..19b56873b797 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -5008,13 +5008,19 @@ static int is_extent_unchanged(struct send_ctx *sctx, while (key.offset < ekey->offset + left_len) { ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); right_type = btrfs_file_extent_type(eb, ei); - if (right_type != BTRFS_FILE_EXTENT_REG) { + if (right_type != BTRFS_FILE_EXTENT_REG && + right_type != BTRFS_FILE_EXTENT_INLINE) { ret = 0; goto out; } right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); - right_len = btrfs_file_extent_num_bytes(eb, ei); + if (right_type == BTRFS_FILE_EXTENT_INLINE) { + right_len = btrfs_file_extent_inline_len(eb, slot, ei); + right_len = PAGE_ALIGN(right_len); + } else { + right_len = btrfs_file_extent_num_bytes(eb, ei); + } right_offset = btrfs_file_extent_offset(eb, ei); right_gen = btrfs_file_extent_generation(eb, ei); @@ -5028,6 +5034,19 @@ static int is_extent_unchanged(struct send_ctx *sctx, goto out; } + /* + * We just wanted to see if when we have an inline extent, what + * follows it is a regular extent (wanted to check the above + * condition for inline extents too). This should normally not + * happen but it's possible for example when we have an inline + * compressed extent representing data with a size matching + * the page size (currently the same as sector size). + */ + if (right_type == BTRFS_FILE_EXTENT_INLINE) { + ret = 0; + goto out; + } + left_offset_fixed = left_offset; if (key.offset < ekey->offset) { /* Fix the right offset for 2a and 7. */ diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 600c67ef8a03..6d874b1cd53c 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -568,6 +568,7 @@ void btrfs_free_stale_device(struct btrfs_device *cur_dev) btrfs_sysfs_remove_fsid(fs_devs); list_del(&fs_devs->list); free_fs_devices(fs_devs); + break; } else { fs_devs->num_devices--; list_del(&dev->dev_list); @@ -4638,10 +4639,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, if (devs_max && ndevs > devs_max) ndevs = devs_max; /* - * the primary goal is to maximize the number of stripes, so use as many - * devices as possible, even if the stripes are not maximum sized. + * The primary goal is to maximize the number of stripes, so use as + * many devices as possible, even if the stripes are not maximum sized. + * + * The DUP profile stores more than one stripe per device, the + * max_avail is the total size so we have to adjust. 
*/ - stripe_size = devices_info[ndevs-1].max_avail; + stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes); num_stripes = ndevs * dev_stripes; /* @@ -4681,8 +4685,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, stripe_size = devices_info[ndevs-1].max_avail; } - stripe_size = div_u64(stripe_size, dev_stripes); - /* align to BTRFS_STRIPE_LEN */ stripe_size = div_u64(stripe_size, raid_stripe_len); stripe_size *= raid_stripe_len; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 744be3c146f5..0141aba9eca6 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -589,7 +589,7 @@ cifs_relock_file(struct cifsFileInfo *cfile) struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; - down_read(&cinode->lock_sem); + down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING); if (cinode->can_cache_brlcks) { /* can cache locks - no need to relock */ up_read(&cinode->lock_sem); diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index abae6dd2c6b9..cc88f4f0325e 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -980,10 +980,10 @@ struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset) cifs_dbg(VFS, "illegal hours %d\n", st->Hours); days = sd->Day; month = sd->Month; - if ((days > 31) || (month > 12)) { + if (days < 1 || days > 31 || month < 1 || month > 12) { cifs_dbg(VFS, "illegal date, month %d day: %d\n", month, days); - if (month > 12) - month = 12; + days = clamp(days, 1, 31); + month = clamp(month, 1, 12); } month -= 1; days += total_days_of_prev_months[month]; diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index e88ffe1da045..a035d1a95882 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -344,13 +344,12 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, /* BB is NTLMV2 session security format easier to use here? 
*/ flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | - NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; - if (ses->server->sign) { + NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC | + NTLMSSP_NEGOTIATE_SEAL; + if (ses->server->sign) flags |= NTLMSSP_NEGOTIATE_SIGN; - if (!ses->server->session_estab || - ses->ntlmssp->sesskey_per_smbsess) - flags |= NTLMSSP_NEGOTIATE_KEY_XCH; - } + if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) + flags |= NTLMSSP_NEGOTIATE_KEY_XCH; sec_blob->NegotiateFlags = cpu_to_le32(flags); @@ -407,13 +406,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer, flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | - NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; - if (ses->server->sign) { + NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC | + NTLMSSP_NEGOTIATE_SEAL; + if (ses->server->sign) flags |= NTLMSSP_NEGOTIATE_SIGN; - if (!ses->server->session_estab || - ses->ntlmssp->sesskey_per_smbsess) - flags |= NTLMSSP_NEGOTIATE_KEY_XCH; - } + if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) + flags |= NTLMSSP_NEGOTIATE_KEY_XCH; tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE); sec_blob->NegotiateFlags = cpu_to_le32(flags); diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 84614a5edb87..807e989f436a 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -832,10 +832,8 @@ ssetup_exit: if (!rc) { mutex_lock(&server->srv_mutex); - if (server->sign && server->ops->generate_signingkey) { + if (server->ops->generate_signingkey) { rc = server->ops->generate_signingkey(ses); - kfree(ses->auth_key.response); - ses->auth_key.response = NULL; if (rc) { cifs_dbg(FYI, "SMB3 session key generation failed\n"); @@ -857,10 +855,6 @@ ssetup_exit: } keygen_exit: - if (!server->sign) { - kfree(ses->auth_key.response); - ses->auth_key.response = NULL; - } if (spnego_key) { key_invalidate(spnego_key); key_put(spnego_key); @@ -1005,15 +999,19 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, goto tcon_exit; } - if (rsp->ShareType & SMB2_SHARE_TYPE_DISK) + switch (rsp->ShareType) { + case SMB2_SHARE_TYPE_DISK: cifs_dbg(FYI, "connection to disk share\n"); - else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) { + break; + case SMB2_SHARE_TYPE_PIPE: tcon->ipc = true; cifs_dbg(FYI, "connection to pipe share\n"); - } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) { - tcon->print = true; + break; + case SMB2_SHARE_TYPE_PRINT: + tcon->ipc = true; cifs_dbg(FYI, "connection to printer\n"); - } else { + break; + default: cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType); rc = -EOPNOTSUPP; goto tcon_error_exit; @@ -1558,6 +1556,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, } else iov[0].iov_len = get_rfc1002_length(req) + 4; + /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ + if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) + req->hdr.Flags |= SMB2_FLAGS_SIGNED; rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0); rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base; diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index b4967f7aaad0..42e014cdd59f 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -811,7 +811,7 @@ static int compat_ioctl_preallocate(struct file *file, */ #define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff) -#define COMPATIBLE_IOCTL(cmd) XFORM(cmd), 
+#define COMPATIBLE_IOCTL(cmd) XFORM((u32)cmd), /* ioctl should not be warned about even if it's not implemented. Valid reasons to use this: - It is implemented with ->compat_ioctl on some device, but programs diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 732a786cce9d..ce654526c0fb 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -27,6 +27,7 @@ #include <linux/dcache.h> #include <linux/namei.h> #include <crypto/aes.h> +#include <crypto/skcipher.h> #include "fscrypt_private.h" static unsigned int num_prealloc_crypto_pages = 32; diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 6eb434363ff2..b18fa323d1d9 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -12,42 +12,46 @@ #include <linux/scatterlist.h> #include <linux/ratelimit.h> +#include <crypto/skcipher.h> #include "fscrypt_private.h" +static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) +{ + if (str->len == 1 && str->name[0] == '.') + return true; + + if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.') + return true; + + return false; +} + /** * fname_encrypt() - encrypt a filename * - * The caller must have allocated sufficient memory for the @oname string. + * The output buffer must be at least as large as the input buffer. + * Any extra space is filled with NUL padding before encryption. * * Return: 0 on success, -errno on failure */ -static int fname_encrypt(struct inode *inode, - const struct qstr *iname, struct fscrypt_str *oname) +int fname_encrypt(struct inode *inode, const struct qstr *iname, + u8 *out, unsigned int olen) { struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); - struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = inode->i_crypt_info->ci_ctfm; int res = 0; char iv[FS_CRYPTO_BLOCK_SIZE]; struct scatterlist sg; - int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK); - unsigned int lim; - unsigned int cryptlen; - - lim = inode->i_sb->s_cop->max_namelen(inode); - if (iname->len <= 0 || iname->len > lim) - return -EIO; /* * Copy the filename to the output buffer for encrypting in-place and * pad it with the needed number of NUL bytes. 
*/ - cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE); - cryptlen = round_up(cryptlen, padding); - cryptlen = min(cryptlen, lim); - memcpy(oname->name, iname->name, iname->len); - memset(oname->name + iname->len, 0, cryptlen - iname->len); + if (WARN_ON(olen < iname->len)) + return -ENOBUFS; + memcpy(out, iname->name, iname->len); + memset(out + iname->len, 0, olen - iname->len); /* Initialize the IV */ memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); @@ -62,8 +66,8 @@ static int fname_encrypt(struct inode *inode, skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait); - sg_init_one(&sg, oname->name, cryptlen); - skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv); + sg_init_one(&sg, out, olen); + skcipher_request_set_crypt(req, &sg, &sg, olen, iv); /* Do the encryption */ res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait); @@ -74,7 +78,6 @@ static int fname_encrypt(struct inode *inode, return res; } - oname->len = cryptlen; return 0; } @@ -187,50 +190,52 @@ static int digest_decode(const char *src, int len, char *dst) return cp - dst; } -u32 fscrypt_fname_encrypted_size(const struct inode *inode, u32 ilen) +bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, + u32 max_len, u32 *encrypted_len_ret) { - int padding = 32; - struct fscrypt_info *ci = inode->i_crypt_info; - - if (ci) - padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK); - ilen = max(ilen, (u32)FS_CRYPTO_BLOCK_SIZE); - return round_up(ilen, padding); + int padding = 4 << (inode->i_crypt_info->ci_flags & + FS_POLICY_FLAGS_PAD_MASK); + u32 encrypted_len; + + if (orig_len > max_len) + return false; + encrypted_len = max(orig_len, (u32)FS_CRYPTO_BLOCK_SIZE); + encrypted_len = round_up(encrypted_len, padding); + *encrypted_len_ret = min(encrypted_len, max_len); + return true; } -EXPORT_SYMBOL(fscrypt_fname_encrypted_size); /** - * fscrypt_fname_crypto_alloc_obuff() - + * fscrypt_fname_alloc_buffer - allocate a buffer for presented filenames + * + * Allocate a buffer that is large enough to hold any decrypted or encoded + * filename (null-terminated), for the given maximum encrypted filename length. * - * Allocates an output buffer that is sufficient for the crypto operation - * specified by the context and the direction. + * Return: 0 on success, -errno on failure */ int fscrypt_fname_alloc_buffer(const struct inode *inode, - u32 ilen, struct fscrypt_str *crypto_str) + u32 max_encrypted_len, + struct fscrypt_str *crypto_str) { - u32 olen = fscrypt_fname_encrypted_size(inode, ilen); const u32 max_encoded_len = max_t(u32, BASE64_CHARS(FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE), 1 + BASE64_CHARS(sizeof(struct fscrypt_digested_name))); + u32 max_presented_len; - crypto_str->len = olen; - olen = max(olen, max_encoded_len); + max_presented_len = max(max_encoded_len, max_encrypted_len); - /* - * Allocated buffer can hold one more character to null-terminate the - * string - */ - crypto_str->name = kmalloc(olen + 1, GFP_NOFS); - if (!(crypto_str->name)) + crypto_str->name = kmalloc(max_presented_len + 1, GFP_NOFS); + if (!crypto_str->name) return -ENOMEM; + crypto_str->len = max_presented_len; return 0; } EXPORT_SYMBOL(fscrypt_fname_alloc_buffer); /** - * fscrypt_fname_crypto_free_buffer() - + * fscrypt_fname_free_buffer - free the buffer for presented filenames * - * Frees the buffer allocated for crypto operation. + * Free the buffer allocated by fscrypt_fname_alloc_buffer(). 
*/ void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) { @@ -297,35 +302,6 @@ int fscrypt_fname_disk_to_usr(struct inode *inode, EXPORT_SYMBOL(fscrypt_fname_disk_to_usr); /** - * fscrypt_fname_usr_to_disk() - converts a filename from user space to disk - * space - * - * The caller must have allocated sufficient memory for the @oname string. - * - * Return: 0 on success, -errno on failure - */ -int fscrypt_fname_usr_to_disk(struct inode *inode, - const struct qstr *iname, - struct fscrypt_str *oname) -{ - if (fscrypt_is_dot_dotdot(iname)) { - oname->name[0] = '.'; - oname->name[iname->len - 1] = '.'; - oname->len = iname->len; - return 0; - } - if (inode->i_crypt_info) - return fname_encrypt(inode, iname, oname); - /* - * Without a proper key, a user is not allowed to modify the filenames - * in a directory. Consequently, a user space name cannot be mapped to - * a disk-space name - */ - return -ENOKEY; -} -EXPORT_SYMBOL(fscrypt_fname_usr_to_disk); - -/** * fscrypt_setup_filename() - prepare to search a possibly encrypted directory * @dir: the directory that will be searched * @iname: the user-provided filename being searched for @@ -368,11 +344,17 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, return ret; if (dir->i_crypt_info) { - ret = fscrypt_fname_alloc_buffer(dir, iname->len, - &fname->crypto_buf); - if (ret) - return ret; - ret = fname_encrypt(dir, iname, &fname->crypto_buf); + if (!fscrypt_fname_encrypted_size(dir, iname->len, + dir->i_sb->s_cop->max_namelen(dir), + &fname->crypto_buf.len)) + return -ENAMETOOLONG; + fname->crypto_buf.name = kmalloc(fname->crypto_buf.len, + GFP_NOFS); + if (!fname->crypto_buf.name) + return -ENOMEM; + + ret = fname_encrypt(dir, iname, fname->crypto_buf.name, + fname->crypto_buf.len); if (ret) goto errout; fname->disk_name.name = fname->crypto_buf.name; @@ -424,7 +406,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, return 0; errout: - fscrypt_fname_free_buffer(&fname->crypto_buf); + kfree(fname->crypto_buf.name); return ret; } EXPORT_SYMBOL(fscrypt_setup_filename); diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index c3ad415cd14f..5c296d4af4a9 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -49,6 +49,15 @@ struct fscrypt_context { #define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1 +/** + * For encrypted symlinks, the ciphertext length is stored at the beginning + * of the string in little-endian format. + */ +struct fscrypt_symlink_data { + __le16 len; + char encrypted_path[1]; +} __packed; + /* * A pointer to this structure is stored in the file system's in-core * representation of an inode. 
@@ -81,7 +90,22 @@ static inline void bio_set_op_attrs(struct bio *bio, unsigned op, bio->bi_rw = op | op_flags; } +static inline bool fscrypt_valid_enc_modes(u32 contents_mode, + u32 filenames_mode) +{ + if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC && + filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS) + return true; + + if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS && + filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS) + return true; + + return false; +} + /* crypto.c */ +extern struct kmem_cache *fscrypt_info_cachep; extern int fscrypt_initialize(unsigned int cop_flags); extern struct workqueue_struct *fscrypt_read_workqueue; extern int fscrypt_do_page_crypto(const struct inode *inode, @@ -93,6 +117,13 @@ extern int fscrypt_do_page_crypto(const struct inode *inode, extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx, gfp_t gfp_flags); +/* fname.c */ +extern int fname_encrypt(struct inode *inode, const struct qstr *iname, + u8 *out, unsigned int olen); +extern bool fscrypt_fname_encrypted_size(const struct inode *inode, + u32 orig_len, u32 max_len, + u32 *encrypted_len_ret); + /* keyinfo.c */ extern void __exit fscrypt_essiv_cleanup(void); diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c index 9f5fb2eb9cf7..bc010e4609ef 100644 --- a/fs/crypto/hooks.c +++ b/fs/crypto/hooks.c @@ -110,3 +110,159 @@ int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry) return 0; } EXPORT_SYMBOL_GPL(__fscrypt_prepare_lookup); + +int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link) +{ + int err; + + /* + * To calculate the size of the encrypted symlink target we need to know + * the amount of NUL padding, which is determined by the flags set in + * the encryption policy which will be inherited from the directory. + * The easiest way to get access to this is to just load the directory's + * fscrypt_info, since we'll need it to create the dir_entry anyway. + * + * Note: in test_dummy_encryption mode, @dir may be unencrypted. + */ + err = fscrypt_get_encryption_info(dir); + if (err) + return err; + if (!fscrypt_has_encryption_key(dir)) + return -ENOKEY; + + /* + * Calculate the size of the encrypted symlink and verify it won't + * exceed max_len. Note that for historical reasons, encrypted symlink + * targets are prefixed with the ciphertext length, despite this + * actually being redundant with i_size. This decreases by 2 bytes the + * longest symlink target we can accept. + * + * We could recover 1 byte by not counting a null terminator, but + * counting it (even though it is meaningless for ciphertext) is simpler + * for now since filesystems will assume it is there and subtract it. 
+ */ + if (!fscrypt_fname_encrypted_size(dir, len, + max_len - sizeof(struct fscrypt_symlink_data), + &disk_link->len)) + return -ENAMETOOLONG; + disk_link->len += sizeof(struct fscrypt_symlink_data); + + disk_link->name = NULL; + return 0; +} +EXPORT_SYMBOL_GPL(__fscrypt_prepare_symlink); + +int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, + unsigned int len, struct fscrypt_str *disk_link) +{ + int err; + struct qstr iname = QSTR_INIT(target, len); + struct fscrypt_symlink_data *sd; + unsigned int ciphertext_len; + + err = fscrypt_require_key(inode); + if (err) + return err; + + if (disk_link->name) { + /* filesystem-provided buffer */ + sd = (struct fscrypt_symlink_data *)disk_link->name; + } else { + sd = kmalloc(disk_link->len, GFP_NOFS); + if (!sd) + return -ENOMEM; + } + ciphertext_len = disk_link->len - sizeof(*sd); + sd->len = cpu_to_le16(ciphertext_len); + + err = fname_encrypt(inode, &iname, sd->encrypted_path, ciphertext_len); + if (err) { + if (!disk_link->name) + kfree(sd); + return err; + } + /* + * Null-terminating the ciphertext doesn't make sense, but we still + * count the null terminator in the length, so we might as well + * initialize it just in case the filesystem writes it out. + */ + sd->encrypted_path[ciphertext_len] = '\0'; + + if (!disk_link->name) + disk_link->name = (unsigned char *)sd; + return 0; +} +EXPORT_SYMBOL_GPL(__fscrypt_encrypt_symlink); + +/** + * fscrypt_get_symlink - get the target of an encrypted symlink + * @inode: the symlink inode + * @caddr: the on-disk contents of the symlink + * @max_size: size of @caddr buffer + * @done: if successful, will be set up to free the returned target + * + * If the symlink's encryption key is available, we decrypt its target. + * Otherwise, we encode its target for presentation. + * + * This may sleep, so the filesystem must have dropped out of RCU mode already. + * + * Return: the presentable symlink target or an ERR_PTR() + */ +void *fscrypt_get_symlink(struct inode *inode, const void *caddr, + unsigned int max_size) +{ + const struct fscrypt_symlink_data *sd; + struct fscrypt_str cstr, pstr; + int err; + + /* This is for encrypted symlinks only */ + if (WARN_ON(!IS_ENCRYPTED(inode))) + return ERR_PTR(-EINVAL); + + /* + * Try to set up the symlink's encryption key, but we can continue + * regardless of whether the key is available or not. + */ + err = fscrypt_get_encryption_info(inode); + if (err) + return ERR_PTR(err); + + /* + * For historical reasons, encrypted symlink targets are prefixed with + * the ciphertext length, even though this is redundant with i_size. 
+ */ + + if (max_size < sizeof(*sd)) + return ERR_PTR(-EUCLEAN); + sd = caddr; + cstr.name = (unsigned char *)sd->encrypted_path; + cstr.len = le16_to_cpu(sd->len); + + if (cstr.len == 0) + return ERR_PTR(-EUCLEAN); + + if (cstr.len + sizeof(*sd) - 1 > max_size) + return ERR_PTR(-EUCLEAN); + + err = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr); + if (err) + return ERR_PTR(err); + + err = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr); + if (err) + goto err_kfree; + + err = -EUCLEAN; + if (pstr.name[0] == '\0') + goto err_kfree; + + pstr.name[pstr.len] = '\0'; + return pstr.name; + +err_kfree: + kfree(pstr.name); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(fscrypt_get_symlink); diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 444c65ed6db8..7c00331da5df 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -13,6 +13,7 @@ #include <linux/ratelimit.h> #include <crypto/aes.h> #include <crypto/sha.h> +#include <crypto/skcipher.h> #include "fscrypt_private.h" static struct crypto_shash *essiv_hash_tfm; diff --git a/fs/dcache.c b/fs/dcache.c index ba56a39a3b74..750ddc627855 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -634,11 +634,16 @@ again: spin_unlock(&parent->d_lock); goto again; } - rcu_read_unlock(); - if (parent != dentry) + if (parent != dentry) { spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); - else + if (unlikely(dentry->d_lockref.count < 0)) { + spin_unlock(&parent->d_lock); + parent = NULL; + } + } else { parent = NULL; + } + rcu_read_unlock(); return parent; } diff --git a/fs/ext4/file.c b/fs/ext4/file.c index a8b1749d79a8..debf0707789d 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -460,7 +460,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, int i, num; unsigned long nr_pages; - num = min_t(pgoff_t, end - index, PAGEVEC_SIZE); + num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1; nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, (pgoff_t)num); if (nr_pages == 0) diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 27ff3706d632..03b874f3fefd 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3875,7 +3875,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, err = ext4_mb_load_buddy(sb, group, &e4b); if (err) { - ext4_error(sb, "Error loading buddy information for %u", group); + ext4_warning(sb, "Error %d loading buddy information for %u", + err, group); put_bh(bitmap_bh); return 0; } @@ -4032,10 +4033,11 @@ repeat: BUG_ON(pa->pa_type != MB_INODE_PA); group = ext4_get_group_number(sb, pa->pa_pstart); - err = ext4_mb_load_buddy(sb, group, &e4b); + err = ext4_mb_load_buddy_gfp(sb, group, &e4b, + GFP_NOFS|__GFP_NOFAIL); if (err) { - ext4_error(sb, "Error loading buddy information for %u", - group); + ext4_error(sb, "Error %d loading buddy information for %u", + err, group); continue; } @@ -4291,11 +4293,14 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb, spin_unlock(&lg->lg_prealloc_lock); list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { + int err; group = ext4_get_group_number(sb, pa->pa_pstart); - if (ext4_mb_load_buddy(sb, group, &e4b)) { - ext4_error(sb, "Error loading buddy information for %u", - group); + err = ext4_mb_load_buddy_gfp(sb, group, &e4b, + GFP_NOFS|__GFP_NOFAIL); + if (err) { + ext4_error(sb, "Error %d loading buddy information for %u", + err, group); continue; } ext4_lock_group(sb, group); @@ -5121,8 +5126,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group, ret = ext4_mb_load_buddy(sb, group, &e4b); if (ret) { - 
ext4_error(sb, "Error in loading buddy " - "information for %u", group); + ext4_warning(sb, "Error %d loading buddy information for %u", + ret, group); return ret; } bitmap = e4b.bd_bitmap; diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index b310ed81c10e..c356b49540cb 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -832,8 +832,6 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, if (!IS_LAST_ENTRY(s->first)) ext4_xattr_rehash(header(s->base), s->here); - ext4_xattr_cache_insert(ext4_mb_cache, - bs->bh); } unlock_buffer(bs->bh); if (error == -EFSCORRUPTED) @@ -944,6 +942,7 @@ inserted: } else if (bs->bh && s->base == bs->bh->b_data) { /* We were modifying this block in-place. */ ea_bdebug(bs->bh, "keeping this block"); + ext4_xattr_cache_insert(ext4_mb_cache, bs->bh); new_bh = bs->bh; get_bh(new_bh); } else { diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 3c343e922f6e..04c608646fd5 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -68,6 +68,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, .old_blkaddr = index, .new_blkaddr = index, .encrypted_page = NULL, + .is_meta = is_meta, }; if (unlikely(!is_meta)) @@ -163,6 +164,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, REQ_RAHEAD, .encrypted_page = NULL, .in_list = false, + .is_meta = (type != META_POR), }; struct blk_plug plug; @@ -573,13 +575,8 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) struct node_info ni; int err = acquire_orphan_inode(sbi); - if (err) { - set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: orphan failed (ino=%x), run fsck to fix.", - __func__, ino); - return err; - } + if (err) + goto err_out; __add_ino_entry(sbi, ino, 0, ORPHAN_INO); @@ -593,6 +590,11 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) return PTR_ERR(inode); } + err = dquot_initialize(inode); + if (err) + goto err_out; + + dquot_initialize(inode); clear_nlink(inode); /* truncate all the data during iput */ @@ -602,14 +604,18 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) /* ENOMEM was fully retried in f2fs_evict_inode. */ if (ni.blk_addr != NULL_ADDR) { - set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "%s: orphan failed (ino=%x) by kernel, retry mount.", - __func__, ino); - return -EIO; + err = -EIO; + goto err_out; } __remove_ino_entry(sbi, ino, ORPHAN_INO); return 0; + +err_out: + set_sbi_flag(sbi, SBI_NEED_FSCK); + f2fs_msg(sbi->sb, KERN_WARNING, + "%s: orphan failed (ino=%x), run fsck to fix.", + __func__, ino); + return err; } int recover_orphan_inodes(struct f2fs_sb_info *sbi) @@ -1140,6 +1146,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) if (cpc->reason & CP_TRIMMED) __set_ckpt_flags(ckpt, CP_TRIMMED_FLAG); + else + __clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG); if (cpc->reason & CP_UMOUNT) __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); @@ -1166,6 +1174,39 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) spin_unlock_irqrestore(&sbi->cp_lock, flags); } +static void commit_checkpoint(struct f2fs_sb_info *sbi, + void *src, block_t blk_addr) +{ + struct writeback_control wbc = { + .for_reclaim = 0, + }; + + /* + * pagevec_lookup_tag and lock_page again will take + * some extra time. Therefore, update_meta_pages and + * sync_meta_pages are combined in this function. 
+ */ + struct page *page = grab_meta_page(sbi, blk_addr); + int err; + + memcpy(page_address(page), src, PAGE_SIZE); + set_page_dirty(page); + + f2fs_wait_on_page_writeback(page, META, true); + f2fs_bug_on(sbi, PageWriteback(page)); + if (unlikely(!clear_page_dirty_for_io(page))) + f2fs_bug_on(sbi, 1); + + /* writeout cp pack 2 page */ + err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO); + f2fs_bug_on(sbi, err); + + f2fs_put_page(page, 0); + + /* submit checkpoint (with barrier if NOBARRIER is not set) */ + f2fs_submit_merged_write(sbi, META_FLUSH); +} + static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); @@ -1268,16 +1309,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) } } - /* need to wait for end_io results */ - wait_on_all_pages_writeback(sbi); - if (unlikely(f2fs_cp_error(sbi))) - return -EIO; - - /* flush all device cache */ - err = f2fs_flush_device_cache(sbi); - if (err) - return err; - /* write out checkpoint buffer at block 0 */ update_meta_page(sbi, ckpt, start_blk++); @@ -1305,26 +1336,26 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) start_blk += NR_CURSEG_NODE_TYPE; } - /* writeout checkpoint block */ - update_meta_page(sbi, ckpt, start_blk); + /* update user_block_counts */ + sbi->last_valid_block_count = sbi->total_valid_block_count; + percpu_counter_set(&sbi->alloc_valid_block_count, 0); + + /* Here, we have one bio having CP pack except cp pack 2 page */ + sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); - /* wait for previous submitted node/meta pages writeback */ + /* wait for previous submitted meta pages writeback */ wait_on_all_pages_writeback(sbi); if (unlikely(f2fs_cp_error(sbi))) return -EIO; - filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LLONG_MAX); - filemap_fdatawait_range(META_MAPPING(sbi), 0, LLONG_MAX); - - /* update user_block_counts */ - sbi->last_valid_block_count = sbi->total_valid_block_count; - percpu_counter_set(&sbi->alloc_valid_block_count, 0); - - /* Here, we only have one bio having CP pack */ - sync_meta_pages(sbi, META_FLUSH, LONG_MAX, FS_CP_META_IO); + /* flush all device cache */ + err = f2fs_flush_device_cache(sbi); + if (err) + return err; - /* wait for previous submitted meta pages writeback */ + /* barrier and flush checkpoint cp pack 2 page if it can */ + commit_checkpoint(sbi, ckpt, start_blk); wait_on_all_pages_writeback(sbi); release_ino_entry(sbi, false); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index d5299265feea..ad44712bb902 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -174,15 +174,22 @@ static bool __same_bdev(struct f2fs_sb_info *sbi, */ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, struct writeback_control *wbc, - int npages, bool is_read) + int npages, bool is_read, + enum page_type type, enum temp_type temp) { struct bio *bio; bio = f2fs_bio_alloc(sbi, npages, true); f2fs_target_device(sbi, blk_addr, bio); - bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io; - bio->bi_private = is_read ? 
NULL : sbi; + if (is_read) { + bio->bi_end_io = f2fs_read_end_io; + bio->bi_private = NULL; + } else { + bio->bi_end_io = f2fs_write_end_io; + bio->bi_private = sbi; + bio->bi_write_hint = io_type_to_rw_hint(sbi, type, temp); + } if (wbc) wbc_init_bio(wbc, bio); @@ -195,13 +202,12 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi, if (!is_read_io(bio_op(bio))) { unsigned int start; - if (f2fs_sb_mounted_blkzoned(sbi->sb) && - current->plug && (type == DATA || type == NODE)) - blk_finish_plug(current->plug); - if (type != DATA && type != NODE) goto submit_io; + if (f2fs_sb_has_blkzoned(sbi->sb) && current->plug) + blk_finish_plug(current->plug); + start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS; start %= F2FS_IO_SIZE(sbi); @@ -376,12 +382,13 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page; + verify_block_addr(fio, fio->new_blkaddr); trace_f2fs_submit_page_bio(page, fio); f2fs_trace_ios(fio, 0); /* Allocate a new bio */ bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc, - 1, is_read_io(fio->op)); + 1, is_read_io(fio->op), fio->type, fio->temp); if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { bio_put(bio); @@ -421,8 +428,8 @@ next: } if (fio->old_blkaddr != NEW_ADDR) - verify_block_addr(sbi, fio->old_blkaddr); - verify_block_addr(sbi, fio->new_blkaddr); + verify_block_addr(fio, fio->old_blkaddr); + verify_block_addr(fio, fio->new_blkaddr); bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; @@ -444,7 +451,8 @@ alloc_new: goto out_fail; } io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc, - BIO_MAX_PAGES, false); + BIO_MAX_PAGES, false, + fio->type, fio->temp); io->fio = *fio; } @@ -831,13 +839,6 @@ alloc: return 0; } -static inline bool __force_buffered_io(struct inode *inode, int rw) -{ - return (f2fs_encrypted_file(inode) || - (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) || - F2FS_I_SB(inode)->s_ndevs); -} - int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); @@ -868,9 +869,8 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) map.m_seg_type = NO_CHECK_TYPE; if (direct_io) { - /* map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint); */ - map.m_seg_type = rw_hint_to_seg_type(WRITE_LIFE_NOT_SET); - flag = __force_buffered_io(inode, WRITE) ? + map.m_seg_type = rw_hint_to_seg_type(iocb->ki_hint); + flag = f2fs_force_buffered_io(inode, WRITE) ? 
F2FS_GET_BLOCK_PRE_AIO : F2FS_GET_BLOCK_PRE_DIO; goto map_blocks; @@ -1114,6 +1114,31 @@ out: return err; } +bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len) +{ + struct f2fs_map_blocks map; + block_t last_lblk; + int err; + + if (pos + len > i_size_read(inode)) + return false; + + map.m_lblk = F2FS_BYTES_TO_BLK(pos); + map.m_next_pgofs = NULL; + map.m_next_extent = NULL; + map.m_seg_type = NO_CHECK_TYPE; + last_lblk = F2FS_BLK_ALIGN(pos + len); + + while (map.m_lblk < last_lblk) { + map.m_len = last_lblk - map.m_lblk; + err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT); + if (err || map.m_len == 0) + return false; + map.m_lblk += map.m_len; + } + return true; +} + static int __get_data_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create, int flag, pgoff_t *next_pgofs, int seg_type) @@ -1151,8 +1176,7 @@ static int get_data_block_dio(struct inode *inode, sector_t iblock, return __get_data_block(inode, iblock, bh_result, create, F2FS_GET_BLOCK_DEFAULT, NULL, rw_hint_to_seg_type( - WRITE_LIFE_NOT_SET)); - /* inode->i_write_hint)); */ + inode->i_write_hint)); } static int get_data_block_bmap(struct inode *inode, sector_t iblock, @@ -2304,15 +2328,18 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, { struct address_space *mapping = iocb->ki_filp->f_mapping; struct inode *inode = mapping->host; + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); size_t count = iov_iter_count(iter); int rw = iov_iter_rw(iter); int err; + enum rw_hint hint = iocb->ki_hint; + int whint_mode = F2FS_OPTION(sbi).whint_mode; err = check_direct_IO(inode, iter, offset); if (err) return err; - if (__force_buffered_io(inode, rw)) + if (f2fs_force_buffered_io(inode, rw)) return 0; if (trace_android_fs_dataread_start_enabled() && @@ -2339,11 +2366,24 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, } trace_f2fs_direct_IO_enter(inode, offset, count, rw); - down_read(&F2FS_I(inode)->dio_rwsem[rw]); + if (rw == WRITE && whint_mode == WHINT_MODE_OFF) + iocb->ki_hint = WRITE_LIFE_NOT_SET; + + if (!down_read_trylock(&F2FS_I(inode)->dio_rwsem[rw])) { + if (iocb->ki_flags & IOCB_NOWAIT) { + iocb->ki_hint = hint; + err = -EAGAIN; + goto out; + } + down_read(&F2FS_I(inode)->dio_rwsem[rw]); + } + err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio); up_read(&F2FS_I(inode)->dio_rwsem[rw]); if (rw == WRITE) { + if (whint_mode == WHINT_MODE_OFF) + iocb->ki_hint = hint; if (err > 0) { f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO, err); @@ -2352,7 +2392,7 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, f2fs_write_failed(mapping, offset + count); } } - +out: if (trace_android_fs_dataread_start_enabled() && (iov_iter_rw(iter) == READ)) trace_android_fs_dataread_end(inode, offset, count); diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index bde445e4e690..41d32171bd52 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -94,14 +94,12 @@ static struct f2fs_dir_entry *find_in_block(struct page *dentry_page, struct f2fs_dir_entry *de; struct f2fs_dentry_ptr d; - dentry_blk = (struct f2fs_dentry_block *)kmap(dentry_page); + dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page); make_dentry_ptr_block(NULL, &d, dentry_blk); de = find_target_dentry(fname, namehash, max_slots, &d); if (de) *res_page = dentry_page; - else - kunmap(dentry_page); return de; } @@ -287,7 +285,6 @@ ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, de = f2fs_find_entry(dir, qstr, page); if 
(de) { res = le32_to_cpu(de->ino); - f2fs_dentry_kunmap(dir, *page); f2fs_put_page(*page, 0); } @@ -302,7 +299,6 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, f2fs_wait_on_page_writeback(page, type, true); de->ino = cpu_to_le32(inode->i_ino); set_de_type(de, inode->i_mode); - f2fs_dentry_kunmap(dir, page); set_page_dirty(page); dir->i_mtime = dir->i_ctime = current_time(dir); @@ -350,13 +346,11 @@ static int make_empty_dir(struct inode *inode, if (IS_ERR(dentry_page)) return PTR_ERR(dentry_page); - dentry_blk = kmap_atomic(dentry_page); + dentry_blk = page_address(dentry_page); make_dentry_ptr_block(NULL, &d, dentry_blk); do_make_empty_dir(inode, parent, &d); - kunmap_atomic(dentry_blk); - set_page_dirty(dentry_page); f2fs_put_page(dentry_page, 1); return 0; @@ -367,6 +361,7 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir, struct page *dpage) { struct page *page; + int dummy_encrypt = DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(dir)); int err; if (is_inode_flag_set(inode, FI_NEW_INODE)) { @@ -393,7 +388,8 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir, if (err) goto put_error; - if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode)) { + if ((f2fs_encrypted_inode(dir) || dummy_encrypt) && + f2fs_may_encrypt(inode)) { err = fscrypt_inherit_context(dir, inode, page, false); if (err) goto put_error; @@ -402,8 +398,6 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir, page = get_node_page(F2FS_I_SB(dir), inode->i_ino); if (IS_ERR(page)) return page; - - set_cold_node(inode, page); } if (new_name) { @@ -547,13 +541,12 @@ start: if (IS_ERR(dentry_page)) return PTR_ERR(dentry_page); - dentry_blk = kmap(dentry_page); + dentry_blk = page_address(dentry_page); bit_pos = room_for_filename(&dentry_blk->dentry_bitmap, slots, NR_DENTRY_IN_BLOCK); if (bit_pos < NR_DENTRY_IN_BLOCK) goto add_dentry; - kunmap(dentry_page); f2fs_put_page(dentry_page, 1); } @@ -588,7 +581,6 @@ fail: if (inode) up_write(&F2FS_I(inode)->i_sem); - kunmap(dentry_page); f2fs_put_page(dentry_page, 1); return err; @@ -642,7 +634,6 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, F2FS_I(dir)->task = NULL; } if (de) { - f2fs_dentry_kunmap(dir, page); f2fs_put_page(page, 0); err = -EEXIST; } else if (IS_ERR(page)) { @@ -713,7 +704,8 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, f2fs_update_time(F2FS_I_SB(dir), REQ_TIME); - add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO); + if (F2FS_OPTION(F2FS_I_SB(dir)).fsync_mode == FSYNC_MODE_STRICT) + add_ino_entry(F2FS_I_SB(dir), dir->i_ino, TRANS_DIR_INO); if (f2fs_has_inline_dentry(dir)) return f2fs_delete_inline_entry(dentry, page, dir, inode); @@ -730,7 +722,6 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, NR_DENTRY_IN_BLOCK, 0); - kunmap(page); /* kunmap - pair of f2fs_find_entry */ set_page_dirty(page); dir->i_ctime = dir->i_mtime = current_time(dir); @@ -775,7 +766,7 @@ bool f2fs_empty_dir(struct inode *dir) return false; } - dentry_blk = kmap_atomic(dentry_page); + dentry_blk = page_address(dentry_page); if (bidx == 0) bit_pos = 2; else @@ -783,7 +774,6 @@ bool f2fs_empty_dir(struct inode *dir) bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, NR_DENTRY_IN_BLOCK, bit_pos); - kunmap_atomic(dentry_blk); f2fs_put_page(dentry_page, 1); @@ -901,19 +891,17 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) } } - dentry_blk = 
kmap(dentry_page); + dentry_blk = page_address(dentry_page); make_dentry_ptr_block(inode, &d, dentry_blk); err = f2fs_fill_dentries(ctx, &d, n * NR_DENTRY_IN_BLOCK, &fstr); if (err) { - kunmap(dentry_page); f2fs_put_page(dentry_page, 1); break; } - kunmap(dentry_page); f2fs_put_page(dentry_page, 1); } out_free: diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index ff2352a0ed15..d5a861bf2b42 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -460,7 +460,7 @@ static struct extent_node *__insert_extent_tree(struct inode *inode, struct rb_node *insert_parent) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct rb_node **p = &et->root.rb_node; + struct rb_node **p; struct rb_node *parent = NULL; struct extent_node *en = NULL; @@ -706,6 +706,9 @@ void f2fs_drop_extent_tree(struct inode *inode) struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct extent_tree *et = F2FS_I(inode)->extent_tree; + if (!f2fs_may_extent_tree(inode)) + return; + set_inode_flag(inode, FI_NO_EXTENT); write_lock(&et->lock); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index d2a6b688a622..e258f6536b56 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -99,9 +99,10 @@ extern char *fault_name[FAULT_MAX]; #define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00800000 #define F2FS_MOUNT_RESERVE_ROOT 0x01000000 -#define clear_opt(sbi, option) ((sbi)->mount_opt.opt &= ~F2FS_MOUNT_##option) -#define set_opt(sbi, option) ((sbi)->mount_opt.opt |= F2FS_MOUNT_##option) -#define test_opt(sbi, option) ((sbi)->mount_opt.opt & F2FS_MOUNT_##option) +#define F2FS_OPTION(sbi) ((sbi)->mount_opt) +#define clear_opt(sbi, option) (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option) +#define set_opt(sbi, option) (F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option) +#define test_opt(sbi, option) (F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option) #define ver_after(a, b) (typecheck(unsigned long long, a) && \ typecheck(unsigned long long, b) && \ @@ -114,7 +115,26 @@ typedef u32 block_t; /* typedef u32 nid_t; struct f2fs_mount_info { - unsigned int opt; + unsigned int opt; + int write_io_size_bits; /* Write IO size bits */ + block_t root_reserved_blocks; /* root reserved blocks */ + kuid_t s_resuid; /* reserved blocks for uid */ + kgid_t s_resgid; /* reserved blocks for gid */ + int active_logs; /* # of active logs */ + int inline_xattr_size; /* inline xattr size */ +#ifdef CONFIG_F2FS_FAULT_INJECTION + struct f2fs_fault_info fault_info; /* For fault injection */ +#endif +#ifdef CONFIG_QUOTA + /* Names of quota files with journalled quota */ + char *s_qf_names[MAXQUOTAS]; + int s_jquota_fmt; /* Format of quota to use */ +#endif + /* For which write hints are passed down to block layer */ + int whint_mode; + int alloc_mode; /* segment allocation policy */ + int fsync_mode; /* fsync policy */ + bool test_dummy_encryption; /* test dummy encryption */ }; #define F2FS_FEATURE_ENCRYPT 0x0001 @@ -126,6 +146,8 @@ struct f2fs_mount_info { #define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x0040 #define F2FS_FEATURE_QUOTA_INO 0x0080 #define F2FS_FEATURE_INODE_CRTIME 0x0100 +#define F2FS_FEATURE_LOST_FOUND 0x0200 +#define F2FS_FEATURE_VERITY 0x0400 /* reserved */ #define F2FS_HAS_FEATURE(sb, mask) \ ((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0) @@ -509,7 +531,7 @@ static inline void make_dentry_ptr_block(struct inode *inode, d->inode = inode; d->max = NR_DENTRY_IN_BLOCK; d->nr_bitmap = SIZE_OF_DENTRY_BITMAP; - d->bitmap = &t->dentry_bitmap; + d->bitmap = t->dentry_bitmap; d->dentry = t->dentry; d->filename = t->filename; } @@ -635,6 +657,8 @@ enum { 
#define FADVISE_ENCRYPT_BIT 0x04 #define FADVISE_ENC_NAME_BIT 0x08 #define FADVISE_KEEP_SIZE_BIT 0x10 +#define FADVISE_HOT_BIT 0x20 +#define FADVISE_VERITY_BIT 0x40 /* reserved */ #define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT) #define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT) @@ -649,6 +673,9 @@ enum { #define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT) #define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT) #define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT) +#define file_is_hot(inode) is_file(inode, FADVISE_HOT_BIT) +#define file_set_hot(inode) set_file(inode, FADVISE_HOT_BIT) +#define file_clear_hot(inode) clear_file(inode, FADVISE_HOT_BIT) #define DEF_DIR_LEVEL 0 @@ -696,6 +723,7 @@ struct f2fs_inode_info { kprojid_t i_projid; /* id for project quota */ int i_inline_xattr_size; /* inline xattr size */ struct timespec i_crtime; /* inode creation time */ + struct timespec i_disk_time[4]; /* inode disk times */ }; static inline void get_extent_info(struct extent_info *ext, @@ -802,7 +830,7 @@ struct f2fs_nm_info { unsigned int nid_cnt[MAX_NID_STATE]; /* the number of free node id */ spinlock_t nid_list_lock; /* protect nid lists ops */ struct mutex build_lock; /* lock for build free nids */ - unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE]; + unsigned char **free_nid_bitmap; unsigned char *nat_block_bitmap; unsigned short *free_nid_count; /* free nid count of NAT block */ @@ -1035,6 +1063,7 @@ struct f2fs_io_info { bool submitted; /* indicate IO submission */ int need_lock; /* indicate we need to lock cp_rwsem */ bool in_list; /* indicate fio is in io_list */ + bool is_meta; /* indicate borrow meta inode mapping or not */ enum iostat_type io_type; /* io type */ struct writeback_control *io_wbc; /* writeback control */ }; @@ -1096,10 +1125,34 @@ enum { MAX_TIME, }; +enum { + WHINT_MODE_OFF, /* not pass down write hints */ + WHINT_MODE_USER, /* try to pass down hints given by users */ + WHINT_MODE_FS, /* pass down hints with F2FS policy */ +}; + +enum { + ALLOC_MODE_DEFAULT, /* stay default */ + ALLOC_MODE_REUSE, /* reuse segments as much as possible */ +}; + +enum fsync_mode { + FSYNC_MODE_POSIX, /* fsync follows posix semantics */ + FSYNC_MODE_STRICT, /* fsync behaves in line with ext4 */ +}; + +#ifdef CONFIG_F2FS_FS_ENCRYPTION +#define DUMMY_ENCRYPTION_ENABLED(sbi) \ + (unlikely(F2FS_OPTION(sbi).test_dummy_encryption)) +#else +#define DUMMY_ENCRYPTION_ENABLED(sbi) (0) +#endif + struct f2fs_sb_info { struct super_block *sb; /* pointer to VFS super block */ struct proc_dir_entry *s_proc; /* proc entry */ struct f2fs_super_block *raw_super; /* raw super block pointer */ + struct rw_semaphore sb_lock; /* lock for raw super block */ int valid_super_block; /* valid super block no */ unsigned long s_flag; /* flags for sbi */ @@ -1119,7 +1172,6 @@ struct f2fs_sb_info { struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */ struct mutex wio_mutex[NR_PAGE_TYPE - 1][NR_TEMP_TYPE]; /* bio ordering for NODE/DATA */ - int write_io_size_bits; /* Write IO size bits */ mempool_t *write_io_dummy; /* Dummy pages */ /* for checkpoint */ @@ -1169,9 +1221,7 @@ struct f2fs_sb_info { unsigned int total_node_count; /* total node block count */ unsigned int total_valid_node_count; /* valid node block count */ loff_t max_file_blocks; /* max block index of file */ - int active_logs; /* # of active logs */ int dir_level; /* directory level */ - int inline_xattr_size; /* inline xattr size */ unsigned int 
trigger_ssr_threshold; /* threshold to trigger ssr */ int readdir_ra; /* readahead inode in readdir */ @@ -1181,9 +1231,6 @@ struct f2fs_sb_info { block_t last_valid_block_count; /* for recovery */ block_t reserved_blocks; /* configurable reserved blocks */ block_t current_reserved_blocks; /* current reserved blocks */ - block_t root_reserved_blocks; /* root reserved blocks */ - kuid_t s_resuid; /* reserved blocks for uid */ - kgid_t s_resgid; /* reserved blocks for gid */ unsigned int nquota_files; /* # of quota sysfile */ @@ -1268,17 +1315,6 @@ struct f2fs_sb_info { /* Precomputed FS UUID checksum for seeding other checksums */ __u32 s_chksum_seed; - - /* For fault injection */ -#ifdef CONFIG_F2FS_FAULT_INJECTION - struct f2fs_fault_info fault_info; -#endif - -#ifdef CONFIG_QUOTA - /* Names of quota files with journalled quota */ - char *s_qf_names[MAXQUOTAS]; - int s_jquota_fmt; /* Format of quota to use */ -#endif }; #ifdef CONFIG_F2FS_FAULT_INJECTION @@ -1288,7 +1324,7 @@ struct f2fs_sb_info { __func__, __builtin_return_address(0)) static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) { - struct f2fs_fault_info *ffi = &sbi->fault_info; + struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; if (!ffi->inject_rate) return false; @@ -1645,12 +1681,12 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi, return false; if (IS_NOQUOTA(inode)) return true; - if (capable(CAP_SYS_RESOURCE)) + if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid())) return true; - if (uid_eq(sbi->s_resuid, current_fsuid())) + if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) && + in_group_p(F2FS_OPTION(sbi).s_resgid)) return true; - if (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && - in_group_p(sbi->s_resgid)) + if (capable(CAP_SYS_RESOURCE)) return true; return false; } @@ -1686,7 +1722,7 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, sbi->current_reserved_blocks; if (!__allow_reserved_blocks(sbi, inode)) - avail_user_block_count -= sbi->root_reserved_blocks; + avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { diff = sbi->total_valid_block_count - avail_user_block_count; @@ -1821,6 +1857,12 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); int offset; + if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { + offset = (flag == SIT_BITMAP) ? 
+ le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; + return &ckpt->sit_nat_version_bitmap + offset; + } + if (__cp_payload(sbi) > 0) { if (flag == NAT_BITMAP) return &ckpt->sit_nat_version_bitmap; @@ -1887,7 +1929,7 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, sbi->current_reserved_blocks + 1; if (!__allow_reserved_blocks(sbi, inode)) - valid_block_count += sbi->root_reserved_blocks; + valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; if (unlikely(valid_block_count > sbi->user_block_count)) { spin_unlock(&sbi->stat_lock); @@ -2458,12 +2500,6 @@ static inline int f2fs_has_inline_dentry(struct inode *inode) return is_inode_flag_set(inode, FI_INLINE_DENTRY); } -static inline void f2fs_dentry_kunmap(struct inode *dir, struct page *page) -{ - if (!f2fs_has_inline_dentry(dir)) - kunmap(page); -} - static inline int is_file(struct inode *inode, int type) { return F2FS_I(inode)->i_advise & type; @@ -2495,7 +2531,17 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) } if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) || file_keep_isize(inode) || - i_size_read(inode) & PAGE_MASK) + i_size_read(inode) & ~PAGE_MASK) + return false; + + if (!timespec_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime)) + return false; + if (!timespec_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime)) + return false; + if (!timespec_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime)) + return false; + if (!timespec_equal(F2FS_I(inode)->i_disk_time + 3, + &F2FS_I(inode)->i_crtime)) return false; down_read(&F2FS_I(inode)->i_sem); @@ -2505,8 +2551,7 @@ static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) return ret; } -#define sb_rdonly f2fs_readonly -static inline int f2fs_readonly(struct super_block *sb) +static inline bool f2fs_readonly(struct super_block *sb) { return sb->s_flags & MS_RDONLY; } @@ -2568,15 +2613,6 @@ static inline void *kvzalloc(size_t size, gfp_t flags) return ret; } -enum rw_hint { - WRITE_LIFE_NOT_SET = 0, - WRITE_LIFE_NONE = 1, /* RWH_WRITE_LIFE_NONE */ - WRITE_LIFE_SHORT = 2, /* RWH_WRITE_LIFE_SHORT */ - WRITE_LIFE_MEDIUM = 3, /* RWH_WRITE_LIFE_MEDIUM */ - WRITE_LIFE_LONG = 4, /* RWH_WRITE_LIFE_LONG */ - WRITE_LIFE_EXTREME = 5, /* RWH_WRITE_LIFE_EXTREME */ -}; - static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, size_t size, gfp_t flags) { @@ -2685,6 +2721,8 @@ void handle_failed_inode(struct inode *inode); /* * namei.c */ +int update_extension_list(struct f2fs_sb_info *sbi, const char *name, + bool hot, bool set); struct dentry *f2fs_get_parent(struct dentry *child); /* @@ -2857,6 +2895,8 @@ void destroy_segment_manager(struct f2fs_sb_info *sbi); int __init create_segment_manager_caches(void); void destroy_segment_manager_caches(void); int rw_hint_to_seg_type(enum rw_hint hint); +enum rw_hint io_type_to_rw_hint(struct f2fs_sb_info *sbi, enum page_type type, + enum temp_type temp); /* * checkpoint.c @@ -2939,6 +2979,7 @@ int f2fs_release_page(struct page *page, gfp_t wait); int f2fs_migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode); #endif +bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len); /* * gc.c @@ -3261,45 +3302,21 @@ static inline bool f2fs_bio_encrypted(struct bio *bio) return bio->bi_private != NULL; } -static inline int f2fs_sb_has_crypto(struct super_block *sb) -{ - return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT); -} - -static inline int f2fs_sb_mounted_blkzoned(struct super_block *sb) -{ - return F2FS_HAS_FEATURE(sb, 
F2FS_FEATURE_BLKZONED); +#define F2FS_FEATURE_FUNCS(name, flagname) \ +static inline int f2fs_sb_has_##name(struct super_block *sb) \ +{ \ + return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_##flagname); \ } -static inline int f2fs_sb_has_extra_attr(struct super_block *sb) -{ - return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_EXTRA_ATTR); -} - -static inline int f2fs_sb_has_project_quota(struct super_block *sb) -{ - return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_PRJQUOTA); -} - -static inline int f2fs_sb_has_inode_chksum(struct super_block *sb) -{ - return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CHKSUM); -} - -static inline int f2fs_sb_has_flexible_inline_xattr(struct super_block *sb) -{ - return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR); -} - -static inline int f2fs_sb_has_quota_ino(struct super_block *sb) -{ - return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_QUOTA_INO); -} - -static inline int f2fs_sb_has_inode_crtime(struct super_block *sb) -{ - return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_INODE_CRTIME); -} +F2FS_FEATURE_FUNCS(encrypt, ENCRYPT); +F2FS_FEATURE_FUNCS(blkzoned, BLKZONED); +F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR); +F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA); +F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM); +F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR); +F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO); +F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME); +F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND); #ifdef CONFIG_BLK_DEV_ZONED static inline int get_blkz_type(struct f2fs_sb_info *sbi, @@ -3319,7 +3336,7 @@ static inline bool f2fs_discard_en(struct f2fs_sb_info *sbi) { struct request_queue *q = bdev_get_queue(sbi->sb->s_bdev); - return blk_queue_discard(q) || f2fs_sb_mounted_blkzoned(sbi->sb); + return blk_queue_discard(q) || f2fs_sb_has_blkzoned(sbi->sb); } static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt) @@ -3348,4 +3365,11 @@ static inline bool f2fs_may_encrypt(struct inode *inode) #endif } +static inline bool f2fs_force_buffered_io(struct inode *inode, int rw) +{ + return (f2fs_encrypted_file(inode) || + (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) || + F2FS_I_SB(inode)->s_ndevs); +} + #endif diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 65cda5bc61b7..39c3acb454a3 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -166,9 +166,10 @@ static inline enum cp_reason_type need_do_checkpoint(struct inode *inode) cp_reason = CP_NODE_NEED_CP; else if (test_opt(sbi, FASTBOOT)) cp_reason = CP_FASTBOOT_MODE; - else if (sbi->active_logs == 2) + else if (F2FS_OPTION(sbi).active_logs == 2) cp_reason = CP_SPEC_LOG_NUM; - else if (need_dentry_mark(sbi, inode->i_ino) && + else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT && + need_dentry_mark(sbi, inode->i_ino) && exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO)) cp_reason = CP_RECOVER_DIR; @@ -481,6 +482,9 @@ static int f2fs_file_open(struct inode *inode, struct file *filp) if (err) return err; + + filp->f_mode |= FMODE_NOWAIT; + return dquot_file_open(inode, filp); } @@ -571,7 +575,6 @@ truncate_out: int truncate_blocks(struct inode *inode, u64 from, bool lock) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - unsigned int blocksize = inode->i_sb->s_blocksize; struct dnode_of_data dn; pgoff_t free_from; int count = 0, err = 0; @@ -580,7 +583,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock) trace_f2fs_truncate_blocks_enter(inode, from); - free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1); + free_from = (pgoff_t)F2FS_BLK_ALIGN(from); if (free_from >= 
sbi->max_file_blocks) goto free_partial; @@ -1354,8 +1357,12 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, } out: - if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) - f2fs_i_size_write(inode, new_size); + if (new_size > i_size_read(inode)) { + if (mode & FALLOC_FL_KEEP_SIZE) + file_set_keep_isize(inode); + else + f2fs_i_size_write(inode, new_size); + } out_sem: up_write(&F2FS_I(inode)->i_mmap_sem); @@ -1709,6 +1716,8 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp) inode_lock(inode); + down_write(&F2FS_I(inode)->dio_rwsem[WRITE]); + if (f2fs_is_volatile_file(inode)) goto err_out; @@ -1727,6 +1736,7 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp) ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false); } err_out: + up_write(&F2FS_I(inode)->dio_rwsem[WRITE]); inode_unlock(inode); mnt_drop_write_file(filp); return ret; @@ -1936,7 +1946,7 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); - if (!f2fs_sb_has_crypto(inode->i_sb)) + if (!f2fs_sb_has_encrypt(inode->i_sb)) return -EOPNOTSUPP; f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); @@ -1946,7 +1956,7 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg) { - if (!f2fs_sb_has_crypto(file_inode(filp)->i_sb)) + if (!f2fs_sb_has_encrypt(file_inode(filp)->i_sb)) return -EOPNOTSUPP; return fscrypt_ioctl_get_policy(filp, (void __user *)arg); } @@ -1957,16 +1967,18 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg) struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int err; - if (!f2fs_sb_has_crypto(inode->i_sb)) + if (!f2fs_sb_has_encrypt(inode->i_sb)) return -EOPNOTSUPP; - if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt)) - goto got_it; - err = mnt_want_write_file(filp); if (err) return err; + down_write(&sbi->sb_lock); + + if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt)) + goto got_it; + /* update superblock with uuid */ generate_random_uuid(sbi->raw_super->encrypt_pw_salt); @@ -1974,15 +1986,16 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg) if (err) { /* undo new data */ memset(sbi->raw_super->encrypt_pw_salt, 0, 16); - mnt_drop_write_file(filp); - return err; + goto out_err; } - mnt_drop_write_file(filp); got_it: if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt, 16)) - return -EFAULT; - return 0; + err = -EFAULT; +out_err: + up_write(&sbi->sb_lock); + mnt_drop_write_file(filp); + return err; } static int f2fs_ioc_gc(struct file *filp, unsigned long arg) @@ -2043,8 +2056,10 @@ static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg) return ret; end = range.start + range.len; - if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) - return -EINVAL; + if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) { + ret = -EINVAL; + goto out; + } do_more: if (!range.sync) { if (!mutex_trylock(&sbi->gc_mutex)) { @@ -2685,25 +2700,54 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) return -EIO; - inode_lock(inode); + if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) + return -EINVAL; + + if (!inode_trylock(inode)) { + if (iocb->ki_flags & IOCB_NOWAIT) + return -EAGAIN; + inode_lock(inode); + } + ret = generic_write_checks(iocb, from); if (ret > 0) { + bool preallocated = false; + size_t 
target_size = 0; int err; if (iov_iter_fault_in_readable(from, iov_iter_count(from))) set_inode_flag(inode, FI_NO_PREALLOC); - err = f2fs_preallocate_blocks(iocb, from); - if (err) { - clear_inode_flag(inode, FI_NO_PREALLOC); - inode_unlock(inode); - return err; + if ((iocb->ki_flags & IOCB_NOWAIT) && + (iocb->ki_flags & IOCB_DIRECT)) { + if (!f2fs_overwrite_io(inode, iocb->ki_pos, + iov_iter_count(from)) || + f2fs_has_inline_data(inode) || + f2fs_force_buffered_io(inode, WRITE)) { + inode_unlock(inode); + return -EAGAIN; + } + + } else { + preallocated = true; + target_size = iocb->ki_pos + iov_iter_count(from); + + err = f2fs_preallocate_blocks(iocb, from); + if (err) { + clear_inode_flag(inode, FI_NO_PREALLOC); + inode_unlock(inode); + return err; + } } blk_start_plug(&plug); ret = __generic_file_write_iter(iocb, from); blk_finish_plug(&plug); clear_inode_flag(inode, FI_NO_PREALLOC); + /* if we couldn't write data, we should deallocate blocks. */ + if (preallocated && i_size_read(inode) < target_size) + f2fs_truncate(inode); + if (ret > 0) f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret); } diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index d0de3429c26c..54f51a990794 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -76,14 +76,15 @@ static int gc_thread_func(void *data) * invalidated soon after by user update or deletion. * So, I'd like to wait some time to collect dirty segments. */ - if (!mutex_trylock(&sbi->gc_mutex)) - goto next; - if (gc_th->gc_urgent) { wait_ms = gc_th->urgent_sleep_time; + mutex_lock(&sbi->gc_mutex); goto do_gc; } + if (!mutex_trylock(&sbi->gc_mutex)) + goto next; + if (!is_idle(sbi)) { increase_sleep_time(gc_th, &wait_ms); mutex_unlock(&sbi->gc_mutex); @@ -161,12 +162,17 @@ static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type) { int gc_mode = (gc_type == BG_GC) ? 
GC_CB : GC_GREEDY; - if (gc_th && gc_th->gc_idle) { + if (!gc_th) + return gc_mode; + + if (gc_th->gc_idle) { if (gc_th->gc_idle == 1) gc_mode = GC_CB; else if (gc_th->gc_idle == 2) gc_mode = GC_GREEDY; } + if (gc_th->gc_urgent) + gc_mode = GC_GREEDY; return gc_mode; } @@ -188,11 +194,14 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type, } /* we need to check every dirty segments in the FG_GC case */ - if (gc_type != FG_GC && p->max_search > sbi->max_victim_search) + if (gc_type != FG_GC && + (sbi->gc_thread && !sbi->gc_thread->gc_urgent) && + p->max_search > sbi->max_victim_search) p->max_search = sbi->max_victim_search; - /* let's select beginning hot/small space first */ - if (type == CURSEG_HOT_DATA || IS_NODESEG(type)) + /* let's select beginning hot/small space first in no_heap mode*/ + if (test_opt(sbi, NOHEAP) && + (type == CURSEG_HOT_DATA || IS_NODESEG(type))) p->offset = 0; else p->offset = SIT_I(sbi)->last_victim[p->gc_mode]; diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 91d5d831be72..37ab2159506a 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -387,7 +387,7 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage, f2fs_wait_on_page_writeback(page, DATA, true); zero_user_segment(page, MAX_INLINE_DATA(dir), PAGE_SIZE); - dentry_blk = kmap_atomic(page); + dentry_blk = page_address(page); make_dentry_ptr_inline(dir, &src, inline_dentry); make_dentry_ptr_block(dir, &dst, dentry_blk); @@ -404,7 +404,6 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage, memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max); memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN); - kunmap_atomic(dentry_blk); if (!PageUptodate(page)) SetPageUptodate(page); set_page_dirty(page); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 89c838bfb067..51846fc54fbd 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -284,6 +284,10 @@ static int do_read_inode(struct inode *inode) fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec); } + F2FS_I(inode)->i_disk_time[0] = inode->i_atime; + F2FS_I(inode)->i_disk_time[1] = inode->i_ctime; + F2FS_I(inode)->i_disk_time[2] = inode->i_mtime; + F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime; f2fs_put_page(node_page, 1); stat_inc_inline_xattr(inode); @@ -328,7 +332,7 @@ make_now: inode->i_op = &f2fs_dir_inode_operations; inode->i_fop = &f2fs_dir_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; - mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO); + inode_nohighmem(inode); } else if (S_ISLNK(inode->i_mode)) { if (f2fs_encrypted_inode(inode)) inode->i_op = &f2fs_encrypted_symlink_inode_operations; @@ -439,12 +443,15 @@ void update_inode(struct inode *inode, struct page *node_page) } __set_inode_rdev(inode, ri); - set_cold_node(inode, node_page); /* deleted inode */ if (inode->i_nlink == 0) clear_inline_node(node_page); + F2FS_I(inode)->i_disk_time[0] = inode->i_atime; + F2FS_I(inode)->i_disk_time[1] = inode->i_ctime; + F2FS_I(inode)->i_disk_time[2] = inode->i_mtime; + F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime; } void update_inode_page(struct inode *inode) diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index da7f709e3926..5ec20f077629 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -78,7 +78,8 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) set_inode_flag(inode, FI_NEW_INODE); /* If the directory encrypted, then we should encrypt the inode. 
*/ - if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode)) + if ((f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) && + f2fs_may_encrypt(inode)) f2fs_set_encrypted_inode(inode); if (f2fs_sb_has_extra_attr(sbi->sb)) { @@ -97,7 +98,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode) if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)) { f2fs_bug_on(sbi, !f2fs_has_extra_attr(inode)); if (f2fs_has_inline_xattr(inode)) - xattr_size = sbi->inline_xattr_size; + xattr_size = F2FS_OPTION(sbi).inline_xattr_size; /* Otherwise, will be 0 */ } else if (f2fs_has_inline_xattr(inode) || f2fs_has_inline_dentry(inode)) { @@ -142,7 +143,7 @@ fail_drop: return ERR_PTR(err); } -static int is_multimedia_file(const unsigned char *s, const char *sub) +static int is_extension_exist(const unsigned char *s, const char *sub) { size_t slen = strlen(s); size_t sublen = strlen(sub); @@ -168,19 +169,94 @@ static int is_multimedia_file(const unsigned char *s, const char *sub) /* * Set multimedia files as cold files for hot/cold data separation */ -static inline void set_cold_files(struct f2fs_sb_info *sbi, struct inode *inode, +static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *inode, const unsigned char *name) { - int i; - __u8 (*extlist)[8] = sbi->raw_super->extension_list; + __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list; + int i, cold_count, hot_count; + + down_read(&sbi->sb_lock); + + cold_count = le32_to_cpu(sbi->raw_super->extension_count); + hot_count = sbi->raw_super->hot_ext_count; - int count = le32_to_cpu(sbi->raw_super->extension_count); - for (i = 0; i < count; i++) { - if (is_multimedia_file(name, extlist[i])) { + for (i = 0; i < cold_count + hot_count; i++) { + if (!is_extension_exist(name, extlist[i])) + continue; + if (i < cold_count) file_set_cold(inode); - break; - } + else + file_set_hot(inode); + break; + } + + up_read(&sbi->sb_lock); +} + +int update_extension_list(struct f2fs_sb_info *sbi, const char *name, + bool hot, bool set) +{ + __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list; + int cold_count = le32_to_cpu(sbi->raw_super->extension_count); + int hot_count = sbi->raw_super->hot_ext_count; + int total_count = cold_count + hot_count; + int start, count; + int i; + + if (set) { + if (total_count == F2FS_MAX_EXTENSION) + return -EINVAL; + } else { + if (!hot && !cold_count) + return -EINVAL; + if (hot && !hot_count) + return -EINVAL; + } + + if (hot) { + start = cold_count; + count = total_count; + } else { + start = 0; + count = cold_count; + } + + for (i = start; i < count; i++) { + if (strcmp(name, extlist[i])) + continue; + + if (set) + return -EINVAL; + + memcpy(extlist[i], extlist[i + 1], + F2FS_EXTENSION_LEN * (total_count - i - 1)); + memset(extlist[total_count - 1], 0, F2FS_EXTENSION_LEN); + if (hot) + sbi->raw_super->hot_ext_count = hot_count - 1; + else + sbi->raw_super->extension_count = + cpu_to_le32(cold_count - 1); + return 0; + } + + if (!set) + return -EINVAL; + + if (hot) { + strncpy(extlist[count], name, strlen(name)); + sbi->raw_super->hot_ext_count = hot_count + 1; + } else { + char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN]; + + memcpy(buf, &extlist[cold_count], + F2FS_EXTENSION_LEN * hot_count); + memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN); + strncpy(extlist[cold_count], name, strlen(name)); + memcpy(&extlist[cold_count + 1], buf, + F2FS_EXTENSION_LEN * hot_count); + sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1); } + return 0; } static int 
f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, @@ -203,7 +279,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, return PTR_ERR(inode); if (!test_opt(sbi, DISABLE_EXT_IDENTIFY)) - set_cold_files(sbi, inode, dentry->d_name.name); + set_file_temperature(sbi, inode, dentry->d_name.name); inode->i_op = &f2fs_file_inode_operations; inode->i_fop = &f2fs_file_operations; @@ -317,7 +393,6 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino) de = f2fs_find_entry(dir, &dot, &page); if (de) { - f2fs_dentry_kunmap(dir, page); f2fs_put_page(page, 0); } else if (IS_ERR(page)) { err = PTR_ERR(page); @@ -329,14 +404,12 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino) } de = f2fs_find_entry(dir, &dotdot, &page); - if (de) { - f2fs_dentry_kunmap(dir, page); + if (de) f2fs_put_page(page, 0); - } else if (IS_ERR(page)) { + else if (IS_ERR(page)) err = PTR_ERR(page); - } else { + else err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR); - } out: if (!err) clear_inode_flag(dir, FI_INLINE_DOTS); @@ -377,7 +450,6 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, } ino = le32_to_cpu(de->ino); - f2fs_dentry_kunmap(dir, page); f2fs_put_page(page, 0); inode = f2fs_iget(dir->i_sb, ino); @@ -452,7 +524,6 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) err = acquire_orphan_inode(sbi); if (err) { f2fs_unlock_op(sbi); - f2fs_dentry_kunmap(dir, page); f2fs_put_page(page, 0); goto fail; } @@ -483,27 +554,16 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; size_t len = strlen(symname); - struct fscrypt_str disk_link = FSTR_INIT((char *)symname, len + 1); - struct fscrypt_symlink_data *sd = NULL; + struct fscrypt_str disk_link; int err; if (unlikely(f2fs_cp_error(sbi))) return -EIO; - if (f2fs_encrypted_inode(dir)) { - err = fscrypt_get_encryption_info(dir); - if (err) - return err; - - if (!fscrypt_has_encryption_key(dir)) - return -ENOKEY; - - disk_link.len = (fscrypt_fname_encrypted_size(dir, len) + - sizeof(struct fscrypt_symlink_data)); - } - - if (disk_link.len > dir->i_sb->s_blocksize) - return -ENAMETOOLONG; + err = fscrypt_prepare_symlink(dir, symname, len, dir->i_sb->s_blocksize, + &disk_link); + if (err) + return err; err = dquot_initialize(dir); if (err) @@ -513,7 +573,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, if (IS_ERR(inode)) return PTR_ERR(inode); - if (f2fs_encrypted_inode(inode)) + if (IS_ENCRYPTED(inode)) inode->i_op = &f2fs_encrypted_symlink_inode_operations; else inode->i_op = &f2fs_symlink_inode_operations; @@ -523,38 +583,13 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); if (err) - goto out; + goto out_handle_failed_inode; f2fs_unlock_op(sbi); alloc_nid_done(sbi, inode->i_ino); - if (f2fs_encrypted_inode(inode)) { - struct qstr istr = QSTR_INIT(symname, len); - struct fscrypt_str ostr; - - sd = f2fs_kzalloc(sbi, disk_link.len, GFP_NOFS); - if (!sd) { - err = -ENOMEM; - goto err_out; - } - - err = fscrypt_get_encryption_info(inode); - if (err) - goto err_out; - - if (!fscrypt_has_encryption_key(inode)) { - err = -ENOKEY; - goto err_out; - } - - ostr.name = sd->encrypted_path; - ostr.len = disk_link.len; - err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr); - if (err) - goto err_out; - - sd->len = cpu_to_le16(ostr.len); - disk_link.name = (char *)sd; - } + err = 
fscrypt_encrypt_symlink(inode, symname, len, &disk_link); + if (err) + goto err_out; err = page_symlink(inode, disk_link.name, disk_link.len); @@ -581,12 +616,14 @@ err_out: f2fs_unlink(dir, dentry); } - kfree(sd); - f2fs_balance_fs(sbi, true); - return err; -out: + goto out_free_encrypted_link; + +out_handle_failed_inode: handle_failed_inode(inode); +out_free_encrypted_link: + if (disk_link.name != (unsigned char *)symname) + kfree(disk_link.name); return err; } @@ -610,7 +647,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) inode->i_op = &f2fs_dir_inode_operations; inode->i_fop = &f2fs_dir_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; - mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO); + inode_nohighmem(inode); set_inode_flag(inode, FI_INC_LINK); f2fs_lock_op(sbi); @@ -748,10 +785,12 @@ out: static int f2fs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) { - if (unlikely(f2fs_cp_error(F2FS_I_SB(dir)))) + struct f2fs_sb_info *sbi = F2FS_I_SB(dir); + + if (unlikely(f2fs_cp_error(sbi))) return -EIO; - if (f2fs_encrypted_inode(dir)) { + if (f2fs_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) { int err = fscrypt_get_encryption_info(dir); if (err) return err; @@ -924,16 +963,15 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, } if (old_dir_entry) { - if (old_dir != new_dir && !whiteout) { + if (old_dir != new_dir && !whiteout) f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir); - } else { - f2fs_dentry_kunmap(old_inode, old_dir_page); + else f2fs_put_page(old_dir_page, 0); - } f2fs_i_links_write(old_dir, false); } - add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO); + if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) + add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO); f2fs_unlock_op(sbi); @@ -943,20 +981,15 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, put_out_dir: f2fs_unlock_op(sbi); - if (new_page) { - f2fs_dentry_kunmap(new_dir, new_page); + if (new_page) f2fs_put_page(new_page, 0); - } out_whiteout: if (whiteout) iput(whiteout); out_dir: - if (old_dir_entry) { - f2fs_dentry_kunmap(old_inode, old_dir_page); + if (old_dir_entry) f2fs_put_page(old_dir_page, 0); - } out_old: - f2fs_dentry_kunmap(old_dir, old_page); f2fs_put_page(old_page, 0); out: return err; @@ -1088,8 +1121,10 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, } f2fs_mark_inode_dirty_sync(new_dir, false); - add_ino_entry(sbi, old_dir->i_ino, TRANS_DIR_INO); - add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO); + if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) { + add_ino_entry(sbi, old_dir->i_ino, TRANS_DIR_INO); + add_ino_entry(sbi, new_dir->i_ino, TRANS_DIR_INO); + } f2fs_unlock_op(sbi); @@ -1098,19 +1133,15 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, return 0; out_new_dir: if (new_dir_entry) { - f2fs_dentry_kunmap(new_inode, new_dir_page); f2fs_put_page(new_dir_page, 0); } out_old_dir: if (old_dir_entry) { - f2fs_dentry_kunmap(old_inode, old_dir_page); f2fs_put_page(old_dir_page, 0); } out_new: - f2fs_dentry_kunmap(new_dir, new_page); f2fs_put_page(new_page, 0); out_old: - f2fs_dentry_kunmap(old_dir, old_page); f2fs_put_page(old_page, 0); out: return err; @@ -1143,65 +1174,21 @@ static int f2fs_rename2(struct inode *old_dir, struct dentry *old_dentry, static const char *f2fs_encrypted_follow_link(struct dentry *dentry, void **cookie) { - struct page *cpage = NULL; - char *caddr, *paddr = NULL; - 
struct fscrypt_str cstr = FSTR_INIT(NULL, 0); - struct fscrypt_str pstr = FSTR_INIT(NULL, 0); - struct fscrypt_symlink_data *sd; struct inode *inode = d_inode(dentry); - u32 max_size = inode->i_sb->s_blocksize; - int res; - - res = fscrypt_get_encryption_info(inode); - if (res) - return ERR_PTR(res); - - cpage = read_mapping_page(inode->i_mapping, 0, NULL); - if (IS_ERR(cpage)) - return ERR_CAST(cpage); - caddr = page_address(cpage); - - /* Symlink is encrypted */ - sd = (struct fscrypt_symlink_data *)caddr; - cstr.name = sd->encrypted_path; - cstr.len = le16_to_cpu(sd->len); - - /* this is broken symlink case */ - if (unlikely(cstr.len == 0)) { - res = -ENOENT; - goto errout; - } - - if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) { - /* Symlink data on the disk is corrupted */ - res = -EIO; - goto errout; - } - res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr); - if (res) - goto errout; - - res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr); - if (res) - goto errout; - - /* this is broken symlink case */ - if (unlikely(pstr.name[0] == 0)) { - res = -ENOENT; - goto errout; - } + struct page *page; + void *target; - paddr = pstr.name; + if (!dentry) + return ERR_PTR(-ECHILD); - /* Null-terminate the name */ - paddr[pstr.len] = '\0'; + page = read_mapping_page(inode->i_mapping, 0, NULL); + if (IS_ERR(page)) + return ERR_CAST(page); - put_page(cpage); - return *cookie = paddr; -errout: - fscrypt_fname_free_buffer(&pstr); - put_page(cpage); - return ERR_PTR(res); + target = fscrypt_get_symlink(inode, page_address(page), + inode->i_sb->s_blocksize); + put_page(page); + return *cookie = target; } const struct inode_operations f2fs_encrypted_symlink_inode_operations = { diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index c294d0feea08..157d768c7b31 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -193,8 +193,8 @@ static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) __free_nat_entry(e); } -static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, - struct nat_entry *ne) +static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i, + struct nat_entry *ne) { nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid); struct nat_entry_set *head; @@ -209,15 +209,36 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, head->entry_cnt = 0; f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head); } + return head; +} + +static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, + struct nat_entry *ne) +{ + struct nat_entry_set *head; + bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR; + + if (!new_ne) + head = __grab_nat_entry_set(nm_i, ne); + + /* + * update entry_cnt in below condition: + * 1. update NEW_ADDR to valid block address; + * 2. 
update old block address to new one; + */ + if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) || + !get_nat_flag(ne, IS_DIRTY))) + head->entry_cnt++; + + set_nat_flag(ne, IS_PREALLOC, new_ne); if (get_nat_flag(ne, IS_DIRTY)) goto refresh_list; nm_i->dirty_nat_cnt++; - head->entry_cnt++; set_nat_flag(ne, IS_DIRTY, true); refresh_list: - if (nat_get_blkaddr(ne) == NEW_ADDR) + if (new_ne) list_del_init(&ne->list); else list_move_tail(&ne->list, &head->entry_list); @@ -1076,7 +1097,7 @@ struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs) f2fs_wait_on_page_writeback(page, NODE, true); fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true); - set_cold_node(dn->inode, page); + set_cold_node(page, S_ISDIR(dn->inode->i_mode)); if (!PageUptodate(page)) SetPageUptodate(page); if (set_page_dirty(page)) @@ -2313,6 +2334,7 @@ retry: if (!PageUptodate(ipage)) SetPageUptodate(ipage); fill_node_footer(ipage, ino, ino, 0, true); + set_cold_node(page, false); src = F2FS_INODE(page); dst = F2FS_INODE(ipage); @@ -2602,8 +2624,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) if (!enabled_nat_bits(sbi, NULL)) return 0; - nm_i->nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 + - F2FS_BLKSIZE - 1); + nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8); nm_i->nat_bits = f2fs_kzalloc(sbi, nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL); if (!nm_i->nat_bits) @@ -2729,12 +2750,20 @@ static int init_node_manager(struct f2fs_sb_info *sbi) static int init_free_nid_cache(struct f2fs_sb_info *sbi) { struct f2fs_nm_info *nm_i = NM_I(sbi); + int i; - nm_i->free_nid_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks * - NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL); + nm_i->free_nid_bitmap = f2fs_kzalloc(sbi, nm_i->nat_blocks * + sizeof(unsigned char *), GFP_KERNEL); if (!nm_i->free_nid_bitmap) return -ENOMEM; + for (i = 0; i < nm_i->nat_blocks; i++) { + nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi, + NAT_ENTRY_BITMAP_SIZE_ALIGNED, GFP_KERNEL); + if (!nm_i->free_nid_bitmap) + return -ENOMEM; + } + nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8, GFP_KERNEL); if (!nm_i->nat_block_bitmap) @@ -2825,7 +2854,13 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) up_write(&nm_i->nat_tree_lock); kvfree(nm_i->nat_block_bitmap); - kvfree(nm_i->free_nid_bitmap); + if (nm_i->free_nid_bitmap) { + int i; + + for (i = 0; i < nm_i->nat_blocks; i++) + kvfree(nm_i->free_nid_bitmap[i]); + kfree(nm_i->free_nid_bitmap); + } kvfree(nm_i->free_nid_count); kfree(nm_i->nat_bitmap); diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index 081ef0d672bf..b95e49e4a928 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -44,6 +44,7 @@ enum { HAS_FSYNCED_INODE, /* is the inode fsynced before? */ HAS_LAST_FSYNC, /* has the latest node fsync mark? */ IS_DIRTY, /* this nat entry is dirty? 
*/ + IS_PREALLOC, /* nat entry is preallocated */ }; /* @@ -422,12 +423,12 @@ static inline void clear_inline_node(struct page *page) ClearPageChecked(page); } -static inline void set_cold_node(struct inode *inode, struct page *page) +static inline void set_cold_node(struct page *page, bool is_dir) { struct f2fs_node *rn = F2FS_NODE(page); unsigned int flag = le32_to_cpu(rn->footer.flag); - if (S_ISDIR(inode->i_mode)) + if (is_dir) flag &= ~(0x1 << COLD_BIT_SHIFT); else flag |= (0x1 << COLD_BIT_SHIFT); diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index b6d1ec620a8c..4ddc2262baf1 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -144,7 +144,7 @@ static int recover_dentry(struct inode *inode, struct page *ipage, retry: de = __f2fs_find_entry(dir, &fname, &page); if (de && inode->i_ino == le32_to_cpu(de->ino)) - goto out_unmap_put; + goto out_put; if (de) { einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino)); @@ -153,19 +153,19 @@ retry: err = PTR_ERR(einode); if (err == -ENOENT) err = -EEXIST; - goto out_unmap_put; + goto out_put; } err = dquot_initialize(einode); if (err) { iput(einode); - goto out_unmap_put; + goto out_put; } err = acquire_orphan_inode(F2FS_I_SB(inode)); if (err) { iput(einode); - goto out_unmap_put; + goto out_put; } f2fs_delete_entry(de, page, dir, einode); iput(einode); @@ -180,8 +180,7 @@ retry: goto retry; goto out; -out_unmap_put: - f2fs_dentry_kunmap(dir, page); +out_put: f2fs_put_page(page, 0); out: if (file_enc_name(inode)) @@ -243,6 +242,9 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head, struct curseg_info *curseg; struct page *page = NULL; block_t blkaddr; + unsigned int loop_cnt = 0; + unsigned int free_blocks = sbi->user_block_count - + valid_user_blocks(sbi); int err = 0; /* get node pages in the current segment */ @@ -295,6 +297,17 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head, if (IS_INODE(page) && is_dent_dnode(page)) entry->last_dentry = blkaddr; next: + /* sanity check in order to detect looped node chain */ + if (++loop_cnt >= free_blocks || + blkaddr == next_blkaddr_of_node(page)) { + f2fs_msg(sbi->sb, KERN_NOTICE, + "%s: detect looped node chain, " + "blkaddr:%u, next:%u", + __func__, blkaddr, next_blkaddr_of_node(page)); + err = -EINVAL; + break; + } + /* check next segment */ blkaddr = next_blkaddr_of_node(page); f2fs_put_page(page, 1); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index bf98f6f34b7e..d7bac60ad719 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1491,12 +1491,11 @@ static int issue_discard_thread(void *data) if (kthread_should_stop()) return 0; - if (dcc->discard_wake) { + if (dcc->discard_wake) dcc->discard_wake = 0; - if (sbi->gc_thread && sbi->gc_thread->gc_urgent) - init_discard_policy(&dpolicy, - DPOLICY_FORCE, 1); - } + + if (sbi->gc_thread && sbi->gc_thread->gc_urgent) + init_discard_policy(&dpolicy, DPOLICY_FORCE, 1); sb_start_intwrite(sbi->sb); @@ -1565,7 +1564,7 @@ static int __issue_discard_async(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t blkstart, block_t blklen) { #ifdef CONFIG_BLK_DEV_ZONED - if (f2fs_sb_mounted_blkzoned(sbi->sb) && + if (f2fs_sb_has_blkzoned(sbi->sb) && bdev_zoned_model(bdev) != BLK_ZONED_NONE) return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen); #endif @@ -1763,7 +1762,7 @@ find_next: sbi->blocks_per_seg, cur_pos); len = next_pos - cur_pos; - if (f2fs_sb_mounted_blkzoned(sbi->sb) || + if (f2fs_sb_has_blkzoned(sbi->sb) || (force && len < cpc->trim_minlen)) goto skip; 
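A note on the looped node chain check added to find_fsync_dnodes() in fs/f2fs/recovery.c above: the walk over the fsync node chain is capped by the number of free blocks, and it also bails out when a node's next pointer refers back to the node itself, so a corrupted, cyclic chain cannot hang recovery. The standalone C sketch below is not part of the patch; names such as chain_node and walk_chain are hypothetical stand-ins used only to illustrate the same bounded-walk idea.

/*
 * Hedged illustration only: a corrupted log can form a cycle, so the
 * walk is capped by an upper bound (in the patch, the free block count)
 * and stops when a node's "next" pointer repeats its own address.
 */
#include <stdio.h>

struct chain_node {		/* hypothetical stand-in for an on-disk node */
	unsigned int blkaddr;	/* where this node lives */
	unsigned int next;	/* analogue of next_blkaddr_of_node() */
};

/* Returns 0 if the chain ends cleanly, -1 if a loop is suspected. */
static int walk_chain(const struct chain_node *nodes, unsigned int nr_nodes,
		      unsigned int start, unsigned int budget)
{
	unsigned int blkaddr = start, steps = 0;

	while (blkaddr < nr_nodes) {
		const struct chain_node *n = &nodes[blkaddr];

		/* mirrors the patch's sanity check */
		if (++steps >= budget || blkaddr == n->next) {
			fprintf(stderr, "looped node chain at %u\n", blkaddr);
			return -1;
		}
		blkaddr = n->next;
	}
	return 0;
}

int main(void)
{
	struct chain_node nodes[4] = {
		{0, 1}, {1, 2}, {2, 1}, {3, 0}	/* 1 -> 2 -> 1 forms a loop */
	};

	if (walk_chain(nodes, 4, 0, 4))
		printf("loop detected; recovery would stop with -EINVAL\n");
	return 0;
}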
@@ -1807,7 +1806,7 @@ void init_discard_policy(struct discard_policy *dpolicy, } else if (discard_type == DPOLICY_FORCE) { dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME; dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME; - dpolicy->io_aware = true; + dpolicy->io_aware = false; } else if (discard_type == DPOLICY_FSTRIM) { dpolicy->io_aware = false; } else if (discard_type == DPOLICY_UMOUNT) { @@ -1943,7 +1942,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) sbi->discard_blks--; /* don't overwrite by SSR to keep node chain */ - if (se->type == CURSEG_WARM_NODE) { + if (IS_NODESEG(se->type)) { if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) se->ckpt_valid_blocks++; } @@ -2244,11 +2243,17 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type) if (sbi->segs_per_sec != 1) return CURSEG_I(sbi, type)->segno; - if (type == CURSEG_HOT_DATA || IS_NODESEG(type)) + if (test_opt(sbi, NOHEAP) && + (type == CURSEG_HOT_DATA || IS_NODESEG(type))) return 0; if (SIT_I(sbi)->last_victim[ALLOC_NEXT]) return SIT_I(sbi)->last_victim[ALLOC_NEXT]; + + /* find segments from 0 to reuse freed segments */ + if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) + return 0; + return CURSEG_I(sbi, type)->segno; } @@ -2535,6 +2540,101 @@ int rw_hint_to_seg_type(enum rw_hint hint) } } +/* This returns write hints for each segment type. This hints will be + * passed down to block layer. There are mapping tables which depend on + * the mount option 'whint_mode'. + * + * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET. + * + * 2) whint_mode=user-based. F2FS tries to pass down hints given by users. + * + * User F2FS Block + * ---- ---- ----- + * META WRITE_LIFE_NOT_SET + * HOT_NODE " + * WARM_NODE " + * COLD_NODE " + * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME + * extension list " " + * + * -- buffered io + * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME + * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT + * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET + * WRITE_LIFE_NONE " " + * WRITE_LIFE_MEDIUM " " + * WRITE_LIFE_LONG " " + * + * -- direct io + * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME + * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT + * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET + * WRITE_LIFE_NONE " WRITE_LIFE_NONE + * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM + * WRITE_LIFE_LONG " WRITE_LIFE_LONG + * + * 3) whint_mode=fs-based. F2FS passes down hints with its policy. 
+ * + * User F2FS Block + * ---- ---- ----- + * META WRITE_LIFE_MEDIUM; + * HOT_NODE WRITE_LIFE_NOT_SET + * WARM_NODE " + * COLD_NODE WRITE_LIFE_NONE + * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME + * extension list " " + * + * -- buffered io + * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME + * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT + * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_LONG + * WRITE_LIFE_NONE " " + * WRITE_LIFE_MEDIUM " " + * WRITE_LIFE_LONG " " + * + * -- direct io + * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME + * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT + * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET + * WRITE_LIFE_NONE " WRITE_LIFE_NONE + * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM + * WRITE_LIFE_LONG " WRITE_LIFE_LONG + */ + +enum rw_hint io_type_to_rw_hint(struct f2fs_sb_info *sbi, + enum page_type type, enum temp_type temp) +{ + if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) { + if (type == DATA) { + if (temp == WARM) + return WRITE_LIFE_NOT_SET; + else if (temp == HOT) + return WRITE_LIFE_SHORT; + else if (temp == COLD) + return WRITE_LIFE_EXTREME; + } else { + return WRITE_LIFE_NOT_SET; + } + } else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) { + if (type == DATA) { + if (temp == WARM) + return WRITE_LIFE_LONG; + else if (temp == HOT) + return WRITE_LIFE_SHORT; + else if (temp == COLD) + return WRITE_LIFE_EXTREME; + } else if (type == NODE) { + if (temp == WARM || temp == HOT) + return WRITE_LIFE_NOT_SET; + else if (temp == COLD) + return WRITE_LIFE_NONE; + } else if (type == META) { + return WRITE_LIFE_MEDIUM; + } + } + return WRITE_LIFE_NOT_SET; +} + static int __get_segment_type_2(struct f2fs_io_info *fio) { if (fio->type == DATA) @@ -2567,7 +2667,8 @@ static int __get_segment_type_6(struct f2fs_io_info *fio) if (is_cold_data(fio->page) || file_is_cold(inode)) return CURSEG_COLD_DATA; - if (is_inode_flag_set(inode, FI_HOT_DATA)) + if (file_is_hot(inode) || + is_inode_flag_set(inode, FI_HOT_DATA)) return CURSEG_HOT_DATA; /* rw_hint_to_seg_type(inode->i_write_hint); */ return CURSEG_WARM_DATA; @@ -2583,7 +2684,7 @@ static int __get_segment_type(struct f2fs_io_info *fio) { int type = 0; - switch (fio->sbi->active_logs) { + switch (F2FS_OPTION(fio->sbi).active_logs) { case 2: type = __get_segment_type_2(fio); break; @@ -2723,6 +2824,7 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page, struct f2fs_io_info fio = { .sbi = sbi, .type = META, + .temp = HOT, .op = REQ_OP_WRITE, .op_flags = REQ_SYNC | REQ_NOIDLE | REQ_META | REQ_PRIO, .old_blkaddr = page->index, @@ -2769,8 +2871,15 @@ void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio) int rewrite_data_page(struct f2fs_io_info *fio) { int err; + struct f2fs_sb_info *sbi = fio->sbi; fio->new_blkaddr = fio->old_blkaddr; + /* i/o temperature is needed for passing down write hints */ + __get_segment_type(fio); + + f2fs_bug_on(sbi, !IS_DATASEG(get_seg_entry(sbi, + GET_SEGNO(sbi, fio->new_blkaddr))->type)); + stat_inc_inplace_blocks(fio->sbi); err = f2fs_submit_page_bio(fio); diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 5d6d3e72be31..96a2d57ba8a4 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -53,13 +53,19 @@ ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \ (sbi)->segs_per_sec)) \ -#define MAIN_BLKADDR(sbi) (SM_I(sbi)->main_blkaddr) -#define SEG0_BLKADDR(sbi) (SM_I(sbi)->seg0_blkaddr) +#define MAIN_BLKADDR(sbi) \ + (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \ + le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr)) +#define SEG0_BLKADDR(sbi) \ + (SM_I(sbi) ? 
SM_I(sbi)->seg0_blkaddr : \ + le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr)) #define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments) #define MAIN_SECS(sbi) ((sbi)->total_sections) -#define TOTAL_SEGS(sbi) (SM_I(sbi)->segment_count) +#define TOTAL_SEGS(sbi) \ + (SM_I(sbi) ? SM_I(sbi)->segment_count : \ + le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count)) #define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg) #define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi)) @@ -596,6 +602,8 @@ static inline int utilization(struct f2fs_sb_info *sbi) #define DEF_MIN_FSYNC_BLOCKS 8 #define DEF_MIN_HOT_BLOCKS 16 +#define SMALL_VOLUME_SEGMENTS (16 * 512) /* 16GB */ + enum { F2FS_IPU_FORCE, F2FS_IPU_SSR, @@ -630,10 +638,17 @@ static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno) f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1); } -static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr) +static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr) { - BUG_ON(blk_addr < SEG0_BLKADDR(sbi) - || blk_addr >= MAX_BLKADDR(sbi)); + struct f2fs_sb_info *sbi = fio->sbi; + + if (PAGE_TYPE_OF_BIO(fio->type) == META && + (!is_read_io(fio->op) || fio->is_meta)) + BUG_ON(blk_addr < SEG0_BLKADDR(sbi) || + blk_addr >= MAIN_BLKADDR(sbi)); + else + BUG_ON(blk_addr < MAIN_BLKADDR(sbi) || + blk_addr >= MAX_BLKADDR(sbi)); } /* diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index aaeba346e9d7..a622eb4f59f2 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -60,7 +60,7 @@ char *fault_name[FAULT_MAX] = { static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate) { - struct f2fs_fault_info *ffi = &sbi->fault_info; + struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; if (rate) { atomic_set(&ffi->inject_ops, 0); @@ -129,6 +129,10 @@ enum { Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, + Opt_whint, + Opt_alloc, + Opt_fsync, + Opt_test_dummy_encryption, Opt_err, }; @@ -182,6 +186,10 @@ static match_table_t f2fs_tokens = { {Opt_jqfmt_vfsold, "jqfmt=vfsold"}, {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"}, {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"}, + {Opt_whint, "whint_mode=%s"}, + {Opt_alloc, "alloc_mode=%s"}, + {Opt_fsync, "fsync_mode=%s"}, + {Opt_test_dummy_encryption, "test_dummy_encryption"}, {Opt_err, NULL}, }; @@ -202,21 +210,24 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi) block_t limit = (sbi->user_block_count << 1) / 1000; /* limit is 0.2% */ - if (test_opt(sbi, RESERVE_ROOT) && sbi->root_reserved_blocks > limit) { - sbi->root_reserved_blocks = limit; + if (test_opt(sbi, RESERVE_ROOT) && + F2FS_OPTION(sbi).root_reserved_blocks > limit) { + F2FS_OPTION(sbi).root_reserved_blocks = limit; f2fs_msg(sbi->sb, KERN_INFO, "Reduce reserved blocks for root = %u", - sbi->root_reserved_blocks); + F2FS_OPTION(sbi).root_reserved_blocks); } if (!test_opt(sbi, RESERVE_ROOT) && - (!uid_eq(sbi->s_resuid, + (!uid_eq(F2FS_OPTION(sbi).s_resuid, make_kuid(&init_user_ns, F2FS_DEF_RESUID)) || - !gid_eq(sbi->s_resgid, + !gid_eq(F2FS_OPTION(sbi).s_resgid, make_kgid(&init_user_ns, F2FS_DEF_RESGID)))) f2fs_msg(sbi->sb, KERN_INFO, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root", - from_kuid_munged(&init_user_ns, sbi->s_resuid), - from_kgid_munged(&init_user_ns, sbi->s_resgid)); + from_kuid_munged(&init_user_ns, + F2FS_OPTION(sbi).s_resuid), + from_kgid_munged(&init_user_ns, + F2FS_OPTION(sbi).s_resgid)); } static void init_once(void *foo) @@ -236,7 +247,7 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype, char *qname; 
int ret = -EINVAL; - if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) { + if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) { f2fs_msg(sb, KERN_ERR, "Cannot change journaled " "quota options when quota turned on"); @@ -254,8 +265,8 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype, "Not enough memory for storing quotafile name"); return -EINVAL; } - if (sbi->s_qf_names[qtype]) { - if (strcmp(sbi->s_qf_names[qtype], qname) == 0) + if (F2FS_OPTION(sbi).s_qf_names[qtype]) { + if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0) ret = 0; else f2fs_msg(sb, KERN_ERR, @@ -268,7 +279,7 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype, "quotafile must be on filesystem root"); goto errout; } - sbi->s_qf_names[qtype] = qname; + F2FS_OPTION(sbi).s_qf_names[qtype] = qname; set_opt(sbi, QUOTA); return 0; errout: @@ -280,13 +291,13 @@ static int f2fs_clear_qf_name(struct super_block *sb, int qtype) { struct f2fs_sb_info *sbi = F2FS_SB(sb); - if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { + if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) { f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options" " when quota turned on"); return -EINVAL; } - kfree(sbi->s_qf_names[qtype]); - sbi->s_qf_names[qtype] = NULL; + kfree(F2FS_OPTION(sbi).s_qf_names[qtype]); + F2FS_OPTION(sbi).s_qf_names[qtype] = NULL; return 0; } @@ -302,15 +313,19 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) "Cannot enable project quota enforcement."); return -1; } - if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA] || - sbi->s_qf_names[PRJQUOTA]) { - if (test_opt(sbi, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) + if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || + F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] || + F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) { + if (test_opt(sbi, USRQUOTA) && + F2FS_OPTION(sbi).s_qf_names[USRQUOTA]) clear_opt(sbi, USRQUOTA); - if (test_opt(sbi, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA]) + if (test_opt(sbi, GRPQUOTA) && + F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]) clear_opt(sbi, GRPQUOTA); - if (test_opt(sbi, PRJQUOTA) && sbi->s_qf_names[PRJQUOTA]) + if (test_opt(sbi, PRJQUOTA) && + F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) clear_opt(sbi, PRJQUOTA); if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) || @@ -320,19 +335,19 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) return -1; } - if (!sbi->s_jquota_fmt) { + if (!F2FS_OPTION(sbi).s_jquota_fmt) { f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format " "not specified"); return -1; } } - if (f2fs_sb_has_quota_ino(sbi->sb) && sbi->s_jquota_fmt) { + if (f2fs_sb_has_quota_ino(sbi->sb) && F2FS_OPTION(sbi).s_jquota_fmt) { f2fs_msg(sbi->sb, KERN_INFO, "QUOTA feature is enabled, so ignore jquota_fmt"); - sbi->s_jquota_fmt = 0; + F2FS_OPTION(sbi).s_jquota_fmt = 0; } - if (f2fs_sb_has_quota_ino(sbi->sb) && sb_rdonly(sbi->sb)) { + if (f2fs_sb_has_quota_ino(sbi->sb) && f2fs_readonly(sbi->sb)) { f2fs_msg(sbi->sb, KERN_INFO, "Filesystem with quota feature cannot be mounted RDWR " "without CONFIG_QUOTA"); @@ -403,14 +418,14 @@ static int parse_options(struct super_block *sb, char *options) q = bdev_get_queue(sb->s_bdev); if (blk_queue_discard(q)) { set_opt(sbi, DISCARD); - } else if (!f2fs_sb_mounted_blkzoned(sb)) { + } else if (!f2fs_sb_has_blkzoned(sb)) { f2fs_msg(sb, KERN_WARNING, "mounting with \"discard\" option, but " "the device does not support discard"); } break; case Opt_nodiscard: - if (f2fs_sb_mounted_blkzoned(sb)) { + if (f2fs_sb_has_blkzoned(sb)) { 
f2fs_msg(sb, KERN_WARNING, "discard is required for zoned block devices"); return -EINVAL; @@ -440,7 +455,7 @@ static int parse_options(struct super_block *sb, char *options) if (args->from && match_int(args, &arg)) return -EINVAL; set_opt(sbi, INLINE_XATTR_SIZE); - sbi->inline_xattr_size = arg; + F2FS_OPTION(sbi).inline_xattr_size = arg; break; #else case Opt_user_xattr: @@ -480,7 +495,7 @@ static int parse_options(struct super_block *sb, char *options) return -EINVAL; if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE) return -EINVAL; - sbi->active_logs = arg; + F2FS_OPTION(sbi).active_logs = arg; break; case Opt_disable_ext_identify: set_opt(sbi, DISABLE_EXT_IDENTIFY); @@ -524,9 +539,9 @@ static int parse_options(struct super_block *sb, char *options) if (test_opt(sbi, RESERVE_ROOT)) { f2fs_msg(sb, KERN_INFO, "Preserve previous reserve_root=%u", - sbi->root_reserved_blocks); + F2FS_OPTION(sbi).root_reserved_blocks); } else { - sbi->root_reserved_blocks = arg; + F2FS_OPTION(sbi).root_reserved_blocks = arg; set_opt(sbi, RESERVE_ROOT); } break; @@ -539,7 +554,7 @@ static int parse_options(struct super_block *sb, char *options) "Invalid uid value %d", arg); return -EINVAL; } - sbi->s_resuid = uid; + F2FS_OPTION(sbi).s_resuid = uid; break; case Opt_resgid: if (args->from && match_int(args, &arg)) @@ -550,7 +565,7 @@ static int parse_options(struct super_block *sb, char *options) "Invalid gid value %d", arg); return -EINVAL; } - sbi->s_resgid = gid; + F2FS_OPTION(sbi).s_resgid = gid; break; case Opt_mode: name = match_strdup(&args[0]); @@ -559,7 +574,7 @@ static int parse_options(struct super_block *sb, char *options) return -ENOMEM; if (strlen(name) == 8 && !strncmp(name, "adaptive", 8)) { - if (f2fs_sb_mounted_blkzoned(sb)) { + if (f2fs_sb_has_blkzoned(sb)) { f2fs_msg(sb, KERN_WARNING, "adaptive mode is not allowed with " "zoned block device feature"); @@ -585,7 +600,7 @@ static int parse_options(struct super_block *sb, char *options) 1 << arg, BIO_MAX_PAGES); return -EINVAL; } - sbi->write_io_size_bits = arg; + F2FS_OPTION(sbi).write_io_size_bits = arg; break; case Opt_fault_injection: if (args->from && match_int(args, &arg)) @@ -646,13 +661,13 @@ static int parse_options(struct super_block *sb, char *options) return ret; break; case Opt_jqfmt_vfsold: - sbi->s_jquota_fmt = QFMT_VFS_OLD; + F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD; break; case Opt_jqfmt_vfsv0: - sbi->s_jquota_fmt = QFMT_VFS_V0; + F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0; break; case Opt_jqfmt_vfsv1: - sbi->s_jquota_fmt = QFMT_VFS_V1; + F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1; break; case Opt_noquota: clear_opt(sbi, QUOTA); @@ -679,6 +694,73 @@ static int parse_options(struct super_block *sb, char *options) "quota operations not supported"); break; #endif + case Opt_whint: + name = match_strdup(&args[0]); + if (!name) + return -ENOMEM; + if (strlen(name) == 10 && + !strncmp(name, "user-based", 10)) { + F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER; + } else if (strlen(name) == 3 && + !strncmp(name, "off", 3)) { + F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF; + } else if (strlen(name) == 8 && + !strncmp(name, "fs-based", 8)) { + F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS; + } else { + kfree(name); + return -EINVAL; + } + kfree(name); + break; + case Opt_alloc: + name = match_strdup(&args[0]); + if (!name) + return -ENOMEM; + + if (strlen(name) == 7 && + !strncmp(name, "default", 7)) { + F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; + } else if (strlen(name) == 5 && + !strncmp(name, "reuse", 5)) { + 
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; + } else { + kfree(name); + return -EINVAL; + } + kfree(name); + break; + case Opt_fsync: + name = match_strdup(&args[0]); + if (!name) + return -ENOMEM; + if (strlen(name) == 5 && + !strncmp(name, "posix", 5)) { + F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; + } else if (strlen(name) == 6 && + !strncmp(name, "strict", 6)) { + F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT; + } else { + kfree(name); + return -EINVAL; + } + kfree(name); + break; + case Opt_test_dummy_encryption: +#ifdef CONFIG_F2FS_FS_ENCRYPTION + if (!f2fs_sb_has_encrypt(sb)) { + f2fs_msg(sb, KERN_ERR, "Encrypt feature is off"); + return -EINVAL; + } + + F2FS_OPTION(sbi).test_dummy_encryption = true; + f2fs_msg(sb, KERN_INFO, + "Test dummy encryption mode enabled"); +#else + f2fs_msg(sb, KERN_INFO, + "Test dummy encryption mount option ignored"); +#endif + break; default: f2fs_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" or missing value", @@ -699,14 +781,22 @@ static int parse_options(struct super_block *sb, char *options) } if (test_opt(sbi, INLINE_XATTR_SIZE)) { + if (!f2fs_sb_has_extra_attr(sb) || + !f2fs_sb_has_flexible_inline_xattr(sb)) { + f2fs_msg(sb, KERN_ERR, + "extra_attr or flexible_inline_xattr " + "feature is off"); + return -EINVAL; + } if (!test_opt(sbi, INLINE_XATTR)) { f2fs_msg(sb, KERN_ERR, "inline_xattr_size option should be " "set with inline_xattr option"); return -EINVAL; } - if (!sbi->inline_xattr_size || - sbi->inline_xattr_size >= DEF_ADDRS_PER_INODE - + if (!F2FS_OPTION(sbi).inline_xattr_size || + F2FS_OPTION(sbi).inline_xattr_size >= + DEF_ADDRS_PER_INODE - F2FS_TOTAL_EXTRA_ATTR_SIZE - DEF_INLINE_RESERVED_SIZE - DEF_MIN_INLINE_SIZE) { @@ -715,6 +805,12 @@ static int parse_options(struct super_block *sb, char *options) return -EINVAL; } } + + /* Not pass down write hints if the number of active logs is lesser + * than NR_CURSEG_TYPE. 
+ */ + if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE) + F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF; return 0; } @@ -731,7 +827,6 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) /* Initialize f2fs-specific inode info */ atomic_set(&fi->dirty_pages, 0); fi->i_current_depth = 1; - fi->i_advise = 0; init_rwsem(&fi->i_sem); INIT_LIST_HEAD(&fi->dirty_list); INIT_LIST_HEAD(&fi->gdirty_list); @@ -743,10 +838,6 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) init_rwsem(&fi->i_mmap_sem); init_rwsem(&fi->i_xattr_sem); -#ifdef CONFIG_QUOTA - memset(&fi->i_dquot, 0, sizeof(fi->i_dquot)); - fi->i_reserved_quota = 0; -#endif /* Will be used by directory only */ fi->i_dir_level = F2FS_SB(sb)->dir_level; @@ -957,7 +1048,7 @@ static void f2fs_put_super(struct super_block *sb) mempool_destroy(sbi->write_io_dummy); #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) - kfree(sbi->s_qf_names[i]); + kfree(F2FS_OPTION(sbi).s_qf_names[i]); #endif destroy_percpu_info(sbi); for (i = 0; i < NR_PAGE_TYPE; i++) @@ -1071,8 +1162,9 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_blocks = total_count - start_count; buf->f_bfree = user_block_count - valid_user_blocks(sbi) - sbi->current_reserved_blocks; - if (buf->f_bfree > sbi->root_reserved_blocks) - buf->f_bavail = buf->f_bfree - sbi->root_reserved_blocks; + if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks) + buf->f_bavail = buf->f_bfree - + F2FS_OPTION(sbi).root_reserved_blocks; else buf->f_bavail = 0; @@ -1107,10 +1199,10 @@ static inline void f2fs_show_quota_options(struct seq_file *seq, #ifdef CONFIG_QUOTA struct f2fs_sb_info *sbi = F2FS_SB(sb); - if (sbi->s_jquota_fmt) { + if (F2FS_OPTION(sbi).s_jquota_fmt) { char *fmtname = ""; - switch (sbi->s_jquota_fmt) { + switch (F2FS_OPTION(sbi).s_jquota_fmt) { case QFMT_VFS_OLD: fmtname = "vfsold"; break; @@ -1124,14 +1216,17 @@ static inline void f2fs_show_quota_options(struct seq_file *seq, seq_printf(seq, ",jqfmt=%s", fmtname); } - if (sbi->s_qf_names[USRQUOTA]) - seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]); + if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA]) + seq_show_option(seq, "usrjquota", + F2FS_OPTION(sbi).s_qf_names[USRQUOTA]); - if (sbi->s_qf_names[GRPQUOTA]) - seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]); + if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]) + seq_show_option(seq, "grpjquota", + F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]); - if (sbi->s_qf_names[PRJQUOTA]) - seq_show_option(seq, "prjjquota", sbi->s_qf_names[PRJQUOTA]); + if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) + seq_show_option(seq, "prjjquota", + F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]); #endif } @@ -1166,7 +1261,7 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_puts(seq, ",noinline_xattr"); if (test_opt(sbi, INLINE_XATTR_SIZE)) seq_printf(seq, ",inline_xattr_size=%u", - sbi->inline_xattr_size); + F2FS_OPTION(sbi).inline_xattr_size); #endif #ifdef CONFIG_F2FS_FS_POSIX_ACL if (test_opt(sbi, POSIX_ACL)) @@ -1202,18 +1297,20 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_puts(seq, "adaptive"); else if (test_opt(sbi, LFS)) seq_puts(seq, "lfs"); - seq_printf(seq, ",active_logs=%u", sbi->active_logs); + seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs); if (test_opt(sbi, RESERVE_ROOT)) seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u", - sbi->root_reserved_blocks, - from_kuid_munged(&init_user_ns, sbi->s_resuid), - from_kgid_munged(&init_user_ns, sbi->s_resgid)); + 
F2FS_OPTION(sbi).root_reserved_blocks, + from_kuid_munged(&init_user_ns, + F2FS_OPTION(sbi).s_resuid), + from_kgid_munged(&init_user_ns, + F2FS_OPTION(sbi).s_resgid)); if (F2FS_IO_SIZE_BITS(sbi)) seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi)); #ifdef CONFIG_F2FS_FAULT_INJECTION if (test_opt(sbi, FAULT_INJECTION)) seq_printf(seq, ",fault_injection=%u", - sbi->fault_info.inject_rate); + F2FS_OPTION(sbi).fault_info.inject_rate); #endif #ifdef CONFIG_QUOTA if (test_opt(sbi, QUOTA)) @@ -1226,15 +1323,37 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) seq_puts(seq, ",prjquota"); #endif f2fs_show_quota_options(seq, sbi->sb); + if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) + seq_printf(seq, ",whint_mode=%s", "user-based"); + else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) + seq_printf(seq, ",whint_mode=%s", "fs-based"); +#ifdef CONFIG_F2FS_FS_ENCRYPTION + if (F2FS_OPTION(sbi).test_dummy_encryption) + seq_puts(seq, ",test_dummy_encryption"); +#endif + + if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT) + seq_printf(seq, ",alloc_mode=%s", "default"); + else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) + seq_printf(seq, ",alloc_mode=%s", "reuse"); + if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX) + seq_printf(seq, ",fsync_mode=%s", "posix"); + else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) + seq_printf(seq, ",fsync_mode=%s", "strict"); return 0; } static void default_options(struct f2fs_sb_info *sbi) { /* init some FS parameters */ - sbi->active_logs = NR_CURSEG_TYPE; - sbi->inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; + F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE; + F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; + F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF; + F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; + F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; + F2FS_OPTION(sbi).test_dummy_encryption = false; + sbi->readdir_ra = 1; set_opt(sbi, BG_GC); set_opt(sbi, INLINE_XATTR); @@ -1244,7 +1363,7 @@ static void default_options(struct f2fs_sb_info *sbi) set_opt(sbi, NOHEAP); sbi->sb->s_flags |= MS_LAZYTIME; set_opt(sbi, FLUSH_MERGE); - if (f2fs_sb_mounted_blkzoned(sbi->sb)) { + if (f2fs_sb_has_blkzoned(sbi->sb)) { set_opt_mode(sbi, F2FS_MOUNT_LFS); set_opt(sbi, DISCARD); } else { @@ -1271,16 +1390,11 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) struct f2fs_sb_info *sbi = F2FS_SB(sb); struct f2fs_mount_info org_mount_opt; unsigned long old_sb_flags; - int err, active_logs; + int err; bool need_restart_gc = false; bool need_stop_gc = false; bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE); -#ifdef CONFIG_F2FS_FAULT_INJECTION - struct f2fs_fault_info ffi = sbi->fault_info; -#endif #ifdef CONFIG_QUOTA - int s_jquota_fmt; - char *s_qf_names[MAXQUOTAS]; int i, j; #endif @@ -1290,21 +1404,21 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) */ org_mount_opt = sbi->mount_opt; old_sb_flags = sb->s_flags; - active_logs = sbi->active_logs; #ifdef CONFIG_QUOTA - s_jquota_fmt = sbi->s_jquota_fmt; + org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) { - if (sbi->s_qf_names[i]) { - s_qf_names[i] = kstrdup(sbi->s_qf_names[i], - GFP_KERNEL); - if (!s_qf_names[i]) { + if (F2FS_OPTION(sbi).s_qf_names[i]) { + org_mount_opt.s_qf_names[i] = + kstrdup(F2FS_OPTION(sbi).s_qf_names[i], + GFP_KERNEL); + if (!org_mount_opt.s_qf_names[i]) { for (j = 0; j < i; j++) - kfree(s_qf_names[j]); + kfree(org_mount_opt.s_qf_names[j]); return 
-ENOMEM; } } else { - s_qf_names[i] = NULL; + org_mount_opt.s_qf_names[i] = NULL; } } #endif @@ -1374,7 +1488,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) need_stop_gc = true; } - if (*flags & MS_RDONLY) { + if (*flags & MS_RDONLY || + F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) { writeback_inodes_sb(sb, WB_REASON_SYNC); sync_inodes_sb(sb); @@ -1400,7 +1515,7 @@ skip: #ifdef CONFIG_QUOTA /* Release old quota file names */ for (i = 0; i < MAXQUOTAS; i++) - kfree(s_qf_names[i]); + kfree(org_mount_opt.s_qf_names[i]); #endif /* Update the POSIXACL Flag */ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | @@ -1418,18 +1533,14 @@ restore_gc: } restore_opts: #ifdef CONFIG_QUOTA - sbi->s_jquota_fmt = s_jquota_fmt; + F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) { - kfree(sbi->s_qf_names[i]); - sbi->s_qf_names[i] = s_qf_names[i]; + kfree(F2FS_OPTION(sbi).s_qf_names[i]); + F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i]; } #endif sbi->mount_opt = org_mount_opt; - sbi->active_logs = active_logs; sb->s_flags = old_sb_flags; -#ifdef CONFIG_F2FS_FAULT_INJECTION - sbi->fault_info = ffi; -#endif return err; } @@ -1457,7 +1568,7 @@ static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data, while (toread > 0) { tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread); repeat: - page = read_mapping_page(mapping, blkidx, NULL); + page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS); if (IS_ERR(page)) { if (PTR_ERR(page) == -ENOMEM) { congestion_wait(BLK_RW_ASYNC, HZ/50); @@ -1551,8 +1662,8 @@ static qsize_t *f2fs_get_reserved_space(struct inode *inode) static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type) { - return dquot_quota_on_mount(sbi->sb, sbi->s_qf_names[type], - sbi->s_jquota_fmt, type); + return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type], + F2FS_OPTION(sbi).s_jquota_fmt, type); } int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly) @@ -1571,7 +1682,7 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly) } for (i = 0; i < MAXQUOTAS; i++) { - if (sbi->s_qf_names[i]) { + if (F2FS_OPTION(sbi).s_qf_names[i]) { err = f2fs_quota_on_mount(sbi, i); if (!err) { enabled = 1; @@ -1801,11 +1912,28 @@ static int f2fs_get_context(struct inode *inode, void *ctx, size_t len) static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len, void *fs_data) { + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + + /* + * Encrypting the root directory is not allowed because fsck + * expects lost+found directory to exist and remain unencrypted + * if LOST_FOUND feature is enabled. + * + */ + if (f2fs_sb_has_lost_found(sbi->sb) && + inode->i_ino == F2FS_ROOT_INO(sbi)) + return -EPERM; + return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len, fs_data, XATTR_CREATE); } +static bool f2fs_dummy_context(struct inode *inode) +{ + return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode)); +} + static unsigned f2fs_max_namelen(struct inode *inode) { return S_ISLNK(inode->i_mode) ? 
@@ -1816,6 +1944,7 @@ static const struct fscrypt_operations f2fs_cryptops = { .key_prefix = "f2fs:", .get_context = f2fs_get_context, .set_context = f2fs_set_context, + .dummy_context = f2fs_dummy_context, .empty_dir = f2fs_empty_dir, .max_namelen = f2fs_max_namelen, }; @@ -1898,7 +2027,6 @@ static int __f2fs_commit_super(struct buffer_head *bh, lock_buffer(bh); if (super) memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super)); - set_buffer_uptodate(bh); set_buffer_dirty(bh); unlock_buffer(bh); @@ -2185,6 +2313,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi) sbi->dirty_device = 0; spin_lock_init(&sbi->dev_lock); + + init_rwsem(&sbi->sb_lock); } static int init_percpu_info(struct f2fs_sb_info *sbi) @@ -2210,7 +2340,7 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) unsigned int n = 0; int err = -EIO; - if (!f2fs_sb_mounted_blkzoned(sbi->sb)) + if (!f2fs_sb_has_blkzoned(sbi->sb)) return 0; if (sbi->blocks_per_blkz && sbi->blocks_per_blkz != @@ -2338,7 +2468,7 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) } /* write back-up superblock first */ - bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1); + bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1); if (!bh) return -EIO; err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); @@ -2349,7 +2479,7 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) return err; /* write current valid superblock */ - bh = sb_getblk(sbi->sb, sbi->valid_super_block); + bh = sb_bread(sbi->sb, sbi->valid_super_block); if (!bh) return -EIO; err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); @@ -2421,7 +2551,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi) #ifdef CONFIG_BLK_DEV_ZONED if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM && - !f2fs_sb_mounted_blkzoned(sbi->sb)) { + !f2fs_sb_has_blkzoned(sbi->sb)) { f2fs_msg(sbi->sb, KERN_ERR, "Zoned block device feature not enabled\n"); return -EINVAL; @@ -2455,6 +2585,18 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi) return 0; } +static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi) +{ + struct f2fs_sm_info *sm_i = SM_I(sbi); + + /* adjust parameters according to the volume size */ + if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) { + F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; + sm_i->dcc_info->discard_granularity = 1; + sm_i->ipu_policy = 1 << F2FS_IPU_FORCE; + } +} + static int f2fs_fill_super(struct super_block *sb, void *data, int silent) { struct f2fs_sb_info *sbi; @@ -2502,8 +2644,8 @@ try_onemore: sb->s_fs_info = sbi; sbi->raw_super = raw_super; - sbi->s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID); - sbi->s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID); + F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID); + F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID); /* precompute checksum seed for metadata */ if (f2fs_sb_has_inode_chksum(sb)) @@ -2516,7 +2658,7 @@ try_onemore: * devices, but mandatory for host-managed zoned block devices. */ #ifndef CONFIG_BLK_DEV_ZONED - if (f2fs_sb_mounted_blkzoned(sb)) { + if (f2fs_sb_has_blkzoned(sb)) { f2fs_msg(sb, KERN_ERR, "Zoned block device support is not enabled\n"); err = -EOPNOTSUPP; @@ -2732,7 +2874,7 @@ try_onemore: * Turn on quotas which were not enabled for read-only mounts if * filesystem has quota feature, so that they are updated correctly. 
*/ - if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb)) { + if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) { err = f2fs_enable_quotas(sb); if (err) { f2fs_msg(sb, KERN_ERR, @@ -2807,6 +2949,8 @@ skip_recovery: f2fs_join_shrinker(sbi); + f2fs_tuning_parameters(sbi); + f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx", cur_cp_version(F2FS_CKPT(sbi))); f2fs_update_time(sbi, CP_TIME); @@ -2815,7 +2959,7 @@ skip_recovery: free_meta: #ifdef CONFIG_QUOTA - if (f2fs_sb_has_quota_ino(sb) && !sb_rdonly(sb)) + if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) f2fs_quota_off_umount(sbi->sb); #endif f2fs_sync_inode_meta(sbi); @@ -2859,7 +3003,7 @@ free_bio_info: free_options: #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) - kfree(sbi->s_qf_names[i]); + kfree(F2FS_OPTION(sbi).s_qf_names[i]); #endif kfree(options); free_sb_buf: diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index d978c7b6ea04..f33a56d6e6dd 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -58,7 +58,7 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type) #ifdef CONFIG_F2FS_FAULT_INJECTION else if (struct_type == FAULT_INFO_RATE || struct_type == FAULT_INFO_TYPE) - return (unsigned char *)&sbi->fault_info; + return (unsigned char *)&F2FS_OPTION(sbi).fault_info; #endif return NULL; } @@ -92,10 +92,10 @@ static ssize_t features_show(struct f2fs_attr *a, if (!sb->s_bdev->bd_part) return snprintf(buf, PAGE_SIZE, "0\n"); - if (f2fs_sb_has_crypto(sb)) + if (f2fs_sb_has_encrypt(sb)) len += snprintf(buf, PAGE_SIZE - len, "%s", "encryption"); - if (f2fs_sb_mounted_blkzoned(sb)) + if (f2fs_sb_has_blkzoned(sb)) len += snprintf(buf + len, PAGE_SIZE - len, "%s%s", len ? ", " : "", "blkzoned"); if (f2fs_sb_has_extra_attr(sb)) @@ -116,6 +116,9 @@ static ssize_t features_show(struct f2fs_attr *a, if (f2fs_sb_has_inode_crtime(sb)) len += snprintf(buf + len, PAGE_SIZE - len, "%s%s", len ? ", " : "", "inode_crtime"); + if (f2fs_sb_has_lost_found(sb)) + len += snprintf(buf + len, PAGE_SIZE - len, "%s%s", + len ? 
", " : "", "lost_found"); len += snprintf(buf + len, PAGE_SIZE - len, "\n"); return len; } @@ -136,6 +139,27 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a, if (!ptr) return -EINVAL; + if (!strcmp(a->attr.name, "extension_list")) { + __u8 (*extlist)[F2FS_EXTENSION_LEN] = + sbi->raw_super->extension_list; + int cold_count = le32_to_cpu(sbi->raw_super->extension_count); + int hot_count = sbi->raw_super->hot_ext_count; + int len = 0, i; + + len += snprintf(buf + len, PAGE_SIZE - len, + "cold file extenstion:\n"); + for (i = 0; i < cold_count; i++) + len += snprintf(buf + len, PAGE_SIZE - len, "%s\n", + extlist[i]); + + len += snprintf(buf + len, PAGE_SIZE - len, + "hot file extenstion:\n"); + for (i = cold_count; i < cold_count + hot_count; i++) + len += snprintf(buf + len, PAGE_SIZE - len, "%s\n", + extlist[i]); + return len; + } + ui = (unsigned int *)(ptr + a->offset); return snprintf(buf, PAGE_SIZE, "%u\n", *ui); @@ -154,6 +178,41 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a, if (!ptr) return -EINVAL; + if (!strcmp(a->attr.name, "extension_list")) { + const char *name = strim((char *)buf); + bool set = true, hot; + + if (!strncmp(name, "[h]", 3)) + hot = true; + else if (!strncmp(name, "[c]", 3)) + hot = false; + else + return -EINVAL; + + name += 3; + + if (*name == '!') { + name++; + set = false; + } + + if (strlen(name) >= F2FS_EXTENSION_LEN) + return -EINVAL; + + down_write(&sbi->sb_lock); + + ret = update_extension_list(sbi, name, hot, set); + if (ret) + goto out; + + ret = f2fs_commit_super(sbi, false); + if (ret) + update_extension_list(sbi, name, hot, !set); +out: + up_write(&sbi->sb_lock); + return ret ? ret : count; + } + ui = (unsigned int *)(ptr + a->offset); ret = kstrtoul(skip_spaces(buf), 0, &t); @@ -166,7 +225,7 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a, if (a->struct_type == RESERVED_BLOCKS) { spin_lock(&sbi->stat_lock); if (t > (unsigned long)(sbi->user_block_count - - sbi->root_reserved_blocks)) { + F2FS_OPTION(sbi).root_reserved_blocks)) { spin_unlock(&sbi->stat_lock); return -EINVAL; } @@ -236,6 +295,7 @@ enum feat_id { FEAT_FLEXIBLE_INLINE_XATTR, FEAT_QUOTA_INO, FEAT_INODE_CRTIME, + FEAT_LOST_FOUND, }; static ssize_t f2fs_feature_show(struct f2fs_attr *a, @@ -251,6 +311,7 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a, case FEAT_FLEXIBLE_INLINE_XATTR: case FEAT_QUOTA_INO: case FEAT_INODE_CRTIME: + case FEAT_LOST_FOUND: return snprintf(buf, PAGE_SIZE, "supported\n"); } return 0; @@ -307,6 +368,7 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold); +F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list); #ifdef CONFIG_F2FS_FAULT_INJECTION F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate); F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type); @@ -329,6 +391,7 @@ F2FS_FEATURE_RO_ATTR(inode_checksum, FEAT_INODE_CHECKSUM); F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR); F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO); F2FS_FEATURE_RO_ATTR(inode_crtime, FEAT_INODE_CRTIME); +F2FS_FEATURE_RO_ATTR(lost_found, FEAT_LOST_FOUND); #define ATTR_LIST(name) (&f2fs_attr_##name.attr) static struct attribute *f2fs_attrs[] = { @@ -357,6 +420,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(iostat_enable), ATTR_LIST(readdir_ra), 
ATTR_LIST(gc_pin_file_thresh), + ATTR_LIST(extension_list), #ifdef CONFIG_F2FS_FAULT_INJECTION ATTR_LIST(inject_rate), ATTR_LIST(inject_type), @@ -383,6 +447,7 @@ static struct attribute *f2fs_feat_attrs[] = { ATTR_LIST(flexible_inline_xattr), ATTR_LIST(quota_ino), ATTR_LIST(inode_crtime), + ATTR_LIST(lost_found), NULL, }; diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ca7d46de5ca3..028f38f0906c 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1947,8 +1947,10 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud, err = copy_out_args(cs, &req->out, nbytes); if (req->in.h.opcode == FUSE_CANONICAL_PATH) { - req->out.h.error = kern_path((char *)req->out.args[0].value, 0, - req->canonical_path); + char *path = (char *)req->out.args[0].value; + + path[req->out.args[0].size - 1] = 0; + req->out.h.error = kern_path(path, 0, req->canonical_path); } fuse_copy_finish(cs); diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 5f31ebd96c06..a2edb0049eb5 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -129,6 +129,8 @@ lockd(void *vrqstp) { int err = 0; struct svc_rqst *rqstp = vrqstp; + struct net *net = &init_net; + struct lockd_net *ln = net_generic(net, lockd_net_id); /* try_to_freeze() is called from svc_recv() */ set_freezable(); @@ -173,6 +175,8 @@ lockd(void *vrqstp) if (nlmsvc_ops) nlmsvc_invalidate_all(); nlm_shutdown_hosts(); + cancel_delayed_work_sync(&ln->grace_period_end); + locks_end_grace(&ln->lockd_manager); return 0; } @@ -267,8 +271,6 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net) if (ln->nlmsvc_users) { if (--ln->nlmsvc_users == 0) { nlm_shutdown_hosts_net(net); - cancel_delayed_work_sync(&ln->grace_period_end); - locks_end_grace(&ln->lockd_manager); svc_shutdown_net(serv, net); dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net); } diff --git a/fs/namei.c b/fs/namei.c index 1f2e81c76021..e82d2955de00 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -585,9 +585,10 @@ static int __nd_alloc_stack(struct nameidata *nd) static bool path_connected(const struct path *path) { struct vfsmount *mnt = path->mnt; + struct super_block *sb = mnt->mnt_sb; - /* Only bind mounts can have disconnected paths */ - if (mnt->mnt_root == mnt->mnt_sb->s_root) + /* Bind mounts and multi-root filesystems can have disconnected paths */ + if (!(sb->s_iflags & SB_I_MULTIROOT) && (mnt->mnt_root == sb->s_root)) return true; return is_subdir(path->dentry, mnt->mnt_root); diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c index 88dbbc9fcf4d..f571570a2e72 100644 --- a/fs/ncpfs/ncplib_kernel.c +++ b/fs/ncpfs/ncplib_kernel.c @@ -980,6 +980,10 @@ ncp_read_kernel(struct ncp_server *server, const char *file_id, goto out; } *bytes_read = ncp_reply_be16(server, 0); + if (*bytes_read > to_read) { + result = -EINVAL; + goto out; + } source = ncp_reply_data(server, 2 + (offset & 1)); memcpy(target, source, *bytes_read); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 5fd3cf54b2b3..211440722e24 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -86,9 +86,9 @@ struct nfs_direct_req { struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX]; int mirror_count; + loff_t io_start; /* Start offset for I/O */ ssize_t count, /* bytes actually processed */ bytes_left, /* bytes left to be sent */ - io_start, /* start of IO */ error; /* any reported error */ struct completion completion; /* wait for i/o completion */ diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 54313322ee5b..c8e90152b61b 100644 --- 
a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -461,6 +461,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh, goto out_err_free; /* fh */ + rc = -EIO; p = xdr_inline_decode(&stream, 4); if (!p) goto out_err_free; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8ef6f70c9e25..0f397e62de5a 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3025,6 +3025,7 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f .rpc_resp = &res, }; int status; + int i; bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_FH_EXPIRE_TYPE | @@ -3090,8 +3091,13 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; server->cache_consistency_bitmask[2] = 0; + + /* Avoid a regression due to buggy server */ + for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++) + res.exclcreat_bitmask[i] &= res.attr_bitmask[i]; memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, sizeof(server->exclcreat_bitmask)); + server->acl_bitmask = res.acl_bitmask; server->fh_expire_type = res.fh_expire_type; } @@ -7670,6 +7676,12 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf /* fall through */ case -NFS4ERR_RETRY_UNCACHED_REP: return -EAGAIN; + case -NFS4ERR_BADSESSION: + case -NFS4ERR_DEADSESSION: + case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: + nfs4_schedule_session_recovery(clp->cl_session, + task->tk_status); + break; default: nfs4_schedule_lease_recovery(clp); } @@ -7748,7 +7760,6 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp, if (status == 0) status = task->tk_status; rpc_put_task(task); - return 0; out: dprintk("<-- %s status=%d\n", __func__, status); return status; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 9a0b219ff74d..83fba40396ae 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1593,13 +1593,14 @@ static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp) nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot); } -static void nfs4_reclaim_complete(struct nfs_client *clp, +static int nfs4_reclaim_complete(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops, struct rpc_cred *cred) { /* Notify the server we're done reclaiming our state */ if (ops->reclaim_complete) - (void)ops->reclaim_complete(clp, cred); + return ops->reclaim_complete(clp, cred); + return 0; } static void nfs4_clear_reclaim_server(struct nfs_server *server) @@ -1646,13 +1647,16 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp) { const struct nfs4_state_recovery_ops *ops; struct rpc_cred *cred; + int err; if (!nfs4_state_clear_reclaim_reboot(clp)) return; ops = clp->cl_mvops->reboot_recovery_ops; cred = nfs4_get_clid_cred(clp); - nfs4_reclaim_complete(clp, ops, cred); + err = nfs4_reclaim_complete(clp, ops, cred); put_rpccred(cred); + if (err == -NFS4ERR_CONN_NOT_BOUND_TO_SESSION) + set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); } static void nfs_delegation_clear_all(struct nfs_client *clp) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 8ebfdd00044b..4bdc2fc86280 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -1273,8 +1273,10 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) mirror = &desc->pg_mirrors[midx]; if (!list_empty(&mirror->pg_list)) { prev = 
nfs_list_entry(mirror->pg_list.prev); - if (index != prev->wb_index + 1) - nfs_pageio_complete_mirror(desc, midx); + if (index != prev->wb_index + 1) { + nfs_pageio_complete(desc); + break; + } } } } diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 3149f7e58d6f..62f358f67764 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2581,6 +2581,8 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server, /* initial superblock/root creation */ mount_info->fill_super(s, mount_info); nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned); + if (!(server->flags & NFS_MOUNT_UNSHARED)) + s->s_iflags |= SB_I_MULTIROOT; } mntroot = nfs_get_root(s, mount_info->mntfh, dev_name); diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 209dbfc50cd4..bfbee8ddf978 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1245,14 +1245,14 @@ nfsd4_layoutget(struct svc_rqst *rqstp, const struct nfsd4_layout_ops *ops; struct nfs4_layout_stateid *ls; __be32 nfserr; - int accmode; + int accmode = NFSD_MAY_READ_IF_EXEC; switch (lgp->lg_seg.iomode) { case IOMODE_READ: - accmode = NFSD_MAY_READ; + accmode |= NFSD_MAY_READ; break; case IOMODE_RW: - accmode = NFSD_MAY_READ | NFSD_MAY_WRITE; + accmode |= NFSD_MAY_READ | NFSD_MAY_WRITE; break; default: dprintk("%s: invalid iomode %d\n", diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 91e0c5429b4d..17138a97f306 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -92,6 +92,12 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, err = follow_down(&path); if (err < 0) goto out; + if (path.mnt == exp->ex_path.mnt && path.dentry == dentry && + nfsd_mountpoint(dentry, exp) == 2) { + /* This is only a mountpoint in some other namespace */ + path_put(&path); + goto out; + } exp2 = rqst_exp_get_by_name(rqstp, &path); if (IS_ERR(exp2)) { @@ -165,16 +171,26 @@ static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, st /* * For nfsd purposes, we treat V4ROOT exports as though there was an * export at *every* directory. + * We return: + * '1' if this dentry *must* be an export point, + * '2' if it might be, if there is really a mount here, and + * '0' if there is no chance of an export point here. */ int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp) { - if (d_mountpoint(dentry)) + if (!d_inode(dentry)) + return 0; + if (exp->ex_flags & NFSEXP_V4ROOT) return 1; if (nfsd4_is_junction(dentry)) return 1; - if (!(exp->ex_flags & NFSEXP_V4ROOT)) - return 0; - return d_inode(dentry) != NULL; + if (d_mountpoint(dentry)) + /* + * Might only be a mountpoint in a different namespace, + * but we need to check. 
+ */ + return 2; + return 0; } __be32 diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 220b04f04523..985a4cdae06d 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -272,6 +272,16 @@ ssize_t ovl_getxattr(struct dentry *dentry, const char *name, return vfs_getxattr(realpath.dentry, name, value, size); } +static bool ovl_can_list(const char *s) +{ + /* List all non-trusted xatts */ + if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0) + return true; + + /* Never list trusted.overlay, list other trusted for superuser only */ + return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN); +} + ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) { struct path realpath; @@ -296,7 +306,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) return -EIO; len -= slen; - if (ovl_is_private_xattr(s)) { + if (!ovl_can_list(s)) { res -= slen; memmove(s, s + slen, len); } else { diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig index 1ade1206bb89..08dce22afec1 100644 --- a/fs/proc/Kconfig +++ b/fs/proc/Kconfig @@ -81,3 +81,10 @@ config PROC_CHILDREN Say Y if you are running any user-space software which takes benefit from this interface. For example, rkt is such a piece of software. + +config PROC_UID + bool "Include /proc/uid/ files" + default y + depends on PROC_FS && RT_MUTEXES + help + Provides aggregated per-uid information under /proc/uid. diff --git a/fs/proc/Makefile b/fs/proc/Makefile index 7151ea428041..baab1daf7585 100644 --- a/fs/proc/Makefile +++ b/fs/proc/Makefile @@ -24,6 +24,7 @@ proc-y += softirqs.o proc-y += namespaces.o proc-y += self.o proc-y += thread_self.o +proc-$(CONFIG_PROC_UID) += uid.o proc-$(CONFIG_PROC_SYSCTL) += proc_sysctl.o proc-$(CONFIG_NET) += proc_net.o proc-$(CONFIG_PROC_KCORE) += kcore.o diff --git a/fs/proc/base.c b/fs/proc/base.c index cdd820771425..072dac45b102 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -87,6 +87,7 @@ #include <linux/slab.h> #include <linux/flex_array.h> #include <linux/posix-timers.h> +#include <linux/cpufreq_times.h> #ifdef CONFIG_HARDWALL #include <asm/hardwall.h> #endif @@ -3089,8 +3090,8 @@ static const struct pid_entry tgid_base_stuff[] = { ONE("cgroup", S_IRUGO, proc_cgroup_show), #endif ONE("oom_score", S_IRUGO, proc_oom_score), - REG("oom_adj", S_IRUSR, proc_oom_adj_operations), - REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations), + REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), + REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), #ifdef CONFIG_AUDITSYSCALL REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations), REG("sessionid", S_IRUGO, proc_sessionid_operations), @@ -3117,6 +3118,9 @@ static const struct pid_entry tgid_base_stuff[] = { REG("timers", S_IRUGO, proc_timers_operations), #endif REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations), +#ifdef CONFIG_CPU_FREQ_TIMES + ONE("time_in_state", 0444, proc_time_in_state_show), +#endif }; static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) @@ -3480,8 +3484,8 @@ static const struct pid_entry tid_base_stuff[] = { ONE("cgroup", S_IRUGO, proc_cgroup_show), #endif ONE("oom_score", S_IRUGO, proc_oom_score), - REG("oom_adj", S_IRUSR, proc_oom_adj_operations), - REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations), + REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), + REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations), #ifdef CONFIG_AUDITSYSCALL REG("loginuid", S_IWUSR|S_IRUGO, 
proc_loginuid_operations), REG("sessionid", S_IRUGO, proc_sessionid_operations), @@ -3501,6 +3505,9 @@ static const struct pid_entry tid_base_stuff[] = { REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations), REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations), #endif +#ifdef CONFIG_CPU_FREQ_TIMES + ONE("time_in_state", 0444, proc_time_in_state_show), +#endif }; static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx) diff --git a/fs/proc/internal.h b/fs/proc/internal.h index ef2b01533c97..1f01beda3bf3 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -257,6 +257,15 @@ static inline void sysctl_head_put(struct ctl_table_header *head) { } #endif /* + * uid.c + */ +#ifdef CONFIG_PROC_UID +extern int proc_uid_init(void); +#else +static inline void proc_uid_init(void) { } +#endif + +/* * proc_tty.c */ #ifdef CONFIG_TTY diff --git a/fs/proc/root.c b/fs/proc/root.c index ec649c92d270..1d0b6a125563 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -182,7 +182,7 @@ void __init proc_root_init(void) proc_symlink("mounts", NULL, "self/mounts"); proc_net_init(); - + proc_uid_init(); #ifdef CONFIG_SYSVIPC proc_mkdir("sysvipc", NULL); #endif diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f6c512a550e5..99c4ffaa43a8 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -305,24 +305,15 @@ static int do_maps_open(struct inode *inode, struct file *file, * /proc/PID/maps that is the stack of the main task. */ static int is_stack(struct proc_maps_private *priv, - struct vm_area_struct *vma, int is_pid) + struct vm_area_struct *vma) { - int stack = 0; - - if (is_pid) { - stack = vma->vm_start <= vma->vm_mm->start_stack && - vma->vm_end >= vma->vm_mm->start_stack; - } else { - struct inode *inode = priv->inode; - struct task_struct *task; - - rcu_read_lock(); - task = pid_task(proc_pid(inode), PIDTYPE_PID); - if (task) - stack = vma_is_stack_for_task(vma, task); - rcu_read_unlock(); - } - return stack; + /* + * We make no effort to guess what a given thread considers to be + * its "stack". It's not even well-defined for programs written in + * languages like Go. 
+ */ + return vma->vm_start <= vma->vm_mm->start_stack && + vma->vm_end >= vma->vm_mm->start_stack; } static void @@ -389,7 +380,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) goto done; } - if (is_stack(priv, vma, is_pid)) { + if (is_stack(priv, vma)) { name = "[stack]"; goto done; } @@ -1859,7 +1850,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) seq_file_path(m, file, "\n\t= "); } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { seq_puts(m, " heap"); - } else if (is_stack(proc_priv, vma, is_pid)) { + } else if (is_stack(proc_priv, vma)) { seq_puts(m, " stack"); } diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index faacb0c0d857..37175621e890 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -124,25 +124,17 @@ unsigned long task_statm(struct mm_struct *mm, } static int is_stack(struct proc_maps_private *priv, - struct vm_area_struct *vma, int is_pid) + struct vm_area_struct *vma) { struct mm_struct *mm = vma->vm_mm; - int stack = 0; - - if (is_pid) { - stack = vma->vm_start <= mm->start_stack && - vma->vm_end >= mm->start_stack; - } else { - struct inode *inode = priv->inode; - struct task_struct *task; - - rcu_read_lock(); - task = pid_task(proc_pid(inode), PIDTYPE_PID); - if (task) - stack = vma_is_stack_for_task(vma, task); - rcu_read_unlock(); - } - return stack; + + /* + * We make no effort to guess what a given thread considers to be + * its "stack". It's not even well-defined for programs written in + * languages like Go. + */ + return vma->vm_start <= mm->start_stack && + vma->vm_end >= mm->start_stack; } /* @@ -184,7 +176,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, if (file) { seq_pad(m, ' '); seq_file_path(m, file, ""); - } else if (mm && is_stack(priv, vma, is_pid)) { + } else if (mm && is_stack(priv, vma)) { seq_pad(m, ' '); seq_printf(m, "[stack]"); } diff --git a/fs/proc/uid.c b/fs/proc/uid.c new file mode 100644 index 000000000000..040591d341f8 --- /dev/null +++ b/fs/proc/uid.c @@ -0,0 +1,295 @@ +/* + * /proc/uid support + */ + +#include <linux/cpufreq_times.h> +#include <linux/fs.h> +#include <linux/hashtable.h> +#include <linux/init.h> +#include <linux/proc_fs.h> +#include <linux/rtmutex.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include "internal.h" + +struct proc_dir_entry *proc_uid; + +#define UID_HASH_BITS 10 + +static DECLARE_HASHTABLE(proc_uid_hash_table, UID_HASH_BITS); + +/* + * use rt_mutex here to avoid priority inversion between high-priority readers + * of these files and tasks calling proc_register_uid(). 
+ */ +static DEFINE_RT_MUTEX(proc_uid_lock); /* proc_uid_hash_table */ + +struct uid_hash_entry { + uid_t uid; + struct hlist_node hash; +}; + +/* Caller must hold proc_uid_lock */ +static bool uid_hash_entry_exists_locked(uid_t uid) +{ + struct uid_hash_entry *entry; + + hash_for_each_possible(proc_uid_hash_table, entry, hash, uid) { + if (entry->uid == uid) + return true; + } + return false; +} + +void proc_register_uid(kuid_t kuid) +{ + struct uid_hash_entry *entry; + bool exists; + uid_t uid = from_kuid_munged(current_user_ns(), kuid); + + rt_mutex_lock(&proc_uid_lock); + exists = uid_hash_entry_exists_locked(uid); + rt_mutex_unlock(&proc_uid_lock); + if (exists) + return; + + entry = kzalloc(sizeof(struct uid_hash_entry), GFP_KERNEL); + if (!entry) + return; + entry->uid = uid; + + rt_mutex_lock(&proc_uid_lock); + if (uid_hash_entry_exists_locked(uid)) + kfree(entry); + else + hash_add(proc_uid_hash_table, &entry->hash, uid); + rt_mutex_unlock(&proc_uid_lock); +} + +struct uid_entry { + const char *name; + int len; + umode_t mode; + const struct inode_operations *iop; + const struct file_operations *fop; +}; + +#define NOD(NAME, MODE, IOP, FOP) { \ + .name = (NAME), \ + .len = sizeof(NAME) - 1, \ + .mode = MODE, \ + .iop = IOP, \ + .fop = FOP, \ +} + +#ifdef CONFIG_CPU_FREQ_TIMES +const struct file_operations proc_uid_time_in_state_operations = { + .open = single_uid_time_in_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +static const struct uid_entry uid_base_stuff[] = { +#ifdef CONFIG_CPU_FREQ_TIMES + NOD("time_in_state", 0444, NULL, &proc_uid_time_in_state_operations), +#endif +}; + +const struct inode_operations proc_uid_def_inode_operations = { + .setattr = proc_setattr, +}; + +struct inode *proc_uid_make_inode(struct super_block *sb, kuid_t kuid) +{ + struct inode *inode; + + inode = new_inode(sb); + if (!inode) + return NULL; + + inode->i_ino = get_next_ino(); + inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; + inode->i_op = &proc_uid_def_inode_operations; + inode->i_uid = kuid; + + return inode; +} + +static int proc_uident_instantiate(struct inode *dir, struct dentry *dentry, + struct task_struct *unused, const void *ptr) +{ + const struct uid_entry *u = ptr; + struct inode *inode; + + inode = proc_uid_make_inode(dir->i_sb, dir->i_uid); + if (!inode) + return -ENOENT; + + inode->i_mode = u->mode; + if (S_ISDIR(inode->i_mode)) + set_nlink(inode, 2); + if (u->iop) + inode->i_op = u->iop; + if (u->fop) + inode->i_fop = u->fop; + d_add(dentry, inode); + return 0; +} + +static struct dentry *proc_uid_base_lookup(struct inode *dir, + struct dentry *dentry, + unsigned int flags) +{ + const struct uid_entry *u, *last; + unsigned int nents = ARRAY_SIZE(uid_base_stuff); + + if (nents == 0) + return ERR_PTR(-ENOENT); + + last = &uid_base_stuff[nents - 1]; + for (u = uid_base_stuff; u <= last; u++) { + if (u->len != dentry->d_name.len) + continue; + if (!memcmp(dentry->d_name.name, u->name, u->len)) + break; + } + if (u > last) + return ERR_PTR(-ENOENT); + + return ERR_PTR(proc_uident_instantiate(dir, dentry, NULL, u)); +} + +static int proc_uid_base_readdir(struct file *file, struct dir_context *ctx) +{ + unsigned int nents = ARRAY_SIZE(uid_base_stuff); + const struct uid_entry *u; + + if (!dir_emit_dots(file, ctx)) + return 0; + + if (ctx->pos >= nents + 2) + return 0; + + for (u = uid_base_stuff + (ctx->pos - 2); + u <= uid_base_stuff + nents - 1; u++) { + if (!proc_fill_cache(file, ctx, u->name, u->len, + 
proc_uident_instantiate, NULL, u)) + break; + ctx->pos++; + } + + return 0; +} + +static const struct inode_operations proc_uid_base_inode_operations = { + .lookup = proc_uid_base_lookup, + .setattr = proc_setattr, +}; + +static const struct file_operations proc_uid_base_operations = { + .read = generic_read_dir, + .iterate = proc_uid_base_readdir, + .llseek = default_llseek, +}; + +static int proc_uid_instantiate(struct inode *dir, struct dentry *dentry, + struct task_struct *unused, const void *ptr) +{ + unsigned int i, len; + nlink_t nlinks; + kuid_t *kuid = (kuid_t *)ptr; + struct inode *inode = proc_uid_make_inode(dir->i_sb, *kuid); + + if (!inode) + return -ENOENT; + + inode->i_mode = S_IFDIR | 0555; + inode->i_op = &proc_uid_base_inode_operations; + inode->i_fop = &proc_uid_base_operations; + inode->i_flags |= S_IMMUTABLE; + + nlinks = 2; + len = ARRAY_SIZE(uid_base_stuff); + for (i = 0; i < len; ++i) { + if (S_ISDIR(uid_base_stuff[i].mode)) + ++nlinks; + } + set_nlink(inode, nlinks); + + d_add(dentry, inode); + + return 0; +} + +static int proc_uid_readdir(struct file *file, struct dir_context *ctx) +{ + int last_shown, i; + unsigned long bkt; + struct uid_hash_entry *entry; + + if (!dir_emit_dots(file, ctx)) + return 0; + + i = 0; + last_shown = ctx->pos - 2; + rt_mutex_lock(&proc_uid_lock); + hash_for_each(proc_uid_hash_table, bkt, entry, hash) { + int len; + char buf[PROC_NUMBUF]; + + if (i < last_shown) + continue; + len = snprintf(buf, sizeof(buf), "%u", entry->uid); + if (!proc_fill_cache(file, ctx, buf, len, + proc_uid_instantiate, NULL, &entry->uid)) + break; + i++; + ctx->pos++; + } + rt_mutex_unlock(&proc_uid_lock); + return 0; +} + +static struct dentry *proc_uid_lookup(struct inode *dir, struct dentry *dentry, + unsigned int flags) +{ + int result = -ENOENT; + + uid_t uid = name_to_int(&dentry->d_name); + bool uid_exists; + + rt_mutex_lock(&proc_uid_lock); + uid_exists = uid_hash_entry_exists_locked(uid); + rt_mutex_unlock(&proc_uid_lock); + if (uid_exists) { + kuid_t kuid = make_kuid(current_user_ns(), uid); + + result = proc_uid_instantiate(dir, dentry, NULL, &kuid); + } + return ERR_PTR(result); +} + +static const struct file_operations proc_uid_operations = { + .read = generic_read_dir, + .iterate = proc_uid_readdir, + .llseek = default_llseek, +}; + +static const struct inode_operations proc_uid_inode_operations = { + .lookup = proc_uid_lookup, + .setattr = proc_setattr, +}; + +int __init proc_uid_init(void) +{ + proc_uid = proc_mkdir("uid", NULL); + if (!proc_uid) + return -ENOMEM; + proc_uid->proc_iops = &proc_uid_inode_operations; + proc_uid->proc_fops = &proc_uid_operations; + + return 0; +} diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 9d6486d416a3..a72097b625ef 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -1961,7 +1961,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th, * will be requeued because superblock is being shutdown and doesn't * have MS_ACTIVE set. 
*/ - cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work); + reiserfs_cancel_old_flush(sb); /* wait for all commits to finish */ cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work); diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index 5dcf3ab83886..6ca00471afbf 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h @@ -2948,6 +2948,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *s, struct reiserfs_list_bitmap *, unsigned int); void reiserfs_schedule_old_flush(struct super_block *s); +void reiserfs_cancel_old_flush(struct super_block *s); void add_save_link(struct reiserfs_transaction_handle *th, struct inode *inode, int truncate); int remove_save_link(struct inode *inode, int truncate); diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index f9f3be50081a..ee095246da4e 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -90,7 +90,9 @@ static void flush_old_commits(struct work_struct *work) s = sbi->s_journal->j_work_sb; spin_lock(&sbi->old_work_lock); - sbi->work_queued = 0; + /* Avoid clobbering the cancel state... */ + if (sbi->work_queued == 1) + sbi->work_queued = 0; spin_unlock(&sbi->old_work_lock); reiserfs_sync_fs(s, 1); @@ -117,21 +119,22 @@ void reiserfs_schedule_old_flush(struct super_block *s) spin_unlock(&sbi->old_work_lock); } -static void cancel_old_flush(struct super_block *s) +void reiserfs_cancel_old_flush(struct super_block *s) { struct reiserfs_sb_info *sbi = REISERFS_SB(s); - cancel_delayed_work_sync(&REISERFS_SB(s)->old_work); spin_lock(&sbi->old_work_lock); - sbi->work_queued = 0; + /* Make sure no new flushes will be queued */ + sbi->work_queued = 2; spin_unlock(&sbi->old_work_lock); + cancel_delayed_work_sync(&REISERFS_SB(s)->old_work); } static int reiserfs_freeze(struct super_block *s) { struct reiserfs_transaction_handle th; - cancel_old_flush(s); + reiserfs_cancel_old_flush(s); reiserfs_write_lock(s); if (!(s->s_flags & MS_RDONLY)) { @@ -152,7 +155,13 @@ static int reiserfs_freeze(struct super_block *s) static int reiserfs_unfreeze(struct super_block *s) { + struct reiserfs_sb_info *sbi = REISERFS_SB(s); + reiserfs_allow_writes(s); + spin_lock(&sbi->old_work_lock); + /* Allow old_work to run again */ + sbi->work_queued = 0; + spin_unlock(&sbi->old_work_lock); return 0; } @@ -2187,7 +2196,7 @@ error_unlocked: if (sbi->commit_wq) destroy_workqueue(sbi->commit_wq); - cancel_delayed_work_sync(&REISERFS_SB(s)->old_work); + reiserfs_cancel_old_flush(s); reiserfs_free_bitmap_cache(s); if (SB_BUFFER_WITH_SB(s)) diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c index 2879d1291a11..1461254f301d 100644 --- a/fs/sdcardfs/file.c +++ b/fs/sdcardfs/file.c @@ -414,7 +414,7 @@ ssize_t sdcardfs_write_iter(struct kiocb *iocb, struct iov_iter *iter) fsstack_copy_inode_size(inode, file_inode(lower_file)); fsstack_copy_attr_times(inode, file_inode(lower_file)); if (sizeof(loff_t) > sizeof(long)) - inode_lock(inode); + inode_unlock(inode); } out: return err; diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h index f0607a55441d..055e413509e4 100644 --- a/fs/sdcardfs/sdcardfs.h +++ b/fs/sdcardfs/sdcardfs.h @@ -669,7 +669,7 @@ static inline bool str_n_case_eq(const char *s1, const char *s2, size_t len) static inline bool qstr_case_eq(const struct qstr *q1, const struct qstr *q2) { - return q1->len == q2->len && str_case_eq(q1->name, q2->name); + return q1->len == q2->len && str_n_case_eq(q1->name, q2->name, q2->len); } #define QSTR_LITERAL(string) QSTR_INIT(string, sizeof(string)-1) diff --git a/include/asm-generic/pgtable.h 
b/include/asm-generic/pgtable.h index 14b0ff32fb9f..4814cf971048 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -755,6 +755,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); int pud_clear_huge(pud_t *pud); int pmd_clear_huge(pmd_t *pmd); +int pud_free_pmd_page(pud_t *pud); +int pmd_free_pte_page(pmd_t *pmd); #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) { @@ -772,6 +774,14 @@ static inline int pmd_clear_huge(pmd_t *pmd) { return 0; } +static inline int pud_free_pmd_page(pud_t *pud) +{ + return 0; +} +static inline int pmd_free_pte_page(pmd_t *pmd) +{ + return 0; +} #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ #endif /* !__ASSEMBLY__ */ diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 3febb4b9fce9..d842bec3d271 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -241,5 +241,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev); extern void drm_kms_helper_poll_disable(struct drm_device *dev); extern void drm_kms_helper_poll_enable(struct drm_device *dev); extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); +extern bool drm_kms_helper_is_poll_worker(void); #endif diff --git a/include/linux/audit.h b/include/linux/audit.h index faac391badac..9b95bb222e73 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -26,6 +26,7 @@ #include <linux/sched.h> #include <linux/ptrace.h> #include <uapi/linux/audit.h> +#include <linux/tty.h> #define AUDIT_INO_UNSET ((unsigned long)-1) #define AUDIT_DEV_UNSET ((dev_t)-1) @@ -239,6 +240,23 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk) return tsk->sessionid; } +static inline struct tty_struct *audit_get_tty(struct task_struct *tsk) +{ + struct tty_struct *tty = NULL; + unsigned long flags; + + spin_lock_irqsave(&tsk->sighand->siglock, flags); + if (tsk->signal) + tty = tty_kref_get(tsk->signal->tty); + spin_unlock_irqrestore(&tsk->sighand->siglock, flags); + return tty; +} + +static inline void audit_put_tty(struct tty_struct *tty) +{ + tty_kref_put(tty); +} + extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); extern void __audit_bprm(struct linux_binprm *bprm); @@ -410,6 +428,12 @@ static inline unsigned int audit_get_sessionid(struct task_struct *tsk) { return -1; } +static inline struct tty_struct *audit_get_tty(struct task_struct *tsk) +{ + return NULL; +} +static inline void audit_put_tty(struct tty_struct *tty) +{ } static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { } static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 6d73a04d0150..e7fd9490bcf6 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -56,6 +56,7 @@ struct bio { struct bio *bi_next; /* request queue link */ struct block_device *bi_bdev; unsigned int bi_flags; /* status, command, etc */ + unsigned short bi_write_hint; int bi_error; unsigned long bi_rw; /* bottom bits READ/WRITE, * top bits priority diff --git a/include/linux/cpufreq_times.h b/include/linux/cpufreq_times.h new file mode 100644 index 000000000000..3fb38750c853 --- /dev/null +++ b/include/linux/cpufreq_times.h @@ -0,0 +1,40 @@ +/* drivers/cpufreq/cpufreq_times.c + * + * Copyright (C) 2018 Google, 
Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_CPUFREQ_TIMES_H +#define _LINUX_CPUFREQ_TIMES_H + +#include <linux/cpufreq.h> +#include <linux/cputime.h> +#include <linux/pid.h> + +#ifdef CONFIG_CPU_FREQ_TIMES +void cpufreq_task_times_init(struct task_struct *p); +void cpufreq_task_times_exit(struct task_struct *p); +int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *p); +void cpufreq_acct_update_power(struct task_struct *p, cputime_t cputime); +void cpufreq_times_create_policy(struct cpufreq_policy *policy); +void cpufreq_times_record_transition(struct cpufreq_freqs *freq); +void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end); +int single_uid_time_in_state_open(struct inode *inode, struct file *file); +#else +static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {} +static inline void cpufreq_times_record_transition( + struct cpufreq_freqs *freq) {} +static inline void cpufreq_task_times_remove_uids(uid_t uid_start, + uid_t uid_end) {} +#endif /* CONFIG_CPU_FREQ_TIMES */ +#endif /* _LINUX_CPUFREQ_TIMES_H */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 94013037585e..e0bfecde4e7b 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -675,6 +675,11 @@ void alloc_bootmem_cpumask_var(cpumask_var_t *mask); void free_cpumask_var(cpumask_var_t mask); void free_bootmem_cpumask_var(cpumask_var_t mask); +static inline bool cpumask_available(cpumask_var_t mask) +{ + return mask != NULL; +} + #else typedef struct cpumask cpumask_var_t[1]; @@ -715,6 +720,11 @@ static inline void free_cpumask_var(cpumask_var_t mask) static inline void free_bootmem_cpumask_var(cpumask_var_t mask) { } + +static inline bool cpumask_available(cpumask_var_t mask) +{ + return true; +} #endif /* CONFIG_CPUMASK_OFFSTACK */ /* It's common to want to use cpu_all_mask in struct member initializers, diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index c82ae65b5330..2ebfa01b7091 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -21,6 +21,7 @@ #define F2FS_BLKSIZE 4096 /* support only 4KB block */ #define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */ #define F2FS_MAX_EXTENSION 64 /* # of extension entries */ +#define F2FS_EXTENSION_LEN 8 /* max size of extension */ #define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS) #define NULL_ADDR ((block_t)0) /* used as block_t addresses */ @@ -38,15 +39,14 @@ #define F2FS_MAX_QUOTAS 3 -#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */ -#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */ -#define F2FS_IO_SIZE_BYTES(sbi) (1 << ((sbi)->write_io_size_bits + 12)) /* B */ -#define F2FS_IO_SIZE_BITS(sbi) ((sbi)->write_io_size_bits) /* power of 2 */ +#define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */ +#define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */ +#define F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */ +#define 
F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */ #define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1) /* This flag is used by node and meta inodes, and by recovery */ #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) -#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM) /* * For further optimization on multi-head logs, on-disk layout supports maximum @@ -102,7 +102,7 @@ struct f2fs_super_block { __u8 uuid[16]; /* 128-bit uuid for volume */ __le16 volume_name[MAX_VOLUME_NAME]; /* volume name */ __le32 extension_count; /* # of extensions below */ - __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */ + __u8 extension_list[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];/* extension array */ __le32 cp_payload; __u8 version[VERSION_LEN]; /* the kernel version */ __u8 init_version[VERSION_LEN]; /* the initial kernel version */ @@ -111,12 +111,14 @@ struct f2fs_super_block { __u8 encrypt_pw_salt[16]; /* Salt used for string2key algorithm */ struct f2fs_device devs[MAX_DEVICES]; /* device list */ __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */ - __u8 reserved[315]; /* valid reserved region */ + __u8 hot_ext_count; /* # of hot file extension */ + __u8 reserved[314]; /* valid reserved region */ } __packed; /* * For checkpoint */ +#define CP_LARGE_NAT_BITMAP_FLAG 0x00000400 #define CP_NOCRC_RECOVERY_FLAG 0x00000200 #define CP_TRIMMED_FLAG 0x00000100 #define CP_NAT_BITS_FLAG 0x00000080 @@ -303,6 +305,10 @@ struct f2fs_node { */ #define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry)) #define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8) +#define NAT_ENTRY_BITMAP_SIZE_ALIGNED \ + ((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) / \ + BITS_PER_LONG * BITS_PER_LONG) + struct f2fs_nat_entry { __u8 version; /* latest version of cached nat entry */ diff --git a/include/linux/fs.h b/include/linux/fs.h index f8965450c86c..250f4d1ce9c5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -20,6 +20,7 @@ #include <linux/rwsem.h> #include <linux/capability.h> #include <linux/semaphore.h> +#include <linux/fcntl.h> #include <linux/fiemap.h> #include <linux/rculist_bl.h> #include <linux/atomic.h> @@ -143,6 +144,9 @@ typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate); /* File was opened by fanotify and shouldn't generate fanotify events */ #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) +/* File is capable of returning -EAGAIN if I/O will block */ +#define FMODE_NOWAIT ((__force fmode_t)0x8000000) + /* * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector * that indicates that they should check the contents of the iovec are @@ -326,9 +330,22 @@ struct page; struct address_space; struct writeback_control; +/* + * Write life time hint values. 
+ */ +enum rw_hint { + WRITE_LIFE_NOT_SET = 0, + WRITE_LIFE_NONE = RWH_WRITE_LIFE_NONE, + WRITE_LIFE_SHORT = RWH_WRITE_LIFE_SHORT, + WRITE_LIFE_MEDIUM = RWH_WRITE_LIFE_MEDIUM, + WRITE_LIFE_LONG = RWH_WRITE_LIFE_LONG, + WRITE_LIFE_EXTREME = RWH_WRITE_LIFE_EXTREME, +}; + #define IOCB_EVENTFD (1 << 0) #define IOCB_APPEND (1 << 1) #define IOCB_DIRECT (1 << 2) +#define IOCB_NOWAIT (1 << 7) struct kiocb { struct file *ki_filp; @@ -336,6 +353,7 @@ struct kiocb { void (*ki_complete)(struct kiocb *iocb, long ret, long ret2); void *private; int ki_flags; + enum rw_hint ki_hint; }; static inline bool is_sync_kiocb(struct kiocb *kiocb) @@ -635,6 +653,7 @@ struct inode { spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ unsigned short i_bytes; unsigned int i_blkbits; + enum rw_hint i_write_hint; blkcnt_t i_blocks; #ifdef __NEED_I_SIZE_ORDERED @@ -1073,8 +1092,6 @@ struct file_lock_context { #define OFFT_OFFSET_MAX INT_LIMIT(off_t) #endif -#include <linux/fcntl.h> - extern void send_sigio(struct fown_struct *fown, int fd, int band); #ifdef CONFIG_FILE_LOCKING @@ -1314,6 +1331,7 @@ struct mm_struct; /* sb->s_iflags */ #define SB_I_CGROUPWB 0x00000001 /* cgroup-aware writeback enabled */ #define SB_I_NOEXEC 0x00000002 /* Ignore executables on this fs */ +#define SB_I_MULTIROOT 0x00000008 /* Multiple roots to the dentry tree */ /* Possible states of 'frozen' field */ enum { diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 8641e56b8f8a..9e535af579e8 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -13,42 +13,13 @@ #ifndef _LINUX_FSCRYPT_H #define _LINUX_FSCRYPT_H -#include <linux/key.h> #include <linux/fs.h> -#include <linux/mm.h> -#include <linux/bio.h> -#include <linux/dcache.h> -#include <crypto/skcipher.h> -#include <uapi/linux/fs.h> #define FS_CRYPTO_BLOCK_SIZE 16 +struct fscrypt_ctx; struct fscrypt_info; -struct fscrypt_ctx { - union { - struct { - struct page *bounce_page; /* Ciphertext page */ - struct page *control_page; /* Original page */ - } w; - struct { - struct bio *bio; - struct work_struct work; - } r; - struct list_head free_list; /* Free list */ - }; - u8 flags; /* Flags */ -}; - -/** - * For encrypted symlinks, the ciphertext length is stored at the beginning - * of the string in little-endian format. 
- */ -struct fscrypt_symlink_data { - __le16 len; - char encrypted_path[1]; -} __packed; - struct fscrypt_str { unsigned char *name; u32 len; @@ -67,86 +38,11 @@ struct fscrypt_name { #define fname_name(p) ((p)->disk_name.name) #define fname_len(p) ((p)->disk_name.len) -/* - * fscrypt superblock flags - */ -#define FS_CFLG_OWN_PAGES (1U << 1) - -/* - * crypto opertions for filesystems - */ -struct fscrypt_operations { - unsigned int flags; - const char *key_prefix; - int (*get_context)(struct inode *, void *, size_t); - int (*set_context)(struct inode *, const void *, size_t, void *); - bool (*dummy_context)(struct inode *); - bool (*empty_dir)(struct inode *); - unsigned (*max_namelen)(struct inode *); -}; - -static inline bool fscrypt_dummy_context_enabled(struct inode *inode) -{ - if (inode->i_sb->s_cop->dummy_context && - inode->i_sb->s_cop->dummy_context(inode)) - return true; - return false; -} - -static inline bool fscrypt_valid_enc_modes(u32 contents_mode, - u32 filenames_mode) -{ - if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC && - filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS) - return true; - - if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS && - filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS) - return true; - - return false; -} - -static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) -{ - if (str->len == 1 && str->name[0] == '.') - return true; - - if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.') - return true; - - return false; -} - #if __FS_HAS_ENCRYPTION - -static inline struct page *fscrypt_control_page(struct page *page) -{ - return ((struct fscrypt_ctx *)page_private(page))->w.control_page; -} - -static inline bool fscrypt_has_encryption_key(const struct inode *inode) -{ - return (inode->i_crypt_info != NULL); -} - #include <linux/fscrypt_supp.h> - -#else /* !__FS_HAS_ENCRYPTION */ - -static inline struct page *fscrypt_control_page(struct page *page) -{ - WARN_ON_ONCE(1); - return ERR_PTR(-EINVAL); -} - -static inline bool fscrypt_has_encryption_key(const struct inode *inode) -{ - return 0; -} - +#else #include <linux/fscrypt_notsupp.h> -#endif /* __FS_HAS_ENCRYPTION */ +#endif /** * fscrypt_require_key - require an inode's encryption key @@ -287,4 +183,68 @@ static inline int fscrypt_prepare_setattr(struct dentry *dentry, return 0; } +/** + * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink + * @dir: directory in which the symlink is being created + * @target: plaintext symlink target + * @len: length of @target excluding null terminator + * @max_len: space the filesystem has available to store the symlink target + * @disk_link: (out) the on-disk symlink target being prepared + * + * This function computes the size the symlink target will require on-disk, + * stores it in @disk_link->len, and validates it against @max_len. An + * encrypted symlink may be longer than the original. + * + * Additionally, @disk_link->name is set to @target if the symlink will be + * unencrypted, but left NULL if the symlink will be encrypted. For encrypted + * symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the + * on-disk target later. (The reason for the two-step process is that some + * filesystems need to know the size of the symlink target before creating the + * inode, e.g. to determine whether it will be a "fast" or "slow" symlink.) 
+ * + * Return: 0 on success, -ENAMETOOLONG if the symlink target is too long, + * -ENOKEY if the encryption key is missing, or another -errno code if a problem + * occurred while setting up the encryption key. + */ +static inline int fscrypt_prepare_symlink(struct inode *dir, + const char *target, + unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link) +{ + if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir)) + return __fscrypt_prepare_symlink(dir, len, max_len, disk_link); + + disk_link->name = (unsigned char *)target; + disk_link->len = len + 1; + if (disk_link->len > max_len) + return -ENAMETOOLONG; + return 0; +} + +/** + * fscrypt_encrypt_symlink - encrypt the symlink target if needed + * @inode: symlink inode + * @target: plaintext symlink target + * @len: length of @target excluding null terminator + * @disk_link: (in/out) the on-disk symlink target being prepared + * + * If the symlink target needs to be encrypted, then this function encrypts it + * into @disk_link->name. fscrypt_prepare_symlink() must have been called + * previously to compute @disk_link->len. If the filesystem did not allocate a + * buffer for @disk_link->name after calling fscrypt_prepare_link(), then one + * will be kmalloc()'ed and the filesystem will be responsible for freeing it. + * + * Return: 0 on success, -errno on failure + */ +static inline int fscrypt_encrypt_symlink(struct inode *inode, + const char *target, + unsigned int len, + struct fscrypt_str *disk_link) +{ + if (IS_ENCRYPTED(inode)) + return __fscrypt_encrypt_symlink(inode, target, len, disk_link); + return 0; +} + #endif /* _LINUX_FSCRYPT_H */ diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h index c4c6bf2c390e..5777251400f9 100644 --- a/include/linux/fscrypt_notsupp.h +++ b/include/linux/fscrypt_notsupp.h @@ -13,6 +13,16 @@ #ifndef _LINUX_FSCRYPT_NOTSUPP_H #define _LINUX_FSCRYPT_NOTSUPP_H +static inline bool fscrypt_has_encryption_key(const struct inode *inode) +{ + return false; +} + +static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +{ + return false; +} + /* crypto.c */ static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags) @@ -42,6 +52,11 @@ static inline int fscrypt_decrypt_page(const struct inode *inode, return -EOPNOTSUPP; } +static inline struct page *fscrypt_control_page(struct page *page) +{ + WARN_ON_ONCE(1); + return ERR_PTR(-EINVAL); +} static inline void fscrypt_restore_control_page(struct page *page) { @@ -115,16 +130,8 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname) return; } -static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode, - u32 ilen) -{ - /* never happens */ - WARN_ON(1); - return 0; -} - static inline int fscrypt_fname_alloc_buffer(const struct inode *inode, - u32 ilen, + u32 max_encrypted_len, struct fscrypt_str *crypto_str) { return -EOPNOTSUPP; @@ -143,13 +150,6 @@ static inline int fscrypt_fname_disk_to_usr(struct inode *inode, return -EOPNOTSUPP; } -static inline int fscrypt_fname_usr_to_disk(struct inode *inode, - const struct qstr *iname, - struct fscrypt_str *oname) -{ - return -EOPNOTSUPP; -} - static inline bool fscrypt_match_name(const struct fscrypt_name *fname, const u8 *de_name, u32 de_name_len) { @@ -207,4 +207,27 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir, return -EOPNOTSUPP; } +static inline int __fscrypt_prepare_symlink(struct inode *dir, + unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link) +{ + 
return -EOPNOTSUPP; +} + +static inline int __fscrypt_encrypt_symlink(struct inode *inode, + const char *target, + unsigned int len, + struct fscrypt_str *disk_link) +{ + return -EOPNOTSUPP; +} + +static inline void *fscrypt_get_symlink(struct inode *inode, + const void *caddr, + unsigned int max_size) +{ + return ERR_PTR(-EOPNOTSUPP); +} + #endif /* _LINUX_FSCRYPT_NOTSUPP_H */ diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h index 2db5e9706f60..c88d2058902a 100644 --- a/include/linux/fscrypt_supp.h +++ b/include/linux/fscrypt_supp.h @@ -10,8 +10,54 @@ #ifndef _LINUX_FSCRYPT_SUPP_H #define _LINUX_FSCRYPT_SUPP_H +#include <linux/mm.h> +#include <linux/slab.h> + +/* + * fscrypt superblock flags + */ +#define FS_CFLG_OWN_PAGES (1U << 1) + +/* + * crypto operations for filesystems + */ +struct fscrypt_operations { + unsigned int flags; + const char *key_prefix; + int (*get_context)(struct inode *, void *, size_t); + int (*set_context)(struct inode *, const void *, size_t, void *); + bool (*dummy_context)(struct inode *); + bool (*empty_dir)(struct inode *); + unsigned (*max_namelen)(struct inode *); +}; + +struct fscrypt_ctx { + union { + struct { + struct page *bounce_page; /* Ciphertext page */ + struct page *control_page; /* Original page */ + } w; + struct { + struct bio *bio; + struct work_struct work; + } r; + struct list_head free_list; /* Free list */ + }; + u8 flags; /* Flags */ +}; + +static inline bool fscrypt_has_encryption_key(const struct inode *inode) +{ + return (inode->i_crypt_info != NULL); +} + +static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +{ + return inode->i_sb->s_cop->dummy_context && + inode->i_sb->s_cop->dummy_context(inode); +} + /* crypto.c */ -extern struct kmem_cache *fscrypt_info_cachep; extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); extern void fscrypt_release_ctx(struct fscrypt_ctx *); extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, @@ -19,6 +65,12 @@ extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, u64, gfp_t); extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, unsigned int, u64); + +static inline struct page *fscrypt_control_page(struct page *page) +{ + return ((struct fscrypt_ctx *)page_private(page))->w.control_page; +} + extern void fscrypt_restore_control_page(struct page *); extern const struct dentry_operations fscrypt_d_ops; @@ -54,14 +106,11 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname) kfree(fname->crypto_buf.name); } -extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32); extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, struct fscrypt_str *); extern void fscrypt_fname_free_buffer(struct fscrypt_str *); extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, const struct fscrypt_str *, struct fscrypt_str *); -extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *, - struct fscrypt_str *); #define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 @@ -152,5 +201,13 @@ extern int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *new_dentry, unsigned int flags); extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry); +extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link); +extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, + unsigned int len, + struct fscrypt_str *disk_link); +extern void 
*fscrypt_get_symlink(struct inode *inode, const void *caddr, + unsigned int max_size); #endif /* _LINUX_FSCRYPT_SUPP_H */ diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h index 93e080b39cf6..793590dcfb47 100644 --- a/include/linux/goldfish.h +++ b/include/linux/goldfish.h @@ -1,14 +1,19 @@ #ifndef __LINUX_GOLDFISH_H #define __LINUX_GOLDFISH_H +#include <linux/types.h> +#include <linux/io.h> + /* Helpers for Goldfish virtual platform */ static inline void gf_write_ptr(const void *ptr, void __iomem *portl, void __iomem *porth) { - writel((u32)(unsigned long)ptr, portl); + const uintptr_t addr = (uintptr_t)ptr; + + writel((u32)addr, portl); #ifdef CONFIG_64BIT - writel((unsigned long)ptr >> 32, porth); + writel(addr >> 32, porth); #endif } diff --git a/include/linux/habmm.h b/include/linux/habmm.h index b4b31dc36faf..842cd27fd372 100644 --- a/include/linux/habmm.h +++ b/include/linux/habmm.h @@ -143,6 +143,11 @@ int32_t habmm_socket_send(int32_t handle, void *src_buff, uint32_t size_bytes, */ #define HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING 0x00000001 +/* In the blocking mode, this flag is used to indicate it is an + * uninterruptbile blocking call. + */ +#define HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE 0x00000002 + int32_t habmm_socket_recv(int32_t handle, void *dst_buff, uint32_t *size_bytes, uint32_t timeout, uint32_t flags); @@ -209,6 +214,11 @@ int32_t habmm_socket_recvfrom(int32_t handle, void *dst_buff, */ #define HABMM_EXP_MEM_TYPE_DMA 0x00000001 +/* + * this flag is used for export from dma_buf fd or import to dma_buf fd + */ +#define HABMM_EXPIMP_FLAGS_FD 0x00010000 + #define HAB_MAX_EXPORT_SIZE 0x8000000 /* diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 5fdc55312334..2fb10601febe 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -1,6 +1,7 @@ #ifndef _LINUX_JIFFIES_H #define _LINUX_JIFFIES_H +#include <linux/cache.h> #include <linux/math64.h> #include <linux/kernel.h> #include <linux/types.h> @@ -63,19 +64,17 @@ extern int register_refined_jiffies(long clock_tick_rate); /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) -/* some arch's have a small-data section that can be accessed register-relative - * but that can only take up to, say, 4-byte variables. jiffies being part of - * an 8-byte variable may not be correctly accessed unless we force the issue - */ -#define __jiffy_data __attribute__((section(".data"))) +#ifndef __jiffy_arch_data +#define __jiffy_arch_data +#endif /* * The 64-bit value is not atomic - you MUST NOT read it * without sampling the sequence number in jiffies_lock. * get_jiffies_64() will do this for you as appropriate. */ -extern u64 __jiffy_data jiffies_64; -extern unsigned long volatile __jiffy_data jiffies; +extern u64 __cacheline_aligned_in_smp jiffies_64; +extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies; #if (BITS_PER_LONG < 64) u64 get_jiffies_64(void); diff --git a/include/linux/llist.h b/include/linux/llist.h index fd4ca0b4fe0f..ac6796138ba0 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -88,6 +88,23 @@ static inline void init_llist_head(struct llist_head *list) container_of(ptr, type, member) /** + * member_address_is_nonnull - check whether the member address is not NULL + * @ptr: the object pointer (struct type * that contains the llist_node) + * @member: the name of the llist_node within the struct. 
+ * + * This macro is conceptually the same as + * &ptr->member != NULL + * but it works around the fact that compilers can decide that taking a member + * address is never a NULL pointer. + * + * Real objects that start at a high address and have a member at NULL are + * unlikely to exist, but such pointers may be returned e.g. by the + * container_of() macro. + */ +#define member_address_is_nonnull(ptr, member) \ + ((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0) + +/** * llist_for_each - iterate over some deleted entries of a lock-less list * @pos: the &struct llist_node to use as a loop cursor * @node: the first entry of deleted list entries @@ -121,7 +138,7 @@ static inline void init_llist_head(struct llist_head *list) */ #define llist_for_each_entry(pos, node, member) \ for ((pos) = llist_entry((node), typeof(*(pos)), member); \ - &(pos)->member != NULL; \ + member_address_is_nonnull(pos, member); \ (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member)) /** @@ -143,7 +160,7 @@ static inline void init_llist_head(struct llist_head *list) */ #define llist_for_each_entry_safe(pos, n, node, member) \ for (pos = llist_entry((node), typeof(*pos), member); \ - &pos->member != NULL && \ + member_address_is_nonnull(pos, member) && \ (n = llist_entry(pos->member.next, typeof(*n), member), true); \ pos = n) diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index fe052e234906..bb1018882199 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -465,6 +465,7 @@ struct mlx4_update_qp_params { u16 rate_val; }; +struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn); int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, enum mlx4_update_qp_attr attr, struct mlx4_update_qp_params *params); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index a91b67b18a73..5c93f4a89afa 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -635,8 +635,14 @@ enum { }; enum { - CQE_RSS_HTYPE_IP = 0x3 << 6, - CQE_RSS_HTYPE_L4 = 0x3 << 2, + CQE_RSS_HTYPE_IP = 0x3 << 2, + /* cqe->rss_hash_type[3:2] - IP destination selected for hash + * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved) + */ + CQE_RSS_HTYPE_L4 = 0x3 << 6, + /* cqe->rss_hash_type[7:6] - L4 destination selected for hash + * (00 = none, 01 = TCP. 
10 = UDP, 11 = IPSEC.SPI + */ }; enum { diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 34da10bff4e1..f564303a28f9 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -84,6 +84,8 @@ struct mmc_ios { #define MMC_SET_DRIVER_TYPE_A 1 #define MMC_SET_DRIVER_TYPE_C 2 #define MMC_SET_DRIVER_TYPE_D 3 + + bool enhanced_strobe; /* hs400es selection */ }; /* states to represent load on the host */ @@ -600,6 +602,8 @@ struct mmc_host { #endif bool sdr104_wa; + atomic_t rpmb_req_pending; + struct mutex rpmb_req_mutex; unsigned long private[0] ____cacheline_aligned; }; @@ -825,6 +829,11 @@ static inline bool mmc_card_hs400(struct mmc_card *card) return card->host->ios.timing == MMC_TIMING_MMC_HS400; } +static inline bool mmc_card_hs400es(struct mmc_card *card) +{ + return card->host->ios.enhanced_strobe; +} + void mmc_retune_enable(struct mmc_host *host); void mmc_retune_disable(struct mmc_host *host); void mmc_retune_timer_stop(struct mmc_host *host); diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index d6c53fce006b..6923e4049de3 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -247,6 +247,8 @@ unsigned int *xt_alloc_entry_offsets(unsigned int size); bool xt_find_jump_offset(const unsigned int *offsets, unsigned int target, unsigned int size); +int xt_check_proc_name(const char *name, unsigned int size); + int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, bool inv_proto); int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, @@ -368,38 +370,14 @@ static inline unsigned long ifname_compare_aligned(const char *_a, return ret; } +struct xt_percpu_counter_alloc_state { + unsigned int off; + const char __percpu *mem; +}; -/* On SMP, ip(6)t_entry->counters.pcnt holds address of the - * real (percpu) counter. On !SMP, its just the packet count, - * so nothing needs to be done there. - * - * xt_percpu_counter_alloc returns the address of the percpu - * counter, or 0 on !SMP. We force an alignment of 16 bytes - * so that bytes/packets share a common cache line. - * - * Hence caller must use IS_ERR_VALUE to check for error, this - * allows us to return 0 for single core systems without forcing - * callers to deal with SMP vs. NONSMP issues. - */ -static inline unsigned long xt_percpu_counter_alloc(void) -{ - if (nr_cpu_ids > 1) { - void __percpu *res = __alloc_percpu(sizeof(struct xt_counters), - sizeof(struct xt_counters)); - - if (res == NULL) - return -ENOMEM; - - return (__force unsigned long) res; - } - - return 0; -} -static inline void xt_percpu_counter_free(u64 pcnt) -{ - if (nr_cpu_ids > 1) - free_percpu((void __percpu *) (unsigned long) pcnt); -} +bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state, + struct xt_counters *counter); +void xt_percpu_counter_free(struct xt_counters *cnt); static inline struct xt_counters * xt_get_this_cpu_counter(struct xt_counters *cnt) diff --git a/include/linux/nospec.h b/include/linux/nospec.h index b99bced39ac2..e791ebc65c9c 100644 --- a/include/linux/nospec.h +++ b/include/linux/nospec.h @@ -5,6 +5,7 @@ #ifndef _LINUX_NOSPEC_H #define _LINUX_NOSPEC_H +#include <asm/barrier.h> /** * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise @@ -20,20 +21,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, unsigned long size) { /* - * Warn developers about inappropriate array_index_nospec() usage. 
- * - * Even if the CPU speculates past the WARN_ONCE branch, the - * sign bit of @index is taken into account when generating the - * mask. - * - * This warning is compiled out when the compiler can infer that - * @index and @size are less than LONG_MAX. - */ - if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, - "array_index_nospec() limited to range of [0, LONG_MAX]\n")) - return 0; - - /* * Always calculate and emit the mask even if the compiler * thinks the mask is not needed. The compiler does not take * into account the value of @index under speculation. @@ -66,7 +53,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index, BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ \ - _i &= _mask; \ - _i; \ + (typeof(_i)) (_i & _mask); \ }) #endif /* _LINUX_NOSPEC_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index fbfadba81c5a..771774e13f10 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -153,7 +153,7 @@ static inline int page_cache_get_speculative(struct page *page) #ifdef CONFIG_TINY_RCU # ifdef CONFIG_PREEMPT_COUNT - VM_BUG_ON(!in_atomic()); + VM_BUG_ON(!in_atomic() && !irqs_disabled()); # endif /* * Preempt must be disabled here - we rely on rcu_read_lock doing @@ -191,7 +191,7 @@ static inline int page_cache_add_speculative(struct page *page, int count) #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) # ifdef CONFIG_PREEMPT_COUNT - VM_BUG_ON(!in_atomic()); + VM_BUG_ON(!in_atomic() && !irqs_disabled()); # endif VM_BUG_ON_PAGE(page_count(page) == 0, page); atomic_add(count, &page->_count); diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h index 1419133fa69e..4ac1a070af0a 100644 --- a/include/linux/platform_data/isl9305.h +++ b/include/linux/platform_data/isl9305.h @@ -24,7 +24,7 @@ struct regulator_init_data; struct isl9305_pdata { - struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR]; + struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR + 1]; }; #endif diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index 34c4498b800f..83b22ae9ae12 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h @@ -59,23 +59,23 @@ struct posix_clock_operations { int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx); - int (*clock_gettime)(struct posix_clock *pc, struct timespec *ts); + int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts); - int (*clock_getres) (struct posix_clock *pc, struct timespec *ts); + int (*clock_getres) (struct posix_clock *pc, struct timespec64 *ts); int (*clock_settime)(struct posix_clock *pc, - const struct timespec *ts); + const struct timespec64 *ts); int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit); int (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit); void (*timer_gettime)(struct posix_clock *pc, - struct k_itimer *kit, struct itimerspec *tsp); + struct k_itimer *kit, struct itimerspec64 *tsp); int (*timer_settime)(struct posix_clock *pc, struct k_itimer *kit, int flags, - struct itimerspec *tsp, struct itimerspec *old); + struct itimerspec64 *tsp, struct itimerspec64 *old); /* * Optional character device methods: */ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index b97bf2ef996e..b326d0a0cace 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -74,6 +74,12 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p #endif /* CONFIG_PROC_FS */ +#ifdef 
CONFIG_PROC_UID +extern void proc_register_uid(kuid_t uid); +#else +static inline void proc_register_uid(kuid_t uid) {} +#endif + struct net; static inline struct proc_dir_entry *proc_net_mkdir( diff --git a/include/linux/qdsp6v2/apr.h b/include/linux/qdsp6v2/apr.h index adcdbcbc5907..98432952f278 100644 --- a/include/linux/qdsp6v2/apr.h +++ b/include/linux/qdsp6v2/apr.h @@ -69,7 +69,9 @@ struct apr_hdr { #define APR_DOMAIN_MODEM 0x3 #define APR_DOMAIN_ADSP 0x4 #define APR_DOMAIN_APPS 0x5 -#define APR_DOMAIN_MAX 0x6 +#define APR_DOMAIN_SDSP 0x8 +#define APR_DOMAIN_MAX 0x9 + /* ADSP service IDs */ #define APR_SVC_TEST_CLIENT 0x2 @@ -94,6 +96,9 @@ struct apr_hdr { #define APR_SVC_CVP 0x6 #define APR_SVC_SRD 0x7 +/* Sensor DSP Micro Audio Service IDs */ +#define APR_SVC_MAS 0x3 + /* APR Port IDs */ #define APR_MAX_PORTS 0x80 diff --git a/include/linux/qdsp6v2/apr_tal.h b/include/linux/qdsp6v2/apr_tal.h index bf324064960b..9b35c9f9882d 100644 --- a/include/linux/qdsp6v2/apr_tal.h +++ b/include/linux/qdsp6v2/apr_tal.h @@ -27,7 +27,8 @@ #define APR_DEST_MODEM 0 #define APR_DEST_QDSP6 1 -#define APR_DEST_MAX 2 +#define APR_DEST_DSPS 3 +#define APR_DEST_MAX 4 #if defined(CONFIG_MSM_QDSP6_APRV2_GLINK) || \ defined(CONFIG_MSM_QDSP6_APRV3_GLINK) diff --git a/include/linux/sched.h b/include/linux/sched.h index 000b0f02ce37..81f422018aa1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1807,6 +1807,10 @@ struct task_struct { cputime_t utime, stime, utimescaled, stimescaled; cputime_t gtime; +#ifdef CONFIG_CPU_FREQ_TIMES + u64 *time_in_state; + unsigned int max_state; +#endif struct prev_cputime prev_cputime; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN seqlock_t vtime_seqlock; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index b5421f6f155a..a6da214d0584 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -879,10 +879,10 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom); struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t priority); -int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, - int offset, int len); -int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, - int len); +int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, + int offset, int len); +int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, + int offset, int len); int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); int skb_pad(struct sk_buff *skb, int pad); #define dev_kfree_skb(a) consume_skb(a) diff --git a/include/linux/tty.h b/include/linux/tty.h index 83b264c52898..a1042afff99a 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -372,6 +372,7 @@ extern void proc_clear_tty(struct task_struct *p); extern struct tty_struct *get_current_tty(void); /* tty_io.c */ extern int __init tty_init(void); +extern const char *tty_name(const struct tty_struct *tty); #else static inline void console_init(void) { } @@ -392,6 +393,8 @@ static inline struct tty_struct *get_current_tty(void) /* tty_io.c */ static inline int __init tty_init(void) { return 0; } +static inline const char *tty_name(const struct tty_struct *tty) +{ return "(none)"; } #endif extern void tty_write_flush(struct tty_struct *); @@ -420,7 +423,6 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode, const char *routine); -extern const char 
*tty_name(const struct tty_struct *tty); extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); extern int __tty_check_change(struct tty_struct *tty, int sig); extern int tty_check_change(struct tty_struct *tty); diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 143e556f141d..0e61b1f65359 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -770,8 +770,20 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev) list_for_each_entry(tmp, &(gadget)->ep_list, ep_list) /** + * usb_ep_align - returns @len aligned to ep's maxpacketsize. + * @ep: the endpoint whose maxpacketsize is used to align @len + * @len: buffer size's length to align to @ep's maxpacketsize + * + * This helper is used to align buffer's size to an ep's maxpacketsize. + */ +static inline size_t usb_ep_align(struct usb_ep *ep, size_t len) +{ + return round_up(len, (size_t)le16_to_cpu(ep->desc->wMaxPacketSize)); +} + +/** * usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget - * requires quirk_ep_out_aligned_size, otherwise reguens len. + * requires quirk_ep_out_aligned_size, otherwise returns len. * @g: controller to check for quirk * @ep: the endpoint whose maxpacketsize is used to align @len * @len: buffer size's length to align to @ep's maxpacketsize @@ -782,8 +794,7 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev) static inline size_t usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len) { - return !g->quirk_ep_out_aligned_size ? len : - round_up(len, (size_t)ep->desc->wMaxPacketSize); + return g->quirk_ep_out_aligned_size ? usb_ep_align(ep, len) : len; } /** diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index de2a722fe3cf..ea4f81c2a6d5 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -56,4 +56,7 @@ */ #define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11) +/* Device needs a pause after every control message. */ +#define USB_QUIRK_DELAY_CTRL_MSG BIT(13) + #endif /* __LINUX_USB_QUIRKS_H */ diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 4e4aee64f559..0703a632e340 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -451,6 +451,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork); extern void workqueue_set_max_active(struct workqueue_struct *wq, int max_active); +extern struct work_struct *current_work(void); extern bool current_is_workqueue_rescuer(void); extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); extern unsigned int work_busy(struct work_struct *work); diff --git a/include/media/msmb_generic_buf_mgr.h b/include/media/msmb_generic_buf_mgr.h index 6588420fd6fd..3cb82668acde 100644 --- a/include/media/msmb_generic_buf_mgr.h +++ b/include/media/msmb_generic_buf_mgr.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2016, 2018 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -43,6 +43,8 @@ struct msm_buf_mngr_info32_t { #define VIDIOC_MSM_BUF_MNGR_FLUSH32 \ _IOWR('V', BASE_VIDIOC_PRIVATE + 39, struct msm_buf_mngr_info32_t) +#define VIDIOC_MSM_BUF_MNGR_BUF_ERROR32 \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 41, struct msm_buf_mngr_info32_t) #endif #endif diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 07f27b15e6fe..881c5d46a66c 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -996,9 +996,9 @@ enum rate_info_flags { * @RATE_INFO_BW_160: 160 MHz bandwidth */ enum rate_info_bw { + RATE_INFO_BW_20 = 0, RATE_INFO_BW_5, RATE_INFO_BW_10, - RATE_INFO_BW_20, RATE_INFO_BW_40, RATE_INFO_BW_80, RATE_INFO_BW_160, diff --git a/include/net/tcp.h b/include/net/tcp.h index 340b01dd8c37..e4f8cde33789 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1216,9 +1216,11 @@ void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd, static inline int tcp_win_from_space(int space) { - return sysctl_tcp_adv_win_scale<=0 ? - (space>>(-sysctl_tcp_adv_win_scale)) : - space - (space>>sysctl_tcp_adv_win_scale); + int tcp_adv_win_scale = sysctl_tcp_adv_win_scale; + + return tcp_adv_win_scale <= 0 ? + (space>>(-tcp_adv_win_scale)) : + space - (space>>tcp_adv_win_scale); } /* Note: caller must be prepared to deal with negative returns */ diff --git a/include/net/udplite.h b/include/net/udplite.h index 80761938b9a7..8228155b305e 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -62,6 +62,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) UDP_SKB_CB(skb)->cscov = cscov; if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; + skb->csum_valid = 0; } return 0; diff --git a/include/net/x25.h b/include/net/x25.h index c383aa4edbf0..6d30a01d281d 100644 --- a/include/net/x25.h +++ b/include/net/x25.h @@ -298,10 +298,10 @@ void x25_check_rbuf(struct sock *); /* sysctl_net_x25.c */ #ifdef CONFIG_SYSCTL -void x25_register_sysctl(void); +int x25_register_sysctl(void); void x25_unregister_sysctl(void); #else -static inline void x25_register_sysctl(void) {}; +static inline int x25_register_sysctl(void) { return 0; }; static inline void x25_unregister_sysctl(void) {}; #endif /* CONFIG_SYSCTL */ diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index a78ff97eb249..d77416963f05 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h @@ -123,6 +123,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, const unsigned char *dst_dev_addr); int rdma_addr_size(struct sockaddr *addr); +int rdma_addr_size_in6(struct sockaddr_in6 *addr); +int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr); int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id); int rdma_addr_find_dmac_by_grh(const union ib_gid *sgid, const union ib_gid *dgid, diff --git a/include/soc/qcom/boot_stats.h b/include/soc/qcom/boot_stats.h index b76d5676b555..5b82aa0bedc3 100644 --- a/include/soc/qcom/boot_stats.h +++ b/include/soc/qcom/boot_stats.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2014,2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2014,2016,2018 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -41,6 +41,6 @@ unsigned long long int msm_timer_get_sclk_ticks(void) { return 0; } static inline int boot_marker_enabled(void) { return 1; } void place_marker(const char *name); #else -inline void place_marker(char *name); +static inline void place_marker(char *name) { }; static inline int boot_marker_enabled(void) { return 0; } #endif diff --git a/include/sound/q6afe-v2.h b/include/sound/q6afe-v2.h index 5031e62beb17..922ebb69205d 100644 --- a/include/sound/q6afe-v2.h +++ b/include/sound/q6afe-v2.h @@ -208,6 +208,87 @@ enum { AFE_MAX_PORTS }; + +enum { + IDX_PRIMARY_TDM_RX_0, + IDX_PRIMARY_TDM_RX_1, + IDX_PRIMARY_TDM_RX_2, + IDX_PRIMARY_TDM_RX_3, + IDX_PRIMARY_TDM_RX_4, + IDX_PRIMARY_TDM_RX_5, + IDX_PRIMARY_TDM_RX_6, + IDX_PRIMARY_TDM_RX_7, + IDX_PRIMARY_TDM_TX_0, + IDX_PRIMARY_TDM_TX_1, + IDX_PRIMARY_TDM_TX_2, + IDX_PRIMARY_TDM_TX_3, + IDX_PRIMARY_TDM_TX_4, + IDX_PRIMARY_TDM_TX_5, + IDX_PRIMARY_TDM_TX_6, + IDX_PRIMARY_TDM_TX_7, + IDX_SECONDARY_TDM_RX_0, + IDX_SECONDARY_TDM_RX_1, + IDX_SECONDARY_TDM_RX_2, + IDX_SECONDARY_TDM_RX_3, + IDX_SECONDARY_TDM_RX_4, + IDX_SECONDARY_TDM_RX_5, + IDX_SECONDARY_TDM_RX_6, + IDX_SECONDARY_TDM_RX_7, + IDX_SECONDARY_TDM_TX_0, + IDX_SECONDARY_TDM_TX_1, + IDX_SECONDARY_TDM_TX_2, + IDX_SECONDARY_TDM_TX_3, + IDX_SECONDARY_TDM_TX_4, + IDX_SECONDARY_TDM_TX_5, + IDX_SECONDARY_TDM_TX_6, + IDX_SECONDARY_TDM_TX_7, + IDX_TERTIARY_TDM_RX_0, + IDX_TERTIARY_TDM_RX_1, + IDX_TERTIARY_TDM_RX_2, + IDX_TERTIARY_TDM_RX_3, + IDX_TERTIARY_TDM_RX_4, + IDX_TERTIARY_TDM_RX_5, + IDX_TERTIARY_TDM_RX_6, + IDX_TERTIARY_TDM_RX_7, + IDX_TERTIARY_TDM_TX_0, + IDX_TERTIARY_TDM_TX_1, + IDX_TERTIARY_TDM_TX_2, + IDX_TERTIARY_TDM_TX_3, + IDX_TERTIARY_TDM_TX_4, + IDX_TERTIARY_TDM_TX_5, + IDX_TERTIARY_TDM_TX_6, + IDX_TERTIARY_TDM_TX_7, + IDX_QUATERNARY_TDM_RX_0, + IDX_QUATERNARY_TDM_RX_1, + IDX_QUATERNARY_TDM_RX_2, + IDX_QUATERNARY_TDM_RX_3, + IDX_QUATERNARY_TDM_RX_4, + IDX_QUATERNARY_TDM_RX_5, + IDX_QUATERNARY_TDM_RX_6, + IDX_QUATERNARY_TDM_RX_7, + IDX_QUATERNARY_TDM_TX_0, + IDX_QUATERNARY_TDM_TX_1, + IDX_QUATERNARY_TDM_TX_2, + IDX_QUATERNARY_TDM_TX_3, + IDX_QUATERNARY_TDM_TX_4, + IDX_QUATERNARY_TDM_TX_5, + IDX_QUATERNARY_TDM_TX_6, + IDX_QUATERNARY_TDM_TX_7, + IDX_TDM_MAX, +}; + +enum { + IDX_GROUP_PRIMARY_TDM_RX, + IDX_GROUP_PRIMARY_TDM_TX, + IDX_GROUP_SECONDARY_TDM_RX, + IDX_GROUP_SECONDARY_TDM_TX, + IDX_GROUP_TERTIARY_TDM_RX, + IDX_GROUP_TERTIARY_TDM_TX, + IDX_GROUP_QUATERNARY_TDM_RX, + IDX_GROUP_QUATERNARY_TDM_TX, + IDX_GROUP_TDM_MAX, +}; + enum afe_mad_type { MAD_HW_NONE = 0x00, MAD_HW_AUDIO = 0x01, diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h index f5024c560d8f..9c4eb33c5a1d 100644 --- a/include/trace/events/preemptirq.h +++ b/include/trace/events/preemptirq.h @@ -56,15 +56,18 @@ DEFINE_EVENT(preemptirq_template, preempt_enable, #include <trace/define_trace.h> -#else /* !CONFIG_PREEMPTIRQ_EVENTS */ +#endif /* !CONFIG_PREEMPTIRQ_EVENTS */ +#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING) #define trace_irq_enable(...) #define trace_irq_disable(...) -#define trace_preempt_enable(...) -#define trace_preempt_disable(...) #define trace_irq_enable_rcuidle(...) #define trace_irq_disable_rcuidle(...) +#endif + +#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT) +#define trace_preempt_enable(...) +#define trace_preempt_disable(...) #define trace_preempt_enable_rcuidle(...) 
#define trace_preempt_disable_rcuidle(...) - #endif diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h index beed138bd359..f85ed3a5ef4d 100644 --- a/include/uapi/linux/fcntl.h +++ b/include/uapi/linux/fcntl.h @@ -43,6 +43,27 @@ /* (1U << 31) is reserved for signed error codes */ /* + * Set/Get write life time hints. {GET,SET}_RW_HINT operate on the + * underlying inode, while {GET,SET}_FILE_RW_HINT operate only on + * the specific file. + */ +#define F_GET_RW_HINT (F_LINUX_SPECIFIC_BASE + 11) +#define F_SET_RW_HINT (F_LINUX_SPECIFIC_BASE + 12) +#define F_GET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 13) +#define F_SET_FILE_RW_HINT (F_LINUX_SPECIFIC_BASE + 14) + +/* + * Valid hint values for F_{GET,SET}_RW_HINT. 0 is "not set", or can be + * used to clear any hints previously set. + */ +#define RWF_WRITE_LIFE_NOT_SET 0 +#define RWH_WRITE_LIFE_NONE 1 +#define RWH_WRITE_LIFE_SHORT 2 +#define RWH_WRITE_LIFE_MEDIUM 3 +#define RWH_WRITE_LIFE_LONG 4 +#define RWH_WRITE_LIFE_EXTREME 5 + +/* * Types of directory notifications that may be requested. */ #define DN_ACCESS 0x00000001 /* File accessed */ diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 1becea86c73c..eb3c786afa70 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -106,7 +106,7 @@ #define PCI_SUBSYSTEM_ID 0x2e #define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */ #define PCI_ROM_ADDRESS_ENABLE 0x01 -#define PCI_ROM_ADDRESS_MASK (~0x7ffUL) +#define PCI_ROM_ADDRESS_MASK (~0x7ffU) #define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */ diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h index c6f5b096c594..6f6d93f6c406 100644 --- a/include/uapi/linux/usb/audio.h +++ b/include/uapi/linux/usb/audio.h @@ -370,7 +370,7 @@ static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_d { return (protocol == UAC_VERSION_1) ? desc->baSourceID[desc->bNrInPins + 4] : - desc->baSourceID[desc->bNrInPins + 6]; + 2; /* in UAC2, this value is constant */ } static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc, @@ -378,7 +378,7 @@ static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_de { return (protocol == UAC_VERSION_1) ? 
&desc->baSourceID[desc->bNrInPins + 5] : - &desc->baSourceID[desc->bNrInPins + 7]; + &desc->baSourceID[desc->bNrInPins + 6]; } static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc, diff --git a/include/uapi/media/ais/msm_ais_sensor.h b/include/uapi/media/ais/msm_ais_sensor.h index 59c20c8e84ae..ca9bcf96bcb0 100644 --- a/include/uapi/media/ais/msm_ais_sensor.h +++ b/include/uapi/media/ais/msm_ais_sensor.h @@ -199,6 +199,28 @@ enum msm_sensor_event_idx { #define SENSOR_EVENT_BASE (V4L2_EVENT_PRIVATE_START) #define SENSOR_EVENT_SIGNAL_STATUS (SENSOR_EVENT_BASE + SENSOR_SIGNAL_STATUS) +struct msm_csid_event_data { + uint8_t csid_id; + uint32_t error_status; +}; + +enum msm_csid_event_mask_index { + CSID_EVENT_MASK_INDEX_SIGNAL_ERROR = 2, +}; + +#define CSID_EVENT_SUBS_MASK_NONE 0 + +#define CSID_EVENT_SUBS_MASK_SIGNAL_ERROR \ + (1 << CSID_EVENT_MASK_INDEX_SIGNAL_ERROR) + +enum msm_csid_event_idx { + CSID_SIGNAL_ERROR = 2, + CSID_EVENT_MAX = 15 +}; + +#define CSID_EVENT_BASE (V4L2_EVENT_PRIVATE_START + SENSOR_EVENT_MAX) +#define CSID_EVENT_SIGNAL_ERROR (CSID_EVENT_BASE + CSID_SIGNAL_ERROR) + struct msm_camera_i2c_array_write_config { struct msm_camera_i2c_reg_setting conf_array; uint16_t slave_addr; diff --git a/include/uapi/media/msm_camera.h b/include/uapi/media/msm_camera.h index 39e6927d9b7e..fd0937ffb1e5 100644 --- a/include/uapi/media/msm_camera.h +++ b/include/uapi/media/msm_camera.h @@ -1,4 +1,5 @@ -/* Copyright (c) 2009-2012, 2014-2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2009-2012, 2014-2016, 2018 The Linux Foundation. + * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1386,6 +1387,7 @@ struct msm_camera_csiphy_params { uint16_t lane_mask; uint8_t combo_mode; uint8_t csid_core; + uint64_t data_rate; }; struct msm_camera_csi2_params { diff --git a/include/uapi/media/msm_camsensor_sdk.h b/include/uapi/media/msm_camsensor_sdk.h index 08605aca474d..ac454ca9a7fc 100644 --- a/include/uapi/media/msm_camsensor_sdk.h +++ b/include/uapi/media/msm_camsensor_sdk.h @@ -367,6 +367,7 @@ struct msm_camera_csiphy_params { unsigned char csid_core; unsigned int csiphy_clk; unsigned char csi_3phase; + uint64_t data_rate; }; struct msm_camera_i2c_seq_reg_array { diff --git a/include/uapi/media/msmb_generic_buf_mgr.h b/include/uapi/media/msmb_generic_buf_mgr.h index 2961cae1e7c1..8dad9ae92cf8 100644 --- a/include/uapi/media/msmb_generic_buf_mgr.h +++ b/include/uapi/media/msmb_generic_buf_mgr.h @@ -62,5 +62,7 @@ struct msm_buf_mngr_main_cont_info { _IOWR('V', BASE_VIDIOC_PRIVATE + 40, \ struct msm_camera_private_ioctl_arg) +#define VIDIOC_MSM_BUF_MNGR_BUF_ERROR \ + _IOWR('V', BASE_VIDIOC_PRIVATE + 41, struct msm_buf_mngr_info) #endif diff --git a/init/main.c b/init/main.c index e0390d396e47..84d4bceb1db1 100644 --- a/init/main.c +++ b/init/main.c @@ -88,7 +88,7 @@ #include <asm/setup.h> #include <asm/sections.h> #include <asm/cacheflush.h> - +#include <soc/qcom/boot_stats.h> static int kernel_init(void *); extern void init_IRQ(void); @@ -962,6 +962,7 @@ static int __ref kernel_init(void *unused) numa_default_policy(); flush_delayed_fput(); + place_marker("M : Kernel End"); if (ramdisk_execute_command) { ret = run_init_process(ramdisk_execute_command); diff --git a/kernel/audit.c b/kernel/audit.c index e228b88dfd23..d8e9fba58cbc 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -64,7 +64,6 @@ #include <linux/security.h> 
#endif #include <linux/freezer.h> -#include <linux/tty.h> #include <linux/pid_namespace.h> #include <net/netns/generic.h> @@ -1882,21 +1881,14 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) { const struct cred *cred; char comm[sizeof(tsk->comm)]; - char *tty; + struct tty_struct *tty; if (!ab) return; /* tsk == current */ cred = current_cred(); - - spin_lock_irq(&tsk->sighand->siglock); - if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name) - tty = tsk->signal->tty->name; - else - tty = "(none)"; - spin_unlock_irq(&tsk->sighand->siglock); - + tty = audit_get_tty(tsk); audit_log_format(ab, " ppid=%d pid=%d auid=%u uid=%u gid=%u" " euid=%u suid=%u fsuid=%u" @@ -1912,11 +1904,11 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk) from_kgid(&init_user_ns, cred->egid), from_kgid(&init_user_ns, cred->sgid), from_kgid(&init_user_ns, cred->fsgid), - tty, audit_get_sessionid(tsk)); - + tty ? tty_name(tty) : "(none)", + audit_get_sessionid(tsk)); + audit_put_tty(tty); audit_log_format(ab, " comm="); audit_log_untrustedstring(ab, get_task_comm(comm, tsk)); - audit_log_d_path_exe(ab, tsk->mm); audit_log_task_context(ab); } diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 63f0e495f517..6375465af0a7 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1976,6 +1976,7 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid, { struct audit_buffer *ab; uid_t uid, oldloginuid, loginuid; + struct tty_struct *tty; if (!audit_enabled) return; @@ -1983,14 +1984,17 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid, uid = from_kuid(&init_user_ns, task_uid(current)); oldloginuid = from_kuid(&init_user_ns, koldloginuid); loginuid = from_kuid(&init_user_ns, kloginuid), + tty = audit_get_tty(current); ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN); if (!ab) return; audit_log_format(ab, "pid=%d uid=%u", task_tgid_nr(current), uid); audit_log_task_context(ab); - audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d", - oldloginuid, loginuid, oldsessionid, sessionid, !rc); + audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d", + oldloginuid, loginuid, tty ? 
tty_name(tty) : "(none)", + oldsessionid, sessionid, !rc); + audit_put_tty(tty); audit_log_end(ab); } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 424accd20c2d..dc19b6e210e6 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -673,7 +673,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz union bpf_attr attr = {}; int err; - if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled) + if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN)) return -EPERM; if (!access_ok(VERIFY_READ, uattr, 1)) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c14003840bc5..79e3c21a35d0 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1135,7 +1135,8 @@ static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn) regs[insn->dst_reg].type = UNKNOWN_VALUE; regs[insn->dst_reg].map_ptr = NULL; } - } else { + } else if (BPF_CLASS(insn->code) == BPF_ALU64 || + insn->imm >= 0) { /* case: R = imm * remember the value we stored into this reg */ diff --git a/kernel/events/core.c b/kernel/events/core.c index 322f63370038..656d55d30f8d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -5387,9 +5387,6 @@ static void perf_output_read_one(struct perf_output_handle *handle, __output_copy(handle, values, n * sizeof(u64)); } -/* - * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. - */ static void perf_output_read_group(struct perf_output_handle *handle, struct perf_event *event, u64 enabled, u64 running) @@ -5434,6 +5431,13 @@ static void perf_output_read_group(struct perf_output_handle *handle, #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ PERF_FORMAT_TOTAL_TIME_RUNNING) +/* + * XXX PERF_SAMPLE_READ vs inherited events seems difficult. + * + * The problem is that its both hard and excessively expensive to iterate the + * child list, not to mention that its impossible to IPI the children running + * on another CPU, from interrupt/NMI context. + */ static void perf_output_read(struct perf_output_handle *handle, struct perf_event *event) { @@ -8167,9 +8171,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, local64_set(&hwc->period_left, hwc->sample_period); /* - * we currently do not support PERF_FORMAT_GROUP on inherited events + * We currently do not support PERF_SAMPLE_READ on inherited events. + * See perf_output_read(). */ - if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) + if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)) goto err_ns; if (!has_branch_stack(event)) diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 7da5b674d16e..d97a5430d580 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -427,16 +427,9 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); * modify_user_hw_breakpoint - modify a user-space hardware breakpoint * @bp: the breakpoint structure to modify * @attr: new breakpoint attributes - * @triggered: callback to trigger when we hit the breakpoint - * @tsk: pointer to 'task_struct' of the process to which the address belongs */ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) { - u64 old_addr = bp->attr.bp_addr; - u64 old_len = bp->attr.bp_len; - int old_type = bp->attr.bp_type; - int err = 0; - /* * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it * will not be possible to raise IPIs that invoke __perf_event_disable. 
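The two hw_breakpoint hunks around this point rework modify_user_hw_breakpoint() so the event is simply kept disabled while its new attributes are validated, instead of hand-saving and restoring the old address, length and type. A minimal sketch of how a caller might drive this interface is shown here; it is illustrative only, and the helpers move_watchpoint() and sample_overflow_handler() are hypothetical names, not functions from this patch.

#include <linux/err.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void sample_overflow_handler(struct perf_event *bp,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	pr_info("watchpoint hit at 0x%llx\n", bp->attr.bp_addr);
}

/* Re-arm an existing user watchpoint on a new address. */
static int move_watchpoint(struct task_struct *tsk,
			   unsigned long old_addr, unsigned long new_addr)
{
	struct perf_event_attr attr;
	struct perf_event *bp;
	int ret;

	hw_breakpoint_init(&attr);
	attr.bp_addr = old_addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	bp = register_user_hw_breakpoint(&attr, sample_overflow_handler,
					 NULL, tsk);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	/*
	 * With this patch the breakpoint stays disabled while the new
	 * attributes are validated and is only re-enabled on success.
	 */
	attr = bp->attr;
	attr.bp_addr = new_addr;
	ret = modify_user_hw_breakpoint(bp, &attr);
	if (ret)
		unregister_hw_breakpoint(bp);
	return ret;
}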
@@ -451,27 +444,18 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att bp->attr.bp_addr = attr->bp_addr; bp->attr.bp_type = attr->bp_type; bp->attr.bp_len = attr->bp_len; + bp->attr.disabled = 1; - if (attr->disabled) - goto end; - - err = validate_hw_breakpoint(bp); - if (!err) - perf_event_enable(bp); + if (!attr->disabled) { + int err = validate_hw_breakpoint(bp); - if (err) { - bp->attr.bp_addr = old_addr; - bp->attr.bp_type = old_type; - bp->attr.bp_len = old_len; - if (!bp->attr.disabled) - perf_event_enable(bp); + if (err) + return err; - return err; + perf_event_enable(bp); + bp->attr.disabled = 0; } -end: - bp->attr.disabled = attr->disabled; - return 0; } EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); diff --git a/kernel/exit.c b/kernel/exit.c index 06d54f550c36..222c55208b5c 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -54,6 +54,7 @@ #include <linux/writeback.h> #include <linux/shm.h> #include <linux/kcov.h> +#include <linux/cpufreq_times.h> #include "sched/tune.h" @@ -173,6 +174,9 @@ void release_task(struct task_struct *p) { struct task_struct *leader; int zap_leader; +#ifdef CONFIG_CPU_FREQ_TIMES + cpufreq_task_times_exit(p); +#endif repeat: /* don't need to get the RCU readlock here - the process is dead and * can't be modifying its own credentials. But shut RCU-lockdep up */ diff --git a/kernel/futex.c b/kernel/futex.c index a09c1dd1f659..760a97da1050 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -470,6 +470,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; struct page *page, *page_head; + struct address_space *mapping; int err, ro = 0; /* @@ -555,7 +556,19 @@ again: } #endif - lock_page(page_head); + /* + * The treatment of mapping from this point on is critical. The page + * lock protects many things but in this context the page lock + * stabilizes mapping, prevents inode freeing in the shared + * file-backed region case and guards against movement to swap cache. + * + * Strictly speaking the page lock is not needed in all cases being + * considered here and page lock forces unnecessarily serialization + * From this point on, mapping will be re-verified if necessary and + * page lock will be acquired only if it is unavoidable + */ + + mapping = READ_ONCE(page_head->mapping); /* * If page_head->mapping is NULL, then it cannot be a PageAnon @@ -572,18 +585,31 @@ again: * shmem_writepage move it from filecache to swapcache beneath us: * an unlikely race, but we do need to retry for page_head->mapping. */ - if (!page_head->mapping) { - int shmem_swizzled = PageSwapCache(page_head); + if (unlikely(!mapping)) { + int shmem_swizzled; + + /* + * Page lock is required to identify which special case above + * applies. If this is really a shmem page then the page lock + * will prevent unexpected transitions. + */ + lock_page(page); + shmem_swizzled = PageSwapCache(page) || page->mapping; unlock_page(page_head); put_page(page_head); + if (shmem_swizzled) goto again; + return -EFAULT; } /* * Private mappings are handled in a simple way. * + * If the futex key is stored on an anonymous page, then the associated + * object is the mm which is implicitly pinned by the calling process. + * * NOTE: When userspace waits on a MAP_SHARED mapping, even if * it's a read-only handle, it's expected that futexes attach to * the object not the particular process. 
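The comment above notes that a futex in a MAP_SHARED mapping attaches to the backing object rather than to any one process, which is why the shared-mapping path in the next hunk derives the key from the inode. A small user-space sketch of that case, two processes sharing a futex word through a file mapping, is shown here; it is illustrative only (the file name is arbitrary and error handling is omitted for brevity).

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/futex.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

static long futex(uint32_t *uaddr, int op, uint32_t val)
{
	/* no timeout, no second futex word */
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

int main(void)
{
	int fd = open("/tmp/futex-demo", O_RDWR | O_CREAT, 0600);
	uint32_t *f;

	ftruncate(fd, sizeof(*f));	/* futex word starts out as 0 */
	f = mmap(NULL, sizeof(*f), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (fork() == 0) {
		/* child: sleep in the kernel while *f is still 0 */
		futex(f, FUTEX_WAIT, 0);
		_exit(0);
	}

	sleep(1);			/* give the child time to block   */
	__sync_fetch_and_add(f, 1);	/* change the futex word ...      */
	futex(f, FUTEX_WAKE, 1);	/* ... and wake the blocked child */
	wait(NULL);
	return 0;
}

Both processes map the same inode, so the kernel keys the wait queue on (inode, page offset) and the FUTEX_WAKE finds the waiter even though the two tasks share no mm.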
@@ -601,16 +627,74 @@ again: key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ key->private.mm = mm; key->private.address = address; + + get_futex_key_refs(key); /* implies smp_mb(); (B) */ + } else { + struct inode *inode; + + /* + * The associated futex object in this case is the inode and + * the page->mapping must be traversed. Ordinarily this should + * be stabilised under page lock but it's not strictly + * necessary in this case as we just want to pin the inode, not + * update the radix tree or anything like that. + * + * The RCU read lock is taken as the inode is finally freed + * under RCU. If the mapping still matches expectations then the + * mapping->host can be safely accessed as being a valid inode. + */ + rcu_read_lock(); + + if (READ_ONCE(page_head->mapping) != mapping) { + rcu_read_unlock(); + put_page(page_head); + + goto again; + } + + inode = READ_ONCE(mapping->host); + if (!inode) { + rcu_read_unlock(); + put_page(page_head); + + goto again; + } + + /* + * Take a reference unless it is about to be freed. Previously + * this reference was taken by ihold under the page lock + * pinning the inode in place so i_lock was unnecessary. The + * only way for this check to fail is if the inode was + * truncated in parallel so warn for now if this happens. + * + * We are not calling into get_futex_key_refs() in file-backed + * cases, therefore a successful atomic_inc return below will + * guarantee that get_futex_key() will still imply smp_mb(); (B). + */ + if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) { + rcu_read_unlock(); + put_page(page_head); + + goto again; + } + + /* Should be impossible but lets be paranoid for now */ + if (WARN_ON_ONCE(inode->i_mapping != mapping)) { + err = -EFAULT; + rcu_read_unlock(); + iput(inode); + + goto out; + } + key->both.offset |= FUT_OFF_INODE; /* inode-based key */ - key->shared.inode = page_head->mapping->host; + key->shared.inode = inode; key->shared.pgoff = basepage_index(page); + rcu_read_unlock(); } - get_futex_key_refs(key); /* implies MB (B) */ - out: - unlock_page(page_head); put_page(page_head); return err; } diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 2c2effdb4437..0eb9b064779c 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -854,7 +854,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) * This code is triggered unconditionally. Check the affinity * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out. 
*/ - if (desc->irq_common_data.affinity) + if (cpumask_available(desc->irq_common_data.affinity)) cpumask_copy(mask, desc->irq_common_data.affinity); else valid = false; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 695763516908..bbe9dd0886bd 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -125,7 +125,7 @@ static void *alloc_insn_page(void) return module_alloc(PAGE_SIZE); } -static void free_insn_page(void *page) +void __weak free_insn_page(void *page) { module_memfree(page); } diff --git a/kernel/pid.c b/kernel/pid.c index b17263be9082..5fe7cdb6d05f 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -322,8 +322,10 @@ struct pid *alloc_pid(struct pid_namespace *ns) } if (unlikely(is_child_reaper(pid))) { - if (pid_ns_prepare_proc(ns)) + if (pid_ns_prepare_proc(ns)) { + disable_pid_allocation(ns); goto out_free; + } } get_pid_ns(ns); diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c index d5760c42f042..61d41ca41844 100644 --- a/kernel/printk/braille.c +++ b/kernel/printk/braille.c @@ -2,12 +2,13 @@ #include <linux/kernel.h> #include <linux/console.h> +#include <linux/errno.h> #include <linux/string.h> #include "console_cmdline.h" #include "braille.h" -char *_braille_console_setup(char **str, char **brl_options) +int _braille_console_setup(char **str, char **brl_options) { if (!strncmp(*str, "brl,", 4)) { *brl_options = ""; @@ -15,14 +16,14 @@ char *_braille_console_setup(char **str, char **brl_options) } else if (!strncmp(*str, "brl=", 4)) { *brl_options = *str + 4; *str = strchr(*brl_options, ','); - if (!*str) + if (!*str) { pr_err("need port name after brl=\n"); - else - *((*str)++) = 0; - } else - return NULL; + return -EINVAL; + } + *((*str)++) = 0; + } - return *str; + return 0; } int diff --git a/kernel/printk/braille.h b/kernel/printk/braille.h index 769d771145c8..749a6756843a 100644 --- a/kernel/printk/braille.h +++ b/kernel/printk/braille.h @@ -9,7 +9,14 @@ braille_set_options(struct console_cmdline *c, char *brl_options) c->brl_options = brl_options; } -char * +/* + * Setup console according to braille options. + * Return -EINVAL on syntax error, 0 on success (or no braille option was + * actually given). + * Modifies str to point to the serial options + * Sets brl_options to the parsed braille options. 
+ */ +int _braille_console_setup(char **str, char **brl_options); int @@ -25,10 +32,10 @@ braille_set_options(struct console_cmdline *c, char *brl_options) { } -static inline char * +static inline int _braille_console_setup(char **str, char **brl_options) { - return NULL; + return 0; } static inline int diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 03b59c330bdd..fffc50b0191f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -77,6 +77,7 @@ #include <linux/compiler.h> #include <linux/irq.h> #include <linux/sched/core_ctl.h> +#include <linux/cpufreq_times.h> #include <asm/switch_to.h> #include <asm/tlb.h> @@ -628,7 +629,8 @@ void resched_cpu(int cpu) unsigned long flags; raw_spin_lock_irqsave(&rq->lock, flags); - resched_curr(rq); + if (cpu_online(cpu) || cpu == smp_processor_id()) + resched_curr(rq); raw_spin_unlock_irqrestore(&rq->lock, flags); } @@ -2364,6 +2366,10 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) memset(&p->se.statistics, 0, sizeof(p->se.statistics)); #endif +#ifdef CONFIG_CPU_FREQ_TIMES + cpufreq_task_times_init(p); +#endif + RB_CLEAR_NODE(&p->dl.rb_node); init_dl_task_timer(&p->dl); __dl_clear_params(p); diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 692d1f888f17..bd4ef2bb551e 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -4,6 +4,7 @@ #include <linux/kernel_stat.h> #include <linux/static_key.h> #include <linux/context_tracking.h> +#include <linux/cpufreq_times.h> #include "sched.h" @@ -160,6 +161,11 @@ void account_user_time(struct task_struct *p, cputime_t cputime, /* Account for user time used */ acct_account_cputime(p); + +#ifdef CONFIG_CPU_FREQ_TIMES + /* Account power usage for user time */ + cpufreq_acct_update_power(p, cputime); +#endif } /* @@ -210,6 +216,10 @@ void __account_system_time(struct task_struct *p, cputime_t cputime, /* Account for system time used */ acct_account_cputime(p); +#ifdef CONFIG_CPU_FREQ_TIMES + /* Account power usage for system time */ + cpufreq_acct_update_power(p, cputime); +#endif } /* diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 23e37b0674df..f962ab1eb046 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2402,7 +2402,8 @@ void task_numa_work(struct callback_head *work) return; - down_read(&mm->mmap_sem); + if (!down_read_trylock(&mm->mmap_sem)) + return; vma = find_vma(mm, start); if (!vma) { reset_ptenuma_scan(p); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 05d635c2beec..fb7f36fd6e5b 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2600,7 +2600,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p) if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) queue_push_tasks(rq); #endif /* CONFIG_SMP */ - if (p->prio < rq->curr->prio) + if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq))) resched_curr(rq); } } diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c index 9cff0ab82b63..e24008c098c6 100644 --- a/kernel/time/posix-clock.c +++ b/kernel/time/posix-clock.c @@ -300,14 +300,17 @@ out: static int pc_clock_gettime(clockid_t id, struct timespec *ts) { struct posix_clock_desc cd; + struct timespec64 ts64; int err; err = get_clock_desc(id, &cd); if (err) return err; - if (cd.clk->ops.clock_gettime) - err = cd.clk->ops.clock_gettime(cd.clk, ts); + if (cd.clk->ops.clock_gettime) { + err = cd.clk->ops.clock_gettime(cd.clk, &ts64); + *ts = timespec64_to_timespec(ts64); + } else err = -EOPNOTSUPP; @@ -319,14 +322,17 @@ static int pc_clock_gettime(clockid_t id, struct 
timespec *ts) static int pc_clock_getres(clockid_t id, struct timespec *ts) { struct posix_clock_desc cd; + struct timespec64 ts64; int err; err = get_clock_desc(id, &cd); if (err) return err; - if (cd.clk->ops.clock_getres) - err = cd.clk->ops.clock_getres(cd.clk, ts); + if (cd.clk->ops.clock_getres) { + err = cd.clk->ops.clock_getres(cd.clk, &ts64); + *ts = timespec64_to_timespec(ts64); + } else err = -EOPNOTSUPP; @@ -337,6 +343,7 @@ static int pc_clock_getres(clockid_t id, struct timespec *ts) static int pc_clock_settime(clockid_t id, const struct timespec *ts) { + struct timespec64 ts64 = timespec_to_timespec64(*ts); struct posix_clock_desc cd; int err; @@ -350,7 +357,7 @@ static int pc_clock_settime(clockid_t id, const struct timespec *ts) } if (cd.clk->ops.clock_settime) - err = cd.clk->ops.clock_settime(cd.clk, ts); + err = cd.clk->ops.clock_settime(cd.clk, &ts64); else err = -EOPNOTSUPP; out: @@ -403,29 +410,36 @@ static void pc_timer_gettime(struct k_itimer *kit, struct itimerspec *ts) { clockid_t id = kit->it_clock; struct posix_clock_desc cd; + struct itimerspec64 ts64; if (get_clock_desc(id, &cd)) return; - if (cd.clk->ops.timer_gettime) - cd.clk->ops.timer_gettime(cd.clk, kit, ts); - + if (cd.clk->ops.timer_gettime) { + cd.clk->ops.timer_gettime(cd.clk, kit, &ts64); + *ts = itimerspec64_to_itimerspec(&ts64); + } put_clock_desc(&cd); } static int pc_timer_settime(struct k_itimer *kit, int flags, struct itimerspec *ts, struct itimerspec *old) { + struct itimerspec64 ts64 = itimerspec_to_itimerspec64(ts); clockid_t id = kit->it_clock; struct posix_clock_desc cd; + struct itimerspec64 old64; int err; err = get_clock_desc(id, &cd); if (err) return err; - if (cd.clk->ops.timer_settime) - err = cd.clk->ops.timer_settime(cd.clk, kit, flags, ts, old); + if (cd.clk->ops.timer_settime) { + err = cd.clk->ops.timer_settime(cd.clk, kit, flags, &ts64, &old64); + if (old) + *old = itimerspec64_to_itimerspec(&old64); + } else err = -EOPNOTSUPP; diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index 699aff70bbfa..1986ab6e5943 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c @@ -210,6 +210,11 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate) update_clock_read_data(&rd); + if (sched_clock_timer.function != NULL) { + /* update timeout for clock wrap */ + hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL); + } + r = rate; if (r >= 4000000) { r /= 1000000; diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 83aa1f867b97..998d730401b3 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -16,6 +16,7 @@ #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/kallsyms.h> +#include <linux/nmi.h> #include <asm/uaccess.h> @@ -86,6 +87,9 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base, next_one: i = 0; + + touch_nmi_watchdog(); + raw_spin_lock_irqsave(&base->cpu_base->lock, flags); curr = timerqueue_getnext(&base->active); @@ -197,6 +201,8 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu) { struct clock_event_device *dev = td->evtdev; + touch_nmi_watchdog(); + SEQ_printf(m, "Tick Device: mode: %d\n", td->mode); if (cpu < 0) SEQ_printf(m, "Broadcast device\n"); diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index e9092a0247bf..f2682799c215 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -599,7 +599,7 @@ static int create_trace_kprobe(int argc, char **argv) bool is_return = false, 
is_delete = false; char *symbol = NULL, *event = NULL, *group = NULL; char *arg; - unsigned long offset = 0; + long offset = 0; void *addr = NULL; char buf[MAX_EVENT_NAME_LEN]; @@ -667,7 +667,7 @@ static int create_trace_kprobe(int argc, char **argv) symbol = argv[1]; /* TODO: support .init module functions */ ret = traceprobe_split_symbol_offset(symbol, &offset); - if (ret) { + if (ret || offset < 0 || offset > UINT_MAX) { pr_info("Failed to parse either an address or a symbol.\n"); return ret; } diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 1769a81da8a7..741c00b90fdc 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -293,7 +293,7 @@ static fetch_func_t get_fetch_size_function(const struct fetch_type *type, } /* Split symbol and offset. */ -int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset) +int traceprobe_split_symbol_offset(char *symbol, long *offset) { char *tmp; int ret; @@ -301,13 +301,11 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset) if (!offset) return -EINVAL; - tmp = strchr(symbol, '+'); + tmp = strpbrk(symbol, "+-"); if (tmp) { - /* skip sign because kstrtoul doesn't accept '+' */ - ret = kstrtoul(tmp + 1, 0, offset); + ret = kstrtol(tmp, 0, offset); if (ret) return ret; - *tmp = '\0'; } else *offset = 0; diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index f6398db09114..0afe921df8c8 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -335,7 +335,7 @@ extern int traceprobe_conflict_field_name(const char *name, extern void traceprobe_update_arg(struct probe_arg *arg); extern void traceprobe_free_probe_arg(struct probe_arg *arg); -extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset); +extern int traceprobe_split_symbol_offset(char *symbol, long *offset); extern ssize_t traceprobe_probes_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos, diff --git a/kernel/user.c b/kernel/user.c index b069ccbfb0b0..41e94e453836 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -16,6 +16,7 @@ #include <linux/interrupt.h> #include <linux/export.h> #include <linux/user_namespace.h> +#include <linux/proc_fs.h> #include <linux/proc_ns.h> /* @@ -201,6 +202,7 @@ struct user_struct *alloc_uid(kuid_t uid) } spin_unlock_irq(&uidhash_lock); } + proc_register_uid(uid); return up; @@ -222,6 +224,7 @@ static int __init uid_cache_init(void) spin_lock_irq(&uidhash_lock); uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID)); spin_unlock_irq(&uidhash_lock); + proc_register_uid(GLOBAL_ROOT_UID); return 0; } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index a719a4ad2e74..acccc6c7437f 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4070,6 +4070,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) EXPORT_SYMBOL_GPL(workqueue_set_max_active); /** + * current_work - retrieve %current task's work struct + * + * Determine if %current task is a workqueue worker and what it's working on. + * Useful to find out the context that the %current task is running in. + * + * Return: work struct if %current task is a workqueue worker, %NULL otherwise. + */ +struct work_struct *current_work(void) +{ + struct worker *worker = current_wq_worker(); + + return worker ? worker->current_work : NULL; +} +EXPORT_SYMBOL(current_work); + +/** * current_is_workqueue_rescuer - is %current workqueue rescuer? * * Determine whether %current is a workqueue rescuer. 
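The current_work() helper added above only reports which work item the current kworker is executing. A minimal caller-side sketch, assuming a hypothetical driver-owned work item (my_work and my_driver_teardown are illustrative names, not part of this patch):

#include <linux/workqueue.h>

/* Hypothetical driver-owned work item; not defined by this patch. */
static struct work_struct my_work;

static void my_driver_teardown(void)
{
	/*
	 * cancel_work_sync() waits for a running instance of the work item,
	 * so calling it from within my_work itself would deadlock. Use
	 * current_work() to detect that context and bail out instead.
	 */
	if (current_work() == &my_work)
		return;

	cancel_work_sync(&my_work);
}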
Can be used from diff --git a/lib/ioremap.c b/lib/ioremap.c index 86c8911b0e3a..5323b59ca393 100644 --- a/lib/ioremap.c +++ b/lib/ioremap.c @@ -83,7 +83,8 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, if (ioremap_pmd_enabled() && ((next - addr) == PMD_SIZE) && - IS_ALIGNED(phys_addr + addr, PMD_SIZE)) { + IS_ALIGNED(phys_addr + addr, PMD_SIZE) && + pmd_free_pte_page(pmd)) { if (pmd_set_huge(pmd, phys_addr + addr, prot)) continue; } @@ -109,7 +110,8 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, if (ioremap_pud_enabled() && ((next - addr) == PUD_SIZE) && - IS_ALIGNED(phys_addr + addr, PUD_SIZE)) { + IS_ALIGNED(phys_addr + addr, PUD_SIZE) && + pud_free_pmd_page(pud)) { if (pud_set_huge(pud, phys_addr + addr, prot)) continue; } diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 4c480a20d76c..3081f1234d4e 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -954,7 +954,7 @@ static atomic_t nr_wb_congested[2]; void clear_wb_congested(struct bdi_writeback_congested *congested, int sync) { wait_queue_head_t *wqh = &congestion_wqh[sync]; - enum wb_state bit; + enum wb_congested_state bit; bit = sync ? WB_sync_congested : WB_async_congested; if (test_and_clear_bit(bit, &congested->state)) @@ -967,7 +967,7 @@ EXPORT_SYMBOL(clear_wb_congested); void set_wb_congested(struct bdi_writeback_congested *congested, int sync) { - enum wb_state bit; + enum wb_congested_state bit; bit = sync ? WB_sync_congested : WB_async_congested; if (!test_and_set_bit(bit, &congested->state)) diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index ca4dc9031073..ac9791dd4768 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -29,6 +29,7 @@ #include <linux/net_tstamp.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> +#include <linux/phy.h> #include <net/arp.h> #include "vlan.h" @@ -559,8 +560,7 @@ static int vlan_dev_init(struct net_device *dev) NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM | NETIF_F_ALL_FCOE; - dev->features |= real_dev->vlan_features | NETIF_F_LLTX | - NETIF_F_GSO_SOFTWARE; + dev->features |= dev->hw_features | NETIF_F_LLTX; dev->gso_max_size = real_dev->gso_max_size; if (dev->features & NETIF_F_VLAN_FEATURES) netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n"); @@ -655,8 +655,11 @@ static int vlan_ethtool_get_ts_info(struct net_device *dev, { const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops; + struct phy_device *phydev = vlan->real_dev->phydev; - if (ops->get_ts_info) { + if (phydev && phydev->drv && phydev->drv->ts_info) { + return phydev->drv->ts_info(phydev, info); + } else if (ops->get_ts_info) { return ops->get_ts_info(vlan->real_dev, info); } else { info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index f5d2fe5e31cc..c5208136e3fc 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -1603,10 +1603,22 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, /* if yes, the client has roamed and we have * to unclaim it. */ - batadv_handle_unclaim(bat_priv, primary_if, - primary_if->net_dev->dev_addr, - ethhdr->h_source, vid); - goto allow; + if (batadv_has_timed_out(claim->lasttime, 100)) { + /* only unclaim if the last claim entry is + * older than 100 ms to make sure we really + * have a roaming client here. 
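The unclaim path above only fires once the last claim entry is at least 100 ms old, relying on batman-adv's jiffies-based age helper. A generic sketch of that kind of check, assuming entry_timed_out() is an illustrative stand-in rather than the in-tree batadv_has_timed_out():

#include <linux/jiffies.h>
#include <linux/types.h>

/*
 * Returns true once "last_seen" is older than "timeout_ms". This mirrors
 * the shape of the batman-adv helper but is only an illustration.
 */
static inline bool entry_timed_out(unsigned long last_seen,
				   unsigned int timeout_ms)
{
	return time_is_before_jiffies(last_seen + msecs_to_jiffies(timeout_ms));
}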
+ */ + batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Roaming client %pM detected. Unclaim it.\n", + ethhdr->h_source); + batadv_handle_unclaim(bat_priv, primary_if, + primary_if->net_dev->dev_addr, + ethhdr->h_source, vid); + goto allow; + } else { + batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Race for claim %pM detected. Drop packet.\n", + ethhdr->h_source); + goto handled; + } } /* check if it is a multicast/broadcast frame */ diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index da4078651c22..5b95477c3453 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -716,6 +716,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req) { struct hci_dev *hdev = req->hdev; u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + bool changed = false; /* If Connectionless Slave Broadcast master role is supported * enable all necessary events for it. @@ -725,6 +726,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req) events[1] |= 0x80; /* Synchronization Train Complete */ events[2] |= 0x10; /* Slave Page Response Timeout */ events[2] |= 0x20; /* CSB Channel Map Change */ + changed = true; } /* If Connectionless Slave Broadcast slave role is supported @@ -735,13 +737,24 @@ static void hci_set_event_mask_page_2(struct hci_request *req) events[2] |= 0x02; /* CSB Receive */ events[2] |= 0x04; /* CSB Timeout */ events[2] |= 0x08; /* Truncated Page Complete */ + changed = true; } /* Enable Authenticated Payload Timeout Expired event if supported */ - if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) + if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { events[2] |= 0x80; + changed = true; + } - hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events); + /* Some Broadcom based controllers indicate support for Set Event + * Mask Page 2 command, but then actually do not support it. Since + * the default value is all bits set to zero, the command is only + * required if the event mask has to be changed. In case no change + * to the event mask is needed, skip this command. 
+ */ + if (changed) + hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, + sizeof(events), events); } static void hci_init3_req(struct hci_request *req, unsigned long opt) diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index b166b3e441a5..3c9689e10721 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -2251,8 +2251,14 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) else sec_level = authreq_to_seclevel(auth); - if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) + if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) { + /* If link is already encrypted with sufficient security we + * still need refresh encryption as per Core Spec 5.0 Vol 3, + * Part H 2.4.6 + */ + smp_ltk_encrypt(conn, hcon->sec_level); return 0; + } if (sec_level > hcon->pending_sec_level) hcon->pending_sec_level = sec_level; diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index efe415ad842a..83bb695f9645 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -229,6 +229,9 @@ static ssize_t brport_show(struct kobject *kobj, struct brport_attribute *brport_attr = to_brport_attr(attr); struct net_bridge_port *p = to_brport(kobj); + if (!brport_attr->show) + return -EINVAL; + return brport_attr->show(p, buf); } diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c index 9024283d2bca..9adf16258cab 100644 --- a/net/bridge/netfilter/ebt_among.c +++ b/net/bridge/netfilter/ebt_among.c @@ -172,18 +172,69 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par) return true; } +static bool poolsize_invalid(const struct ebt_mac_wormhash *w) +{ + return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple)); +} + +static bool wormhash_offset_invalid(int off, unsigned int len) +{ + if (off == 0) /* not present */ + return false; + + if (off < (int)sizeof(struct ebt_among_info) || + off % __alignof__(struct ebt_mac_wormhash)) + return true; + + off += sizeof(struct ebt_mac_wormhash); + + return off > len; +} + +static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b) +{ + if (a == 0) + a = sizeof(struct ebt_among_info); + + return ebt_mac_wormhash_size(wh) + a == b; +} + static int ebt_among_mt_check(const struct xt_mtchk_param *par) { const struct ebt_among_info *info = par->matchinfo; const struct ebt_entry_match *em = container_of(par->matchinfo, const struct ebt_entry_match, data); - int expected_length = sizeof(struct ebt_among_info); + unsigned int expected_length = sizeof(struct ebt_among_info); const struct ebt_mac_wormhash *wh_dst, *wh_src; int err; + if (expected_length > em->match_size) + return -EINVAL; + + if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) || + wormhash_offset_invalid(info->wh_src_ofs, em->match_size)) + return -EINVAL; + wh_dst = ebt_among_wh_dst(info); - wh_src = ebt_among_wh_src(info); + if (poolsize_invalid(wh_dst)) + return -EINVAL; + expected_length += ebt_mac_wormhash_size(wh_dst); + if (expected_length > em->match_size) + return -EINVAL; + + wh_src = ebt_among_wh_src(info); + if (poolsize_invalid(wh_src)) + return -EINVAL; + + if (info->wh_src_ofs < info->wh_dst_ofs) { + if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs)) + return -EINVAL; + } else { + if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs)) + return -EINVAL; + } + expected_length += ebt_mac_wormhash_size(wh_src); if (em->match_size != EBT_ALIGN(expected_length)) { diff --git a/net/bridge/netfilter/ebtables.c 
b/net/bridge/netfilter/ebtables.c index f46ca417bf2d..50b76011f470 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -2021,7 +2021,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, if (match_kern) match_kern->match_size = ret; - WARN_ON(type == EBT_COMPAT_TARGET && size_left); + if (WARN_ON(type == EBT_COMPAT_TARGET && size_left)) + return -EINVAL; + match32 = (struct compat_ebt_entry_mwt *) buf; } @@ -2078,6 +2080,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, * * offsets are relative to beginning of struct ebt_entry (i.e., 0). */ + for (i = 0; i < 4 ; ++i) { + if (offsets[i] >= *total) + return -EINVAL; + if (i == 0) + continue; + if (offsets[i-1] > offsets[i]) + return -EINVAL; + } + for (i = 0, j = 1 ; j < 4 ; j++, i++) { struct compat_ebt_entry_mwt *match32; unsigned int size; diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index bc95e48d5cfb..378c9ed00d40 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -295,6 +295,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end) u32 yes; struct crush_rule *r; + err = -EINVAL; ceph_decode_32_safe(p, end, yes, bad); if (!yes) { dout("crush_decode NO rule %d off %x %p to %p\n", diff --git a/net/core/dev.c b/net/core/dev.c index e8eb30fc2d7b..154da3b1348b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -991,7 +991,7 @@ bool dev_valid_name(const char *name) { if (*name == '\0') return false; - if (strlen(name) >= IFNAMSIZ) + if (strnlen(name, IFNAMSIZ) == IFNAMSIZ) return false; if (!strcmp(name, ".") || !strcmp(name, "..")) return false; @@ -2185,8 +2185,11 @@ EXPORT_SYMBOL(netif_set_xps_queue); */ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) { + bool disabling; int rc; + disabling = txq < dev->real_num_tx_queues; + if (txq < 1 || txq > dev->num_tx_queues) return -EINVAL; @@ -2202,15 +2205,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) if (dev->num_tc) netif_setup_tc(dev, txq); - if (txq < dev->real_num_tx_queues) { + dev->real_num_tx_queues = txq; + + if (disabling) { + synchronize_net(); qdisc_reset_all_tx_gt(dev, txq); #ifdef CONFIG_XPS netif_reset_xps_queues_gt(dev, txq); #endif } + } else { + dev->real_num_tx_queues = txq; } - dev->real_num_tx_queues = txq; return 0; } EXPORT_SYMBOL(netif_set_real_num_tx_queues); @@ -2510,7 +2517,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth) if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) return 0; - eth = (struct ethhdr *)skb_mac_header(skb); + eth = (struct ethhdr *)skb->data; type = eth->h_proto; } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index eff329e9c23b..e713c75695f9 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1132,10 +1132,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, lladdr = neigh->ha; } - if (new & NUD_CONNECTED) - neigh->confirmed = jiffies; - neigh->updated = jiffies; - /* If entry was valid and address is not changed, do not change entry state, if new one is STALE. */ @@ -1159,6 +1155,16 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, } } + /* Update timestamps only once we know we will make a change to the + * neighbour entry. Otherwise we risk to move the locktime window with + * noop updates and ignore relevant ARP updates. 
+ */ + if (new != old || lladdr != neigh->ha) { + if (new & NUD_CONNECTED) + neigh->confirmed = jiffies; + neigh->updated = jiffies; + } + if (new != old) { neigh_del_timer(neigh); if (new & NUD_PROBE) diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index b5c351d2830b..ccd20669ac00 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -310,6 +310,25 @@ out_undo: goto out; } +static int __net_init net_defaults_init_net(struct net *net) +{ + net->core.sysctl_somaxconn = SOMAXCONN; + return 0; +} + +static struct pernet_operations net_defaults_ops = { + .init = net_defaults_init_net, +}; + +static __init int net_defaults_init(void) +{ + if (register_pernet_subsys(&net_defaults_ops)) + panic("Cannot initialize net default settings"); + + return 0; +} + +core_initcall(net_defaults_init); #ifdef CONFIG_NET_NS static struct kmem_cache *net_cachep; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index aa9b46963bcb..3c5e3c022232 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2577,7 +2577,8 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) { int pos = skb_headlen(skb); - skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; + skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags & + SKBTX_SHARED_FRAG; if (len < pos) /* Split line is inside header. */ skb_split_inside_header(skb, skb1, len, pos); else /* Second chunk has no header, nothing to copy. */ @@ -3141,8 +3142,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, skb_copy_from_linear_data_offset(head_skb, offset, skb_put(nskb, hsize), hsize); - skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags & - SKBTX_SHARED_FRAG; + skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags & + SKBTX_SHARED_FRAG; while (pos < offset + len) { if (i >= nfrags) { @@ -3355,24 +3356,18 @@ void __init skb_init(void) NULL); } -/** - * skb_to_sgvec - Fill a scatter-gather list from a socket buffer - * @skb: Socket buffer containing the buffers to be mapped - * @sg: The scatter-gather list to map into - * @offset: The offset into the buffer's contents to start mapping - * @len: Length of buffer space to be mapped - * - * Fill the specified scatter-gather list with mappings/pointers into a - * region of the buffer space attached to a socket buffer. 
- */ static int -__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) +__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, + unsigned int recursion_level) { int start = skb_headlen(skb); int i, copy = start - offset; struct sk_buff *frag_iter; int elt = 0; + if (unlikely(recursion_level >= 24)) + return -EMSGSIZE; + if (copy > 0) { if (copy > len) copy = len; @@ -3391,6 +3386,8 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); if ((copy = end - offset) > 0) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + if (unlikely(elt && sg_is_last(&sg[elt - 1]))) + return -EMSGSIZE; if (copy > len) copy = len; @@ -3405,16 +3402,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) } skb_walk_frags(skb, frag_iter) { - int end; + int end, ret; WARN_ON(start > offset + len); end = start + frag_iter->len; if ((copy = end - offset) > 0) { + if (unlikely(elt && sg_is_last(&sg[elt - 1]))) + return -EMSGSIZE; + if (copy > len) copy = len; - elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, - copy); + ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, + copy, recursion_level + 1); + if (unlikely(ret < 0)) + return ret; + elt += ret; if ((len -= copy) == 0) return elt; offset += copy; @@ -3425,6 +3428,31 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) return elt; } +/** + * skb_to_sgvec - Fill a scatter-gather list from a socket buffer + * @skb: Socket buffer containing the buffers to be mapped + * @sg: The scatter-gather list to map into + * @offset: The offset into the buffer's contents to start mapping + * @len: Length of buffer space to be mapped + * + * Fill the specified scatter-gather list with mappings/pointers into a + * region of the buffer space attached to a socket buffer. Returns either + * the number of scatterlist items used, or -EMSGSIZE if the contents + * could not fit. + */ +int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) +{ + int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); + + if (nsg <= 0) + return nsg; + + sg_mark_end(&sg[nsg - 1]); + + return nsg; +} +EXPORT_SYMBOL_GPL(skb_to_sgvec); + /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given * sglist without mark the sg which contain last skb data as the end. 
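With this change skb_to_sgvec() can fail with -EMSGSIZE (overly deep fraglists or a scatterlist that is too short), so callers must check the return value instead of assuming success; the ah4/esp4/ah6/esp6 hunks elsewhere in this patch do exactly that. A hedged caller-side sketch, where MY_MAX_SG and my_map_skb() are illustrative assumptions:

#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#define MY_MAX_SG 32	/* illustrative scatterlist size */

static int my_map_skb(struct sk_buff *skb, struct scatterlist *sg)
{
	int nsg;

	sg_init_table(sg, MY_MAX_SG);

	/* Negative return (-EMSGSIZE) means the skb did not fit the table. */
	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	if (nsg < 0)
		return nsg;

	/* nsg entries are valid and the last one has been marked as end. */
	return nsg;
}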
* So the caller can mannipulate sg list as will when padding new data after @@ -3447,19 +3475,11 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) { - return __skb_to_sgvec(skb, sg, offset, len); + return __skb_to_sgvec(skb, sg, offset, len, 0); } EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); -int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) -{ - int nsg = __skb_to_sgvec(skb, sg, offset, len); - - sg_mark_end(&sg[nsg - 1]); - return nsg; -} -EXPORT_SYMBOL_GPL(skb_to_sgvec); /** * skb_cow_data - Check that a socket buffer's data buffers are writable @@ -3597,7 +3617,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) skb_queue_tail(&sk->sk_error_queue, skb); if (!sock_flag(sk, SOCK_DEAD)) - sk->sk_data_ready(sk); + sk->sk_error_report(sk); return 0; } EXPORT_SYMBOL(sock_queue_err_skb); @@ -3741,7 +3761,8 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, return; if (tsonly) { - skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags; + skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & + SKBTX_ANY_TSTAMP; skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; } diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 6578a0a2f708..32898247d8bf 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -429,8 +429,6 @@ static __net_init int sysctl_core_net_init(struct net *net) { struct ctl_table *tbl; - net->core.sysctl_somaxconn = SOMAXCONN; - tbl = netns_core_table; if (!net_eq(net, &init_net)) { tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL); diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 9d43c1f40274..ff3b058cf58c 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -789,6 +789,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (skb == NULL) goto out_release; + if (sk->sk_state == DCCP_CLOSED) { + rc = -ENOTCONN; + goto out_discard; + } + skb_reserve(skb, sk->sk_prot->max_header); rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc != 0) diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index 20c49c724ba0..e8b279443d37 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -206,9 +206,13 @@ static inline void lowpan_netlink_fini(void) static int lowpan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *wdev = netdev_notifier_info_to_dev(ptr); + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct wpan_dev *wpan_dev; - if (wdev->type != ARPHRD_IEEE802154) + if (ndev->type != ARPHRD_IEEE802154) + return NOTIFY_DONE; + wpan_dev = ndev->ieee802154_ptr; + if (!wpan_dev) goto out; switch (event) { @@ -217,8 +221,8 @@ static int lowpan_device_event(struct notifier_block *unused, * also delete possible lowpan interfaces which belongs * to the wpan interface. 
*/ - if (wdev->ieee802154_ptr->lowpan_dev) - lowpan_dellink(wdev->ieee802154_ptr->lowpan_dev, NULL); + if (wpan_dev->lowpan_dev) + lowpan_dellink(wpan_dev->lowpan_dev, NULL); break; default: break; diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index a548be247e15..47b397264f24 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -302,12 +302,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) skb->sk = sk; skb->protocol = htons(ETH_P_IEEE802154); - dev_put(dev); - err = dev_queue_xmit(skb); if (err > 0) err = net_xmit_errno(err); + dev_put(dev); + return err ?: size; out_skb: @@ -689,12 +689,12 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) skb->sk = sk; skb->protocol = htons(ETH_P_IEEE802154); - dev_put(dev); - err = dev_queue_xmit(skb); if (err > 0) err = net_xmit_errno(err); + dev_put(dev); + return err ?: size; out_skb: diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c index b4506280e3e4..bfb76a84be73 100644 --- a/net/ipc_router/ipc_router_core.c +++ b/net/ipc_router/ipc_router_core.c @@ -224,6 +224,25 @@ void msm_ipc_router_set_ws_allowed(bool flag) is_wakeup_source_allowed = flag; } +/** + * is_sensor_port() - Check if the remote port is sensor service or not + * @rport: Pointer to the remote port. + * + * Return: true if the remote port is sensor service else false. + */ +static int is_sensor_port(struct msm_ipc_router_remote_port *rport) +{ + u32 svcid = 0; + + if (rport && rport->server) { + svcid = rport->server->name.service; + if (svcid == 400 || (svcid >= 256 && svcid <= 320)) + return true; + } + + return false; +} + static void init_routing_table(void) { int i; @@ -277,7 +296,7 @@ static uint32_t ipc_router_calc_checksum(union rr_control_msg *msg) */ static void skb_copy_to_log_buf(struct sk_buff_head *skb_head, unsigned int pl_len, unsigned int hdr_offset, - uint64_t *log_buf) + unsigned char *log_buf) { struct sk_buff *temp_skb; unsigned int copied_len = 0, copy_len = 0; @@ -357,7 +376,8 @@ static void ipc_router_log_msg(void *log_ctx, uint32_t xchng_type, else if (hdr->version == IPC_ROUTER_V2) hdr_offset = sizeof(struct rr_header_v2); } - skb_copy_to_log_buf(skb_head, buf_len, hdr_offset, &pl_buf); + skb_copy_to_log_buf(skb_head, buf_len, hdr_offset, + (unsigned char *)&pl_buf); if (port_ptr && rport_ptr && (port_ptr->type == CLIENT_PORT) && (rport_ptr->server != NULL)) { @@ -2730,7 +2750,6 @@ static void do_read_data(struct work_struct *work) struct rr_packet *pkt = NULL; struct msm_ipc_port *port_ptr; struct msm_ipc_router_remote_port *rport_ptr; - int ret; struct msm_ipc_router_xprt_info *xprt_info = container_of(work, @@ -2738,16 +2757,7 @@ static void do_read_data(struct work_struct *work) read_data); while ((pkt = rr_read(xprt_info)) != NULL) { - if (pkt->length < calc_rx_header_size(xprt_info) || - pkt->length > MAX_IPC_PKT_SIZE) { - IPC_RTR_ERR("%s: Invalid pkt length %d\n", - __func__, pkt->length); - goto read_next_pkt1; - } - ret = extract_header(pkt); - if (ret < 0) - goto read_next_pkt1; hdr = &(pkt->hdr); if ((hdr->dst_node_id != IPC_ROUTER_NID_LOCAL) && @@ -4194,6 +4204,7 @@ void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt, { struct msm_ipc_router_xprt_info *xprt_info = xprt->priv; struct msm_ipc_router_xprt_work *xprt_work; + struct msm_ipc_router_remote_port *rport_ptr = NULL; struct rr_packet *pkt; int ret; @@ -4246,16 +4257,40 @@ void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt, if (!pkt) return; + if 
(pkt->length < calc_rx_header_size(xprt_info) || + pkt->length > MAX_IPC_PKT_SIZE) { + IPC_RTR_ERR("%s: Invalid pkt length %d\n", + __func__, pkt->length); + release_pkt(pkt); + return; + } + + ret = extract_header(pkt); + if (ret < 0) { + release_pkt(pkt); + return; + } + pkt->ws_need = false; + + if (pkt->hdr.type == IPC_ROUTER_CTRL_CMD_DATA) + rport_ptr = ipc_router_get_rport_ref(pkt->hdr.src_node_id, + pkt->hdr.src_port_id); + mutex_lock(&xprt_info->rx_lock_lhb2); list_add_tail(&pkt->list, &xprt_info->pkt_list); - if (!xprt_info->dynamic_ws) { - __pm_stay_awake(&xprt_info->ws); - pkt->ws_need = true; - } else { - if (is_wakeup_source_allowed) { + /* check every pkt is from SENSOR services or not and + * avoid holding both edge and port specific wake-up sources + */ + if (!is_sensor_port(rport_ptr)) { + if (!xprt_info->dynamic_ws) { __pm_stay_awake(&xprt_info->ws); pkt->ws_need = true; + } else { + if (is_wakeup_source_allowed) { + __pm_stay_awake(&xprt_info->ws); + pkt->ws_need = true; + } } } mutex_unlock(&xprt_info->rx_lock_lhb2); diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 22377c8ff14b..e8f862358518 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -220,7 +220,9 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); sg_init_table(sg, nfrags + sglists); - skb_to_sgvec_nomark(skb, sg, 0, skb->len); + err = skb_to_sgvec_nomark(skb, sg, 0, skb->len); + if (unlikely(err < 0)) + goto out_free; if (x->props.flags & XFRM_STATE_ESN) { /* Attach seqhi sg right after packet payload */ @@ -393,7 +395,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) skb_push(skb, ihl); sg_init_table(sg, nfrags + sglists); - skb_to_sgvec_nomark(skb, sg, 0, skb->len); + err = skb_to_sgvec_nomark(skb, sg, 0, skb->len); + if (unlikely(err < 0)) + goto out_free; if (x->props.flags & XFRM_STATE_ESN) { /* Attach seqhi sg right after packet payload */ diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index cb5eb649ad5f..bfa79831873f 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -437,7 +437,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev) /*unsigned long now; */ struct net *net = dev_net(dev); - rt = ip_route_output(net, sip, tip, 0, 0); + rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev)); if (IS_ERR(rt)) return 1; if (rt->dst.dev != dev) { @@ -658,6 +658,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb) unsigned char *arp_ptr; struct rtable *rt; unsigned char *sha; + unsigned char *tha = NULL; __be32 sip, tip; u16 dev_type = dev->type; int addr_type; @@ -729,6 +730,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb) break; #endif default: + tha = arp_ptr; arp_ptr += dev->addr_len; } memcpy(&tip, arp_ptr, 4); @@ -839,8 +841,18 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb) It is possible, that this option should be enabled for some devices (strip is candidate) */ - is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip && - addr_type == RTN_UNICAST; + is_garp = tip == sip && addr_type == RTN_UNICAST; + + /* Unsolicited ARP _replies_ also require target hwaddr to be + * the same as source. + */ + if (is_garp && arp->ar_op == htons(ARPOP_REPLY)) + is_garp = + /* IPv4 over IEEE 1394 doesn't provide target + * hardware address field in its ARP payload. 
+ */ + tha && + !memcmp(tha, sha, dev->addr_len); if (!n && ((arp->ar_op == htons(ARPOP_REPLY) && diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 20fb25e3027b..3d8021d55336 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -268,10 +268,11 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) esph->spi = x->id.spi; sg_init_table(sg, nfrags); - skb_to_sgvec(skb, sg, - (unsigned char *)esph - skb->data, - assoclen + ivlen + clen + alen); - + err = skb_to_sgvec(skb, sg, + (unsigned char *)esph - skb->data, + assoclen + ivlen + clen + alen); + if (unlikely(err < 0)) + goto error; aead_request_set_crypt(req, sg, sg, ivlen + clen, iv); aead_request_set_ad(req, assoclen); @@ -481,7 +482,9 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) } sg_init_table(sg, nfrags); - skb_to_sgvec(skb, sg, 0, skb->len); + err = skb_to_sgvec(skb, sg, 0, skb->len); + if (unlikely(err < 0)) + goto out; aead_request_set_crypt(req, sg, sg, elen + ivlen, iv); aead_request_set_ad(req, assoclen); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 313e3c11a15a..44abc52bae13 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -640,6 +640,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi) fi->fib_nh, cfg)) return 1; } +#ifdef CONFIG_IP_ROUTE_CLASSID + if (cfg->fc_flow && + cfg->fc_flow != fi->fib_nh->nh_tclassid) + return 1; +#endif if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) && (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw)) return 0; diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index c5fb2f694ed0..b34fa1bb278f 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -119,6 +119,9 @@ out: static bool inet_fragq_should_evict(const struct inet_frag_queue *q) { + if (!hlist_unhashed(&q->list_evictor)) + return false; + return q->net->low_thresh == 0 || frag_mem_limit(q->net) >= q->net->low_thresh; } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index d35509212013..1b93ea766916 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -241,7 +241,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg); if (!ipv6_addr_v4mapped(&src_info->ipi6_addr)) return -EINVAL; - ipc->oif = src_info->ipi6_ifindex; + if (src_info->ipi6_ifindex) + ipc->oif = src_info->ipi6_ifindex; ipc->addr = src_info->ipi6_addr.s6_addr32[3]; continue; } @@ -264,7 +265,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) return -EINVAL; info = (struct in_pktinfo *)CMSG_DATA(cmsg); - ipc->oif = info->ipi_ifindex; + if (info->ipi_ifindex) + ipc->oif = info->ipi_ifindex; ipc->addr = info->ipi_spec_dst.s_addr; break; } diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 80e2d1b0c08c..3d62feb65932 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -253,13 +253,14 @@ static struct net_device *__ip_tunnel_create(struct net *net, struct net_device *dev; char name[IFNAMSIZ]; - if (parms->name[0]) + err = -E2BIG; + if (parms->name[0]) { + if (!dev_valid_name(parms->name)) + goto failed; strlcpy(name, parms->name, IFNAMSIZ); - else { - if (strlen(ops->kind) > (IFNAMSIZ - 3)) { - err = -E2BIG; + } else { + if (strlen(ops->kind) > (IFNAMSIZ - 3)) goto failed; - } strlcpy(name, ops->kind, IFNAMSIZ); strncat(name, "%d", 2); } diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index c3776ff6749f..699f8a5457a3 
100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -23,7 +23,8 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t struct rtable *rt; struct flowi4 fl4 = {}; __be32 saddr = iph->saddr; - __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; + const struct sock *sk = skb_to_full_sk(skb); + __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0; unsigned int hh_len; if (addr_type == RTN_UNSPEC) @@ -39,7 +40,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t fl4.daddr = iph->daddr; fl4.saddr = saddr; fl4.flowi4_tos = RT_TOS(iph->tos); - fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; + fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0; fl4.flowi4_mark = skb->mark; fl4.flowi4_flags = flags; rt = ip_route_output_key(net, &fl4); @@ -58,7 +59,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) { struct dst_entry *dst = skb_dst(skb); skb_dst_set(skb, NULL); - dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0); + dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0); if (IS_ERR(dst)) return PTR_ERR(dst); skb_dst_set(skb, dst); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 4cfcc22f7430..f51b32ed353c 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -329,6 +329,10 @@ unsigned int arpt_do_table(struct sk_buff *skb, } if (table_base + v != arpt_next_entry(e)) { + if (unlikely(stackidx >= private->stacksize)) { + verdict = NF_DROP; + break; + } jumpstack[stackidx++] = e; } @@ -507,17 +511,15 @@ static inline int check_target(struct arpt_entry *e, const char *name) } static inline int -find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) +find_check_entry(struct arpt_entry *e, const char *name, unsigned int size, + struct xt_percpu_counter_alloc_state *alloc_state) { struct xt_entry_target *t; struct xt_target *target; - unsigned long pcnt; int ret; - pcnt = xt_percpu_counter_alloc(); - if (IS_ERR_VALUE(pcnt)) + if (!xt_percpu_counter_alloc(alloc_state, &e->counters)) return -ENOMEM; - e->counters.pcnt = pcnt; t = arpt_get_target(e); target = xt_request_find_target(NFPROTO_ARP, t->u.user.name, @@ -536,7 +538,7 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size) err: module_put(t->u.kernel.target->me); out: - xt_percpu_counter_free(e->counters.pcnt); + xt_percpu_counter_free(&e->counters); return ret; } @@ -624,7 +626,7 @@ static inline void cleanup_entry(struct arpt_entry *e) if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); - xt_percpu_counter_free(e->counters.pcnt); + xt_percpu_counter_free(&e->counters); } /* Checks and translates the user-supplied table segment (held in @@ -633,6 +635,7 @@ static inline void cleanup_entry(struct arpt_entry *e) static int translate_table(struct xt_table_info *newinfo, void *entry0, const struct arpt_replace *repl) { + struct xt_percpu_counter_alloc_state alloc_state = { 0 }; struct arpt_entry *iter; unsigned int *offsets; unsigned int i; @@ -706,7 +709,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0, /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { - ret = find_check_entry(iter, repl->name, repl->size); + ret = find_check_entry(iter, repl->name, repl->size, + &alloc_state); if (ret != 0) break; ++i; diff --git a/net/ipv4/netfilter/ip_tables.c 
b/net/ipv4/netfilter/ip_tables.c index a98173d1ea97..dac62b5e7fe3 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -408,6 +408,10 @@ ipt_do_table(struct sk_buff *skb, } if (table_base + v != ipt_next_entry(e) && !(e->ip.flags & IPT_F_GOTO)) { + if (unlikely(stackidx >= private->stacksize)) { + verdict = NF_DROP; + break; + } jumpstack[stackidx++] = e; pr_debug("Pushed %p into pos %u\n", e, stackidx - 1); @@ -645,7 +649,8 @@ static int check_target(struct ipt_entry *e, struct net *net, const char *name) static int find_check_entry(struct ipt_entry *e, struct net *net, const char *name, - unsigned int size) + unsigned int size, + struct xt_percpu_counter_alloc_state *alloc_state) { struct xt_entry_target *t; struct xt_target *target; @@ -653,12 +658,9 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, unsigned int j; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; - unsigned long pcnt; - pcnt = xt_percpu_counter_alloc(); - if (IS_ERR_VALUE(pcnt)) + if (!xt_percpu_counter_alloc(alloc_state, &e->counters)) return -ENOMEM; - e->counters.pcnt = pcnt; j = 0; mtpar.net = net; @@ -697,7 +699,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, cleanup_match(ematch, net); } - xt_percpu_counter_free(e->counters.pcnt); + xt_percpu_counter_free(&e->counters); return ret; } @@ -793,7 +795,7 @@ cleanup_entry(struct ipt_entry *e, struct net *net) if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); - xt_percpu_counter_free(e->counters.pcnt); + xt_percpu_counter_free(&e->counters); } /* Checks and translates the user-supplied table segment (held in @@ -802,6 +804,7 @@ static int translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, const struct ipt_replace *repl) { + struct xt_percpu_counter_alloc_state alloc_state = { 0 }; struct ipt_entry *iter; unsigned int *offsets; unsigned int i; @@ -871,7 +874,8 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { - ret = find_check_entry(iter, net, repl->name, repl->size); + ret = find_check_entry(iter, net, repl->name, repl->size, + &alloc_state); if (ret != 0) break; ++i; diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index 574f7ebba0b6..ac8342dcb55e 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c @@ -252,16 +252,16 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, if (set_h245_addr(skb, protoff, data, dataoff, taddr, &ct->tuplehash[!dir].tuple.dst.u3, htons((port & htons(1)) ? 
nated_port + 1 : - nated_port)) == 0) { - /* Save ports */ - info->rtp_port[i][dir] = rtp_port; - info->rtp_port[i][!dir] = htons(nated_port); - } else { + nated_port))) { nf_ct_unexpect_related(rtp_exp); nf_ct_unexpect_related(rtcp_exp); return -1; } + /* Save ports */ + info->rtp_port[i][dir] = rtp_port; + info->rtp_port[i][!dir] = htons(nated_port); + /* Success */ pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n", &rtp_exp->tuple.src.u3.ip, @@ -370,15 +370,15 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct, /* Modify signal */ if (set_h225_addr(skb, protoff, data, dataoff, taddr, &ct->tuplehash[!dir].tuple.dst.u3, - htons(nated_port)) == 0) { - /* Save ports */ - info->sig_port[dir] = port; - info->sig_port[!dir] = htons(nated_port); - } else { + htons(nated_port))) { nf_ct_unexpect_related(exp); return -1; } + /* Save ports */ + info->sig_port[dir] = port; + info->sig_port[!dir] = htons(nated_port); + pr_debug("nf_nat_q931: expect H.245 %pI4:%hu->%pI4:%hu\n", &exp->tuple.src.u3.ip, ntohs(exp->tuple.src.u.tcp.port), @@ -462,24 +462,27 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct, /* Modify signal */ if (set_h225_addr(skb, protoff, data, 0, &taddr[idx], &ct->tuplehash[!dir].tuple.dst.u3, - htons(nated_port)) == 0) { - /* Save ports */ - info->sig_port[dir] = port; - info->sig_port[!dir] = htons(nated_port); - - /* Fix for Gnomemeeting */ - if (idx > 0 && - get_h225_addr(ct, *data, &taddr[0], &addr, &port) && - (ntohl(addr.ip) & 0xff000000) == 0x7f000000) { - set_h225_addr(skb, protoff, data, 0, &taddr[0], - &ct->tuplehash[!dir].tuple.dst.u3, - info->sig_port[!dir]); - } - } else { + htons(nated_port))) { nf_ct_unexpect_related(exp); return -1; } + /* Save ports */ + info->sig_port[dir] = port; + info->sig_port[!dir] = htons(nated_port); + + /* Fix for Gnomemeeting */ + if (idx > 0 && + get_h225_addr(ct, *data, &taddr[0], &addr, &port) && + (ntohl(addr.ip) & 0xff000000) == 0x7f000000) { + if (set_h225_addr(skb, protoff, data, 0, &taddr[0], + &ct->tuplehash[!dir].tuple.dst.u3, + info->sig_port[!dir])) { + nf_ct_unexpect_related(exp); + return -1; + } + } + /* Success */ pr_debug("nf_nat_ras: expect Q.931 %pI4:%hu->%pI4:%hu\n", &exp->tuple.src.u3.ip, @@ -550,9 +553,9 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, } /* Modify signal */ - if (!set_h225_addr(skb, protoff, data, dataoff, taddr, - &ct->tuplehash[!dir].tuple.dst.u3, - htons(nated_port)) == 0) { + if (set_h225_addr(skb, protoff, data, dataoff, taddr, + &ct->tuplehash[!dir].tuple.dst.u3, + htons(nated_port))) { nf_ct_unexpect_related(exp); return -1; } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index e1a5e582ec48..ce4f10e9513a 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -126,10 +126,13 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1)); static int ip_rt_error_cost __read_mostly = HZ; static int ip_rt_error_burst __read_mostly = 5 * HZ; static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; -static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; +static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; static int ip_rt_min_advmss __read_mostly = 256; static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT; + +static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU; + /* * Interface to generic destination cache. 
*/ @@ -2782,7 +2785,8 @@ static struct ctl_table ipv4_route_table[] = { .data = &ip_rt_min_pmtu, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = &ip_min_valid_pmtu, }, { .procname = "min_adv_mss", diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 277e502ff253..da83648f95f8 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -118,6 +118,7 @@ int sysctl_tcp_default_init_rwnd __read_mostly = TCP_INIT_CWND * 2; #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ +#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) @@ -3544,7 +3545,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (before(ack, prior_snd_una)) { /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ if (before(ack, prior_snd_una - tp->max_window)) { - tcp_send_challenge_ack(sk, skb); + if (!(flag & FLAG_NO_CHALLENGE_ACK)) + tcp_send_challenge_ack(sk, skb); return -1; } goto old_ack; @@ -5466,10 +5468,6 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) else tp->pred_flags = 0; - if (!sock_flag(sk, SOCK_DEAD)) { - sk->sk_state_change(sk); - sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); - } } static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, @@ -5533,6 +5531,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_cookie foc = { .len = -1 }; int saved_clamp = tp->rx_opt.mss_clamp; + bool fastopen_fail; tcp_parse_options(skb, &tp->rx_opt, 0, &foc); if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) @@ -5635,10 +5634,15 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, tcp_finish_connect(sk, skb); - if ((tp->syn_fastopen || tp->syn_data) && - tcp_rcv_fastopen_synack(sk, skb, &foc)) - return -1; + fastopen_fail = (tp->syn_fastopen || tp->syn_data) && + tcp_rcv_fastopen_synack(sk, skb, &foc); + if (!sock_flag(sk, SOCK_DEAD)) { + sk->sk_state_change(sk); + sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); + } + if (fastopen_fail) + return -1; if (sk->sk_write_pending || icsk->icsk_accept_queue.rskq_defer_accept || icsk->icsk_ack.pingpong) { @@ -5832,13 +5836,17 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) /* step 5: check the ACK field */ acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | - FLAG_UPDATE_TS_RECENT) > 0; + FLAG_UPDATE_TS_RECENT | + FLAG_NO_CHALLENGE_ACK) > 0; + if (!acceptable) { + if (sk->sk_state == TCP_SYN_RECV) + return 1; /* send one RST */ + tcp_send_challenge_ack(sk, skb); + goto discard; + } switch (sk->sk_state) { case TCP_SYN_RECV: - if (!acceptable) - return 1; - if (!tp->srtt_us) tcp_synack_rtt_meas(sk, req); @@ -5907,14 +5915,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) * our SYNACK so stop the SYNACK timer. */ if (req) { - /* Return RST if ack_seq is invalid. - * Note that RFC793 only says to generate a - * DUPACK for it but for TCP Fast Open it seems - * better to treat this case like TCP_SYN_RECV - * above. - */ - if (!acceptable) - return 1; /* We no longer need the request sock. 
*/ reqsk_fastopen_remove(sk, req, false); tcp_rearm_rto(sk); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 4d6f09c05a12..9828c4aa3739 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1750,6 +1750,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, err = udplite_checksum_init(skb, uh); if (err) return err; + + if (UDP_SKB_CB(skb)->partial_cov) { + skb->csum = inet_compute_pseudo(skb, proto); + return 0; + } } return skb_checksum_init_zero_check(skb, proto, uh->check, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 40c29712f32a..199658afa68b 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -986,7 +986,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, INIT_HLIST_NODE(&ifa->addr_lst); ifa->scope = scope; ifa->prefix_len = pfxlen; - ifa->flags = flags | IFA_F_TENTATIVE; + ifa->flags = flags; + /* No need to add the TENTATIVE flag for addresses with NODAD */ + if (!(flags & IFA_F_NODAD)) + ifa->flags |= IFA_F_TENTATIVE; ifa->valid_lft = valid_lft; ifa->prefered_lft = prefered_lft; ifa->cstamp = ifa->tstamp = jiffies; diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 189eb10b742d..e742c4deb13d 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -423,7 +423,9 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); sg_init_table(sg, nfrags + sglists); - skb_to_sgvec_nomark(skb, sg, 0, skb->len); + err = skb_to_sgvec_nomark(skb, sg, 0, skb->len); + if (unlikely(err < 0)) + goto out_free; if (x->props.flags & XFRM_STATE_ESN) { /* Attach seqhi sg right after packet payload */ @@ -603,7 +605,9 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) ip6h->hop_limit = 0; sg_init_table(sg, nfrags + sglists); - skb_to_sgvec_nomark(skb, sg, 0, skb->len); + err = skb_to_sgvec_nomark(skb, sg, 0, skb->len); + if (unlikely(err < 0)) + goto out_free; if (x->props.flags & XFRM_STATE_ESN) { /* Attach seqhi sg right after packet payload */ diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index cbcdd5db31f4..44a2010e2076 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -248,9 +248,11 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) esph->spi = x->id.spi; sg_init_table(sg, nfrags); - skb_to_sgvec(skb, sg, - (unsigned char *)esph - skb->data, - assoclen + ivlen + clen + alen); + err = skb_to_sgvec(skb, sg, + (unsigned char *)esph - skb->data, + assoclen + ivlen + clen + alen); + if (unlikely(err < 0)) + goto error; aead_request_set_crypt(req, sg, sg, ivlen + clen, iv); aead_request_set_ad(req, assoclen); @@ -423,7 +425,9 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) } sg_init_table(sg, nfrags); - skb_to_sgvec(skb, sg, 0, skb->len); + ret = skb_to_sgvec(skb, sg, 0, skb->len); + if (unlikely(ret < 0)) + goto out; aead_request_set_crypt(req, sg, sg, elen + ivlen, iv); aead_request_set_ad(req, assoclen); diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c index 9a4d7322fb22..391a8fedb27e 100644 --- a/net/ipv6/ip6_checksum.c +++ b/net/ipv6/ip6_checksum.c @@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto) err = udplite_checksum_init(skb, uh); if (err) return err; + + if (UDP_SKB_CB(skb)->partial_cov) { + skb->csum = ip6_compute_pseudo(skb, proto); + return 0; + } } /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 85afef175cf9..6e496c3dd8ef 100644 --- a/net/ipv6/ip6_gre.c +++ 
b/net/ipv6/ip6_gre.c @@ -320,11 +320,13 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net, if (t || !create) return t; - if (parms->name[0]) + if (parms->name[0]) { + if (!dev_valid_name(parms->name)) + return NULL; strlcpy(name, parms->name, IFNAMSIZ); - else + } else { strcpy(name, "ip6gre%d"); - + } dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, ip6gre_tunnel_setup); if (!dev) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 3ef81c387923..bfa710e8b615 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -340,6 +340,10 @@ static int ip6_forward_proxy_check(struct sk_buff *skb) static inline int ip6_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { + struct dst_entry *dst = skb_dst(skb); + + IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); + IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); skb_sender_cpu_clear(skb); return dst_output(net, sk, skb); } @@ -534,8 +538,6 @@ int ip6_forward(struct sk_buff *skb) hdr->hop_limit--; - IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); - IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, net, NULL, skb, skb->dev, dst->dev, ip6_forward_finish); @@ -1276,7 +1278,7 @@ static int __ip6_append_data(struct sock *sk, unsigned int flags, int dontfrag) { struct sk_buff *skb, *skb_prev = NULL; - unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu; + unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu; int exthdrlen = 0; int dst_exthdrlen = 0; int hh_len; @@ -1312,6 +1314,12 @@ static int __ip6_append_data(struct sock *sk, sizeof(struct frag_hdr) : 0) + rt->rt6i_nfheader_len; + /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit + * the first fragment + */ + if (headersize + transhdrlen > mtu) + goto emsgsize; + if (cork->length + length > mtu - headersize && dontfrag && (sk->sk_protocol == IPPROTO_UDP || sk->sk_protocol == IPPROTO_RAW)) { @@ -1327,9 +1335,8 @@ static int __ip6_append_data(struct sock *sk, if (cork->length + length > maxnonfragsize - headersize) { emsgsize: - ipv6_local_error(sk, EMSGSIZE, fl6, - mtu - headersize + - sizeof(struct ipv6hdr)); + pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0); + ipv6_local_error(sk, EMSGSIZE, fl6, pmtu); return -EMSGSIZE; } diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 27b00c4e642d..3c2468bd0b7c 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -286,13 +286,16 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) struct net_device *dev; struct ip6_tnl *t; char name[IFNAMSIZ]; - int err = -ENOMEM; + int err = -E2BIG; - if (p->name[0]) + if (p->name[0]) { + if (!dev_valid_name(p->name)) + goto failed; strlcpy(name, p->name, IFNAMSIZ); - else + } else { sprintf(name, "ip6tnl%%d"); - + } + err = -ENOMEM; dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, ip6_tnl_dev_setup); if (!dev) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 00111ac94251..b8bf123f7f79 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -212,10 +212,13 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p char name[IFNAMSIZ]; int err; - if (p->name[0]) + if (p->name[0]) { + if (!dev_valid_name(p->name)) + goto failed; strlcpy(name, p->name, IFNAMSIZ); - else + } else { sprintf(name, "ip6_vti%%d"); + } dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup); if 
(!dev) diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 3452f9037ad4..dfe55e7ef07d 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1480,7 +1480,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb, *(opt++) = (rd_len >> 3); opt += 6; - memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8); + skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt, + rd_len - 8); } void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) @@ -1688,6 +1689,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, case NETDEV_CHANGEADDR: neigh_changeaddr(&nd_tbl, dev); fib6_run_gc(0, net, false); + /* fallthrough */ + case NETDEV_UP: idev = in6_dev_get(dev); if (!idev) break; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 308b825fd12f..f424dad89054 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -429,6 +429,10 @@ ip6t_do_table(struct sk_buff *skb, } if (table_base + v != ip6t_next_entry(e) && !(e->ipv6.flags & IP6T_F_GOTO)) { + if (unlikely(stackidx >= private->stacksize)) { + verdict = NF_DROP; + break; + } jumpstack[stackidx++] = e; } @@ -662,7 +666,8 @@ static int check_target(struct ip6t_entry *e, struct net *net, const char *name) static int find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, - unsigned int size) + unsigned int size, + struct xt_percpu_counter_alloc_state *alloc_state) { struct xt_entry_target *t; struct xt_target *target; @@ -670,12 +675,9 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, unsigned int j; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; - unsigned long pcnt; - pcnt = xt_percpu_counter_alloc(); - if (IS_ERR_VALUE(pcnt)) + if (!xt_percpu_counter_alloc(alloc_state, &e->counters)) return -ENOMEM; - e->counters.pcnt = pcnt; j = 0; mtpar.net = net; @@ -713,7 +715,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, cleanup_match(ematch, net); } - xt_percpu_counter_free(e->counters.pcnt); + xt_percpu_counter_free(&e->counters); return ret; } @@ -808,8 +810,7 @@ static void cleanup_entry(struct ip6t_entry *e, struct net *net) if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); - - xt_percpu_counter_free(e->counters.pcnt); + xt_percpu_counter_free(&e->counters); } /* Checks and translates the user-supplied table segment (held in @@ -818,6 +819,7 @@ static int translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, const struct ip6t_replace *repl) { + struct xt_percpu_counter_alloc_state alloc_state = { 0 }; struct ip6t_entry *iter; unsigned int *offsets; unsigned int i; @@ -887,7 +889,8 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { - ret = find_check_entry(iter, net, repl->name, repl->size); + ret = find_check_entry(iter, net, repl->name, repl->size, + &alloc_state); if (ret != 0) break; ++i; diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c index 238e70c3f7b7..7b9c2cabd495 100644 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c @@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb, !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff, target, maniptype)) return false; + + /* must reload, offset might have changed */ + ipv6h = (void *)skb->data + 
iphdroff; + manip_addr: if (maniptype == NF_NAT_MANIP_SRC) ipv6h->saddr = target->src.u3.in6; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 5842430a122a..6b65f3bf7270 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -844,6 +844,9 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net, struct fib6_node *fn; struct rt6_info *rt; + if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) + flags &= ~RT6_LOOKUP_F_IFACE; + read_lock_bh(&table->tb6_lock); fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); restart: diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index d17d64edb718..51f7c32f04d7 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -176,7 +176,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn) #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel *t = netdev_priv(dev); - if (t->dev == sitn->fb_tunnel_dev) { + if (dev == sitn->fb_tunnel_dev) { ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0); t->ip6rd.relay_prefix = 0; t->ip6rd.prefixlen = 16; @@ -244,11 +244,13 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, if (!create) goto failed; - if (parms->name[0]) + if (parms->name[0]) { + if (!dev_valid_name(parms->name)) + goto failed; strlcpy(name, parms->name, IFNAMSIZ); - else + } else { strcpy(name, "sit%d"); - + } dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, ipip6_tunnel_setup); if (!dev) @@ -690,6 +692,7 @@ static int ipip6_rcv(struct sk_buff *skb) if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6))) goto out; + iph = ip_hdr(skb); err = IP_ECN_decapsulate(iph, skb); if (unlikely(err)) { diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 20ab7b2ec463..aeffb65181f5 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -2381,9 +2381,11 @@ static int afiucv_iucv_init(void) af_iucv_dev->driver = &af_iucv_driver; err = device_register(af_iucv_dev); if (err) - goto out_driver; + goto out_iucv_dev; return 0; +out_iucv_dev: + put_device(af_iucv_dev); out_driver: driver_unregister(&af_iucv_driver); out_iucv: diff --git a/net/key/af_key.c b/net/key/af_key.c index 6482b001f19a..15150b412930 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3305,7 +3305,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, p += pol->sadb_x_policy_len*8; sec_ctx = (struct sadb_x_sec_ctx *)p; if (len < pol->sadb_x_policy_len*8 + - sec_ctx->sadb_x_sec_len) { + sec_ctx->sadb_x_sec_len*8) { *dir = -EINVAL; goto out; } diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index ec8f6a6485e3..92df832a1896 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1518,9 +1518,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 encap = cfg->encap; /* Quick sanity checks */ + err = -EPROTONOSUPPORT; + if (sk->sk_type != SOCK_DGRAM) { + pr_debug("tunl %hu: fd %d wrong socket type\n", + tunnel_id, fd); + goto err; + } switch (encap) { case L2TP_ENCAPTYPE_UDP: - err = -EPROTONOSUPPORT; if (sk->sk_protocol != IPPROTO_UDP) { pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP); @@ -1528,7 +1533,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 } break; case L2TP_ENCAPTYPE_IP: - err = -EPROTONOSUPPORT; if (sk->sk_protocol != IPPROTO_L2TP) { pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n", tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index fb3248ff8b48..ae3438685caa 100644 --- 
a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -732,6 +732,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl if ((session->ifname[0] && nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) || + (session->offset && + nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) || (session->cookie_len && nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0])) || diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index bb8edb9ef506..1e698768aca8 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -309,6 +309,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) int rc = -EINVAL; dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); + + lock_sock(sk); if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) goto out; rc = -EAFNOSUPPORT; @@ -380,6 +382,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) out_put: llc_sap_put(sap); out: + release_sock(sk); return rc; } diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 7fc1250c8d37..00b25114843f 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1442,7 +1442,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: - BUG(); + WARN_ON(1); break; } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index f3deee7da389..c16b4da20db2 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4326,6 +4326,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data)) return -EINVAL; + /* If a reconfig is happening, bail out */ + if (local->in_reconfig) + return -EBUSY; + if (assoc) { rcu_read_lock(); have_sta = sta_info_get(sdata, cbss->bssid); diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 5bad05e9af90..45fb1abdb265 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -194,6 +194,7 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb) } if (ieee80211_is_action(mgmt->frame_control) && + !ieee80211_has_protected(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_HT && mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS && ieee80211_sdata_running(sdata)) { diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 52cfc4478511..c2ce7dec5198 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -7,6 +7,7 @@ #include <linux/if_arp.h> #include <linux/ipv6.h> #include <linux/mpls.h> +#include <linux/nospec.h> #include <linux/vmalloc.h> #include <net/ip.h> #include <net/dst.h> @@ -714,6 +715,22 @@ errout: return err; } +static bool mpls_label_ok(struct net *net, unsigned int *index) +{ + bool is_ok = true; + + /* Reserved labels may not be set */ + if (*index < MPLS_LABEL_FIRST_UNRESERVED) + is_ok = false; + + /* The full 20 bit range may not be supported. */ + if (is_ok && *index >= net->mpls.platform_labels) + is_ok = false; + + *index = array_index_nospec(*index, net->mpls.platform_labels); + return is_ok; +} + static int mpls_route_add(struct mpls_route_config *cfg) { struct mpls_route __rcu **platform_label; @@ -732,12 +749,7 @@ static int mpls_route_add(struct mpls_route_config *cfg) index = find_free_label(net); } - /* Reserved labels may not be set */ - if (index < MPLS_LABEL_FIRST_UNRESERVED) - goto errout; - - /* The full 20 bit range may not be supported. 
*/ - if (index >= net->mpls.platform_labels) + if (!mpls_label_ok(net, &index)) goto errout; /* Append makes no sense with mpls */ @@ -798,12 +810,7 @@ static int mpls_route_del(struct mpls_route_config *cfg) index = cfg->rc_label; - /* Reserved labels may not be removed */ - if (index < MPLS_LABEL_FIRST_UNRESERVED) - goto errout; - - /* The full 20 bit range may not be supported */ - if (index >= net->mpls.platform_labels) + if (!mpls_label_ok(net, &index)) goto errout; mpls_route_update(net, index, NULL, &cfg->rc_nlinfo); @@ -1162,10 +1169,9 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh, &cfg->rc_label)) goto errout; - /* Reserved labels may not be set */ - if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED) + if (!mpls_label_ok(cfg->rc_nlinfo.nl_net, + &cfg->rc_label)) goto errout; - break; } case RTA_VIA: diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 660939df7c94..c68e020427ab 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -887,8 +887,13 @@ restart: } out: local_bh_enable(); - if (last) + if (last) { + /* nf ct hash resize happened, now clear the leftover. */ + if ((struct nf_conn *)cb->args[1] == last) + cb->args[1] = 0; + nf_ct_put(last); + } return skb->len; } @@ -999,9 +1004,8 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = { static int ctnetlink_parse_tuple(const struct nlattr * const cda[], - struct nf_conntrack_tuple *tuple, - enum ctattr_type type, u_int8_t l3num, - struct nf_conntrack_zone *zone) + struct nf_conntrack_tuple *tuple, u32 type, + u_int8_t l3num, struct nf_conntrack_zone *zone) { struct nlattr *tb[CTA_TUPLE_MAX+1]; int err; @@ -2416,7 +2420,7 @@ static struct nfnl_ct_hook ctnetlink_glue_hook = { static inline int ctnetlink_exp_dump_tuple(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple, - enum ctattr_expect type) + u32 type) { struct nlattr *nest_parms; diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c index fbce552a796e..7d7466dbf663 100644 --- a/net/netfilter/nf_nat_proto_common.c +++ b/net/netfilter/nf_nat_proto_common.c @@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, const struct nf_conn *ct, u16 *rover) { - unsigned int range_size, min, i; + unsigned int range_size, min, max, i; __be16 *portptr; u_int16_t off; @@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto, } } else { min = ntohs(range->min_proto.all); - range_size = ntohs(range->max_proto.all) - min + 1; + max = ntohs(range->max_proto.all); + if (unlikely(max < min)) + swap(max, min); + range_size = max - min + 1; } if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) { diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index f853b55bf877..7edcfda288c4 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -501,7 +501,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, if (entskb->tstamp.tv64) { struct nfqnl_msg_packet_timestamp ts; - struct timespec64 kts = ktime_to_timespec64(skb->tstamp); + struct timespec64 kts = ktime_to_timespec64(entskb->tstamp); ts.sec = cpu_to_be64(kts.tv_sec); ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC); diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 5b52dd3feb7d..1f3c305df45d 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -38,6 +38,8 @@ MODULE_LICENSE("GPL"); 
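/*
 * Aside: the mpls_label_ok() helper introduced above illustrates the usual
 * two-step treatment of an index that arrives from userspace and later
 * selects an entry in a kernel table: first reject out-of-range values,
 * then clamp with array_index_nospec() so a mispredicted branch cannot be
 * used to read past the table under speculation.  A minimal standalone
 * sketch of that pattern follows; the index_ok()/table_size names are
 * illustrative only and not part of the patch.
 */
#include <linux/nospec.h>
#include <linux/types.h>

static bool index_ok(unsigned int *index, unsigned int table_size)
{
	if (*index >= table_size)
		return false;

	/* Clamp even after the check so speculative loads stay in bounds. */
	*index = array_index_nospec(*index, table_size);
	return true;
}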
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); +#define XT_PCPU_BLOCK_SIZE 4096 + struct compat_delta { unsigned int offset; /* offset in kernel */ int delta; /* delta in 32bit user land */ @@ -364,6 +366,36 @@ textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto) return buf; } +/** + * xt_check_proc_name - check that name is suitable for /proc file creation + * + * @name: file name candidate + * @size: length of buffer + * + * some x_tables modules wish to create a file in /proc. + * This function makes sure that the name is suitable for this + * purpose, it checks that name is NUL terminated and isn't a 'special' + * name, like "..". + * + * returns negative number on error or 0 if name is useable. + */ +int xt_check_proc_name(const char *name, unsigned int size) +{ + if (name[0] == '\0') + return -EINVAL; + + if (strnlen(name, size) == size) + return -ENAMETOOLONG; + + if (strcmp(name, ".") == 0 || + strcmp(name, "..") == 0 || + strchr(name, '/')) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(xt_check_proc_name); + int xt_check_match(struct xt_mtchk_param *par, unsigned int size, u_int8_t proto, bool inv_proto) { @@ -1592,6 +1624,59 @@ void xt_proto_fini(struct net *net, u_int8_t af) } EXPORT_SYMBOL_GPL(xt_proto_fini); +/** + * xt_percpu_counter_alloc - allocate x_tables rule counter + * + * @state: pointer to xt_percpu allocation state + * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct + * + * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then + * contain the address of the real (percpu) counter. + * + * Rule evaluation needs to use xt_get_this_cpu_counter() helper + * to fetch the real percpu counter. + * + * To speed up allocation and improve data locality, a 4kb block is + * allocated. + * + * xt_percpu_counter_alloc_state contains the base address of the + * allocated page and the current sub-offset. + * + * returns false on error. 
+ */ +bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state, + struct xt_counters *counter) +{ + BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2)); + + if (nr_cpu_ids <= 1) + return true; + + if (!state->mem) { + state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE, + XT_PCPU_BLOCK_SIZE); + if (!state->mem) + return false; + } + counter->pcnt = (__force unsigned long)(state->mem + state->off); + state->off += sizeof(*counter); + if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) { + state->mem = NULL; + state->off = 0; + } + return true; +} +EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc); + +void xt_percpu_counter_free(struct xt_counters *counters) +{ + unsigned long pcnt = counters->pcnt; + + if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0) + free_percpu((void __percpu *)pcnt); +} +EXPORT_SYMBOL_GPL(xt_percpu_counter_free); + static int __net_init xt_net_init(struct net *net) { int i; diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index e7ac07e53b59..febcfac7e3df 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -168,8 +168,10 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par, goto err_put_timeout; } timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC); - if (timeout_ext == NULL) + if (!timeout_ext) { ret = -ENOMEM; + goto err_put_timeout; + } rcu_read_unlock(); return ret; @@ -201,6 +203,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par, struct xt_ct_target_info_v1 *info) { struct nf_conntrack_zone zone; + struct nf_conn_help *help; struct nf_conn *ct; int ret = -EOPNOTSUPP; @@ -249,7 +252,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par, if (info->timeout[0]) { ret = xt_ct_set_timeout(ct, par, info->timeout); if (ret < 0) - goto err3; + goto err4; } __set_bit(IPS_CONFIRMED_BIT, &ct->status); nf_conntrack_get(&ct->ct_general); @@ -257,6 +260,10 @@ out: info->ct = ct; return 0; +err4: + help = nfct_help(ct); + if (help) + module_put(help->helper->me); err3: nf_ct_tmpl_free(ct); err2: diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c index f9eb8641dc3a..4c126cd18469 100644 --- a/net/netfilter/xt_IDLETIMER.c +++ b/net/netfilter/xt_IDLETIMER.c @@ -331,11 +331,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info) printk(KERN_WARNING "[%s] Failed to register pm notifier %d\n", __func__, ret); + INIT_WORK(&info->timer->work, idletimer_tg_work); + mod_timer(&info->timer->timer, msecs_to_jiffies(info->timeout * 1000) + jiffies); - INIT_WORK(&info->timer->work, idletimer_tg_work); - return 0; out_free_attr: @@ -420,7 +420,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par) pr_debug("timeout value is zero\n"); return -EINVAL; } - + if (info->timeout >= INT_MAX / 1000) { + pr_debug("timeout value is too big\n"); + return -EINVAL; + } if (info->label[0] == '\0' || strnlen(info->label, MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) { diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c index 3ba31c194cce..0858fe17e14a 100644 --- a/net/netfilter/xt_LED.c +++ b/net/netfilter/xt_LED.c @@ -141,10 +141,11 @@ static int led_tg_check(const struct xt_tgchk_param *par) goto exit_alloc; } - /* See if we need to set up a timer */ - if (ledinfo->delay > 0) - setup_timer(&ledinternal->timer, led_timeout_callback, - (unsigned long)ledinternal); + /* Since the letinternal timer can be shared between multiple targets, + * always set it up, even if the current target does not need it + */ + 
setup_timer(&ledinternal->timer, led_timeout_callback, + (unsigned long)ledinternal); list_add_tail(&ledinternal->list, &xt_led_triggers); @@ -181,8 +182,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par) list_del(&ledinternal->list); - if (ledinfo->delay > 0) - del_timer_sync(&ledinternal->timer); + del_timer_sync(&ledinternal->timer); led_trigger_unregister(&ledinternal->netfilter_led_trigger); diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 178696852bde..7381be0cdcdf 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -668,8 +668,9 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par) if (info->cfg.gc_interval == 0 || info->cfg.expire == 0) return -EINVAL; - if (info->name[sizeof(info->name)-1] != '\0') - return -EINVAL; + ret = xt_check_proc_name(info->name, sizeof(info->name)); + if (ret) + return ret; if (par->family == NFPROTO_IPV4) { if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32) return -EINVAL; diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c index fa011b34af23..e82524b21d07 100644 --- a/net/netfilter/xt_qtaguid.c +++ b/net/netfilter/xt_qtaguid.c @@ -968,8 +968,8 @@ static void iface_stat_create(struct net_device *net_dev, IF_DEBUG("qtaguid: iface_stat: create(%s): " "ifa=%p ifa_label=%s\n", ifname, ifa, - ifa->ifa_label ? ifa->ifa_label : "(null)"); - if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label)) + ifa->ifa_label); + if (!strcmp(ifname, ifa->ifa_label)) break; } } @@ -1192,11 +1192,6 @@ static void get_dev_and_dir(const struct sk_buff *skb, par->hooknum, __func__); BUG(); } - if (unlikely(!(*el_dev)->name)) { - pr_err("qtaguid[%d]: %s(): no dev->name?!!\n", - par->hooknum, __func__); - BUG(); - } if (skb->dev && *el_dev != skb->dev) { MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs par->%s=%p %s\n", par->hooknum, skb->dev, skb->dev->name, diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index d725a27743a1..cd53b861a15c 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -364,9 +364,9 @@ static int recent_mt_check(const struct xt_mtchk_param *par, info->hit_count, XT_RECENT_MAX_NSTAMPS - 1); return -EINVAL; } - if (info->name[0] == '\0' || - strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN) - return -EINVAL; + ret = xt_check_proc_name(info->name, sizeof(info->name)); + if (ret) + return ret; if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot) nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index f59d82f0aa97..83c0f56d05cb 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1034,6 +1034,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, if (addr->sa_family != AF_NETLINK) return -EINVAL; + if (alen < sizeof(struct sockaddr_nl)) + return -EINVAL; + if ((nladdr->nl_groups || nladdr->nl_pid) && !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) return -EPERM; diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index b2cde0e09809..8336c9607a70 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -1118,6 +1118,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group, { struct sk_buff *tmp; struct net *net, *prev = NULL; + bool delivered = false; int err; for_each_net_rcu(net) { @@ -1129,14 +1130,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group, } err = nlmsg_multicast(prev->genl_sock, tmp, portid, group, flags); - 
if (err) + if (!err) + delivered = true; + else if (err != -ESRCH) goto error; } prev = net; } - return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags); + err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags); + if (!err) + delivered = true; + else if (err != -ESRCH) + return err; + return delivered ? 0 : -ESRCH; error: kfree_skb(skb); return err; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 6a2507f24b0f..1829adb23505 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -361,10 +361,38 @@ ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone, u16 proto, const struct sk_buff *skb) { struct nf_conntrack_tuple tuple; + struct nf_conntrack_expect *exp; if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple)) return NULL; - return __nf_ct_expect_find(net, zone, &tuple); + + exp = __nf_ct_expect_find(net, zone, &tuple); + if (exp) { + struct nf_conntrack_tuple_hash *h; + + /* Delete existing conntrack entry, if it clashes with the + * expectation. This can happen since conntrack ALGs do not + * check for clashes between (new) expectations and existing + * conntrack entries. nf_conntrack_in() will check the + * expectations only if a conntrack entry can not be found, + * which can lead to OVS finding the expectation (here) in the + * init direction, but which will not be removed by the + * nf_conntrack_in() call, if a matching conntrack entry is + * found instead. In this case all init direction packets + * would be reported as new related packets, while reply + * direction packets would be reported as un-related + * established packets. + */ + h = nf_conntrack_find_get(net, zone, &tuple); + if (h) { + struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); + + nf_ct_delete(ct, 0, 0); + nf_conntrack_put(&ct->ct_general); + } + } + + return exp; } /* Determine whether skb->nfct is equal to the result of conntrack lookup. 
*/ diff --git a/net/rds/bind.c b/net/rds/bind.c index b22ea956522b..e29b47193645 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -108,6 +108,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port) rs, &addr, (int)ntohs(*port)); break; } else { + rs->rs_bound_addr = 0; rds_sock_put(rs); ret = -ENOMEM; break; diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index d7a9ab5a9d9c..6c65fb229e50 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c @@ -209,7 +209,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, struct sk_buff *trailer; unsigned int len; u16 check; - int nsg; + int nsg, err; sp = rxrpc_skb(skb); @@ -240,7 +240,9 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, len &= ~(call->conn->size_align - 1); sg_init_table(sg, nsg); - skb_to_sgvec(skb, sg, 0, len); + err = skb_to_sgvec(skb, sg, 0, len); + if (unlikely(err < 0)) + return err; crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); _leave(" = 0"); @@ -336,7 +338,7 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call, struct sk_buff *trailer; u32 data_size, buf; u16 check; - int nsg; + int nsg, ret; _enter(""); @@ -348,7 +350,9 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call, goto nomem; sg_init_table(sg, nsg); - skb_to_sgvec(skb, sg, 0, 8); + ret = skb_to_sgvec(skb, sg, 0, 8); + if (unlikely(ret < 0)) + return ret; /* start the decryption afresh */ memset(&iv, 0, sizeof(iv)); @@ -411,7 +415,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call, struct sk_buff *trailer; u32 data_size, buf; u16 check; - int nsg; + int nsg, ret; _enter(",{%d}", skb->len); @@ -430,7 +434,12 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call, } sg_init_table(sg, nsg); - skb_to_sgvec(skb, sg, 0, skb->len); + ret = skb_to_sgvec(skb, sg, 0, skb->len); + if (unlikely(ret < 0)) { + if (sg != _sg) + kfree(sg); + return ret; + } /* decrypt from the session key */ token = call->conn->key->payload.data[0]; diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 694a06f1e0d5..f44fea22d69c 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -101,8 +101,10 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, a->order = n_i; nest = nla_nest_start(skb, a->order); - if (nest == NULL) + if (nest == NULL) { + index--; goto nla_put_failure; + } err = tcf_action_dump_1(skb, a, 0, 0); if (err < 0) { index--; diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 0bc6f912f870..bd155e59be1c 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -249,10 +249,14 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg) static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg) { - if (cfg->is_ebpf) - bpf_prog_put(cfg->filter); - else - bpf_prog_destroy(cfg->filter); + struct bpf_prog *filter = cfg->filter; + + if (filter) { + if (cfg->is_ebpf) + bpf_prog_put(filter); + else + bpf_prog_destroy(filter); + } kfree(cfg->bpf_ops); kfree(cfg->bpf_name); diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index eeb3eb3ea9eb..024d6cf342c5 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -175,6 +175,9 @@ static int tcf_csum_ipv4_tcp(struct sk_buff *skb, struct tcphdr *tcph; const struct iphdr *iph; + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + return 1; + tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); if (tcph == NULL) return 0; @@ -196,6 +199,9 @@ static int tcf_csum_ipv6_tcp(struct sk_buff *skb, 
struct tcphdr *tcph; const struct ipv6hdr *ip6h; + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + return 1; + tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); if (tcph == NULL) return 0; @@ -219,6 +225,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb, const struct iphdr *iph; u16 ul; + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + return 1; + /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, @@ -272,6 +281,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb, const struct ipv6hdr *ip6h; u16 ul; + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + return 1; + /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 8a61ccc37e12..edb8514b4e00 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -323,8 +323,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); bdst = ip6_dst_lookup_flow(sk, fl6, final_p); - if (!IS_ERR(bdst) && - ipv6_chk_addr(dev_net(bdst->dev), + if (IS_ERR(bdst)) + continue; + + if (ipv6_chk_addr(dev_net(bdst->dev), &laddr->a.v6.sin6_addr, bdst->dev, 1)) { if (!IS_ERR_OR_NULL(dst)) dst_release(dst); @@ -333,8 +335,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, } bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); - if (matchlen > bmatchlen) + if (matchlen > bmatchlen) { + dst_release(bdst); continue; + } if (!IS_ERR_OR_NULL(dst)) dst_release(dst); @@ -719,8 +723,10 @@ static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr) sctp_v6_map_v4(addr); } - if (addr->sa.sa_family == AF_INET) + if (addr->sa.sa_family == AF_INET) { + memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); return sizeof(struct sockaddr_in); + } return sizeof(struct sockaddr_in6); } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 8b4ff315695e..dc030efa4447 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -508,22 +508,20 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, if (IS_ERR(rt)) continue; - if (!dst) - dst = &rt->dst; - /* Ensure the src address belongs to the output * interface. */ odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr, false); if (!odev || odev->ifindex != fl4->flowi4_oif) { - if (&rt->dst != dst) + if (!dst) + dst = &rt->dst; + else dst_release(&rt->dst); continue; } - if (dst != &rt->dst) - dst_release(dst); + dst_release(dst); dst = &rt->dst; break; } diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 5d6a03fad378..509e9426a056 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1367,10 +1367,14 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, sctp_chunkhdr_t *chunk_hdr; struct sk_buff *skb; struct sock *sk; + int chunklen; + + chunklen = WORD_ROUND(sizeof(*chunk_hdr) + paylen); + if (chunklen > SCTP_MAX_CHUNK_LEN) + goto nodata; /* No need to allocate LL here, as this is only a chunk. 
*/ - skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen), - GFP_ATOMIC); + skb = alloc_skb(chunklen, GFP_ATOMIC); if (!skb) goto nodata; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index df6a4b2d0728..13c7f42b7040 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -335,11 +335,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, if (!opt->pf->af_supported(addr->sa.sa_family, opt)) return NULL; - /* V4 mapped address are really of AF_INET family */ - if (addr->sa.sa_family == AF_INET6 && - ipv6_addr_v4mapped(&addr->v6.sin6_addr) && - !opt->pf->af_supported(AF_INET, opt)) - return NULL; + if (addr->sa.sa_family == AF_INET6) { + if (len < SIN6_LEN_RFC2133) + return NULL; + /* V4 mapped address are really of AF_INET family */ + if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) && + !opt->pf->af_supported(AF_INET, opt)) + return NULL; + } /* If we get this far, af is valid. */ af = sctp_get_af_specific(addr->sa.sa_family); @@ -1518,7 +1521,7 @@ static void sctp_close(struct sock *sk, long timeout) pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); - lock_sock(sk); + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_state = SCTP_SS_CLOSING; @@ -1569,7 +1572,7 @@ static void sctp_close(struct sock *sk, long timeout) * held and that should be grabbed before socket lock. */ spin_lock_bh(&net->sctp.addr_wq_lock); - bh_lock_sock(sk); + bh_lock_sock_nested(sk); /* Hold the sock, since sk_common_release() will put sock_put() * and we have just a little more cleanup. diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 728d65fbab0c..c9c0976d3bbb 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2363,7 +2363,12 @@ static void xs_tcp_setup_socket(struct work_struct *work) case -EHOSTUNREACH: case -EADDRINUSE: case -ENOBUFS: - /* retry with existing socket, after a delay */ + /* + * xs_tcp_force_close() wakes tasks with -EIO. + * We need to wake them first to ensure the + * correct error code. 
+ */ + xprt_wake_pending_tasks(xprt, status); xs_tcp_force_close(xprt); goto out; } diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index a750f330b8dd..c6ab4da4b8e2 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1794,32 +1794,40 @@ void x25_kill_by_neigh(struct x25_neigh *nb) static int __init x25_init(void) { - int rc = proto_register(&x25_proto, 0); + int rc; - if (rc != 0) + rc = proto_register(&x25_proto, 0); + if (rc) goto out; rc = sock_register(&x25_family_ops); - if (rc != 0) + if (rc) goto out_proto; dev_add_pack(&x25_packet_type); rc = register_netdevice_notifier(&x25_dev_notifier); - if (rc != 0) + if (rc) goto out_sock; - pr_info("Linux Version 0.2\n"); + rc = x25_register_sysctl(); + if (rc) + goto out_dev; - x25_register_sysctl(); rc = x25_proc_init(); - if (rc != 0) - goto out_dev; + if (rc) + goto out_sysctl; + + pr_info("Linux Version 0.2\n"); + out: return rc; +out_sysctl: + x25_unregister_sysctl(); out_dev: unregister_netdevice_notifier(&x25_dev_notifier); out_sock: + dev_remove_pack(&x25_packet_type); sock_unregister(AF_X25); out_proto: proto_unregister(&x25_proto); diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c index 43239527a205..703d46aae7a2 100644 --- a/net/x25/sysctl_net_x25.c +++ b/net/x25/sysctl_net_x25.c @@ -73,9 +73,12 @@ static struct ctl_table x25_table[] = { { 0, }, }; -void __init x25_register_sysctl(void) +int __init x25_register_sysctl(void) { x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table); + if (!x25_table_header) + return -ENOMEM; + return 0; } void x25_unregister_sysctl(void) diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index ccfdc7115a83..a00ec715aa46 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c @@ -283,7 +283,7 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name) struct crypto_comp *tfm; /* This can be any valid CPU ID so we don't need locking. */ - tfm = __this_cpu_read(*pos->tfms); + tfm = this_cpu_read(*pos->tfms); if (!strcmp(crypto_comp_name(tfm), alg_name)) { pos->users++; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 7944daeb7378..70535b8ee4d6 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1208,6 +1208,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig) x->curlft.add_time = orig->curlft.add_time; x->km.state = orig->km.state; x->km.seq = orig->km.seq; + x->replay = orig->replay; + x->preplay = orig->preplay; return x; @@ -1845,6 +1847,11 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen struct xfrm_mgr *km; struct xfrm_policy *pol = NULL; +#ifdef CONFIG_COMPAT + if (is_compat_task()) + return -EOPNOTSUPP; +#endif + if (!optval && !optlen) { xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index ed5c79ab0d23..b88a62f45284 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -121,22 +121,17 @@ static inline int verify_replay(struct xfrm_usersa_info *p, struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; struct xfrm_replay_state_esn *rs; - if (p->flags & XFRM_STATE_ESN) { - if (!rt) - return -EINVAL; + if (!rt) + return (p->flags & XFRM_STATE_ESN) ? 
-EINVAL : 0; - rs = nla_data(rt); + rs = nla_data(rt); - if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) - return -EINVAL; - - if (nla_len(rt) < xfrm_replay_state_esn_len(rs) && - nla_len(rt) != sizeof(*rs)) - return -EINVAL; - } + if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) + return -EINVAL; - if (!rt) - return 0; + if (nla_len(rt) < xfrm_replay_state_esn_len(rs) && + nla_len(rt) != sizeof(*rs)) + return -EINVAL; /* As only ESP and AH support ESN feature. */ if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 9f9cd525960f..16dc35023e87 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -275,6 +275,11 @@ cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -n -f -9 > $@) || \ # DTC # --------------------------------------------------------------------------- +# Disable noisy checks by default +ifeq ($(KBUILD_ENABLE_EXTRA_GCC_CHECKS),) +DTC_FLAGS += -Wno-unit_address_vs_reg +endif + # Generate an assembly file to wrap the output of the device tree compiler quiet_cmd_dt_S_dtb= DTB $@ cmd_dt_S_dtb= \ @@ -282,11 +287,11 @@ cmd_dt_S_dtb= \ echo '\#include <asm-generic/vmlinux.lds.h>'; \ echo '.section .dtb.init.rodata,"a"'; \ echo '.balign STRUCT_ALIGNMENT'; \ - echo '.global __dtb_$(*F)_begin'; \ - echo '__dtb_$(*F)_begin:'; \ + echo '.global __dtb_$(subst -,_,$(*F))_begin'; \ + echo '__dtb_$(subst -,_,$(*F))_begin:'; \ echo '.incbin "$<" '; \ - echo '__dtb_$(*F)_end:'; \ - echo '.global __dtb_$(*F)_end'; \ + echo '__dtb_$(subst -,_,$(*F))_end:'; \ + echo '.global __dtb_$(subst -,_,$(*F))_end'; \ echo '.balign STRUCT_ALIGNMENT'; \ ) > $@ diff --git a/scripts/tags.sh b/scripts/tags.sh index 262889046703..45e246595d10 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh @@ -106,6 +106,7 @@ all_compiled_sources() case "$i" in *.[cS]) j=${i/\.[cS]/\.o} + j="${j#$tree}" if [ -e $j ]; then echo $i fi diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index dec607c17b64..6dc4ce47580f 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -722,7 +722,7 @@ module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR); /* Maximum pathname length before accesses will start getting rejected */ unsigned int aa_g_path_max = 2 * PATH_MAX; -module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR | S_IWUSR); +module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR); /* Determines how paranoid loading of policy is and how much verification * on the loaded policy is done. diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 19014293f927..8da7c91b725d 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -206,7 +206,8 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint, if (opened & FILE_CREATED) iint->flags |= IMA_NEW_FILE; if ((iint->flags & IMA_NEW_FILE) && - !(iint->flags & IMA_DIGSIG_REQUIRED)) + (!(iint->flags & IMA_DIGSIG_REQUIRED) || + (inode->i_size == 0))) status = INTEGRITY_PASS; goto out; } diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index cc7689c5c153..e3f8891c5d72 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -333,18 +333,6 @@ static void superblock_free_security(struct super_block *sb) kfree(sbsec); } -/* The file system's label must be initialized prior to use. 
*/ - -static const char *labeling_behaviors[7] = { - "uses xattr", - "uses transition SIDs", - "uses task SIDs", - "uses genfs_contexts", - "not configured for labeling", - "uses mountpoint labeling", - "uses native labeling", -}; - static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry); static inline int inode_doinit(struct inode *inode) @@ -457,10 +445,6 @@ static int sb_finish_set_opts(struct super_block *sb) } } - if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors)) - printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n", - sb->s_id, sb->s_type->name); - sbsec->flags |= SE_SBINITIALIZED; if (selinux_is_sblabel_mnt(sb)) sbsec->flags |= SBLABEL_MNT; @@ -1961,8 +1945,9 @@ static inline u32 file_to_av(struct file *file) static inline u32 open_file_to_av(struct file *file) { u32 av = file_to_av(file); + struct inode *inode = file_inode(file); - if (selinux_policycap_openperm) + if (selinux_policycap_openperm && inode->i_sb->s_magic != SOCKFS_MAGIC) av |= FILE__OPEN; return av; @@ -2931,6 +2916,7 @@ static int selinux_inode_permission(struct inode *inode, int mask) static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) { const struct cred *cred = current_cred(); + struct inode *inode = d_backing_inode(dentry); unsigned int ia_valid = iattr->ia_valid; __u32 av = FILE__WRITE; @@ -2946,8 +2932,10 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET)) return dentry_has_perm(cred, dentry, FILE__SETATTR); - if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE) - && !(ia_valid & ATTR_FILE)) + if (selinux_policycap_openperm && + inode->i_sb->s_magic != SOCKFS_MAGIC && + (ia_valid & ATTR_SIZE) && + !(ia_valid & ATTR_FILE)) av |= FILE__OPEN; return dentry_has_perm(cred, dentry, av); @@ -4159,10 +4147,18 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in u32 sid, node_perm; if (family == PF_INET) { + if (addrlen < sizeof(struct sockaddr_in)) { + err = -EINVAL; + goto out; + } addr4 = (struct sockaddr_in *)address; snum = ntohs(addr4->sin_port); addrp = (char *)&addr4->sin_addr.s_addr; } else { + if (addrlen < SIN6_LEN_RFC2133) { + err = -EINVAL; + goto out; + } addr6 = (struct sockaddr_in6 *)address; snum = ntohs(addr6->sin6_port); addrp = (char *)&addr6->sin6_addr.s6_addr; diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index db7eff3573a9..23dca68ffe25 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -155,7 +155,7 @@ static int selinux_set_mapping(struct policydb *pol, } k = 0; - while (p_in->perms && p_in->perms[k]) { + while (p_in->perms[k]) { /* An empty permission string skips ahead */ if (!*p_in->perms[k]) { k++; diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 494b7b533366..6cd8aec146f2 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -1361,7 +1361,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const char __user *buf, size_t bytes) { size_t xfer = 0; - ssize_t tmp; + ssize_t tmp = 0; struct snd_pcm_runtime *runtime = substream->runtime; if (atomic_read(&substream->mmap_count)) @@ -1468,7 +1468,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf, static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __user *buf, size_t bytes) { size_t xfer = 0; 
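/*
 * Aside: several hunks in this series (netlink_connect(), sctp_sockaddr_af(),
 * selinux_socket_bind()) add the same defensive rule: verify that the
 * caller-supplied address length covers the family-specific sockaddr before
 * reading any field beyond sa_family.  A minimal sketch of that check,
 * assuming addrlen has already been verified to be large enough to read
 * sa_family; the addr_len_ok() helper name is illustrative only.
 */
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/socket.h>

static int addr_len_ok(const struct sockaddr *addr, unsigned int addrlen)
{
	switch (addr->sa_family) {
	case AF_INET:
		/* sockaddr_in is the minimum for an IPv4 address. */
		return addrlen >= sizeof(struct sockaddr_in) ? 0 : -EINVAL;
	case AF_INET6:
		/* SIN6_LEN_RFC2133 is the shortest valid sockaddr_in6. */
		return addrlen >= SIN6_LEN_RFC2133 ? 0 : -EINVAL;
	default:
		return -EAFNOSUPPORT;
	}
}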
- ssize_t tmp; + ssize_t tmp = 0; struct snd_pcm_runtime *runtime = substream->runtime; if (atomic_read(&substream->mmap_count)) @@ -1814,10 +1814,9 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file) return -ENOMEM; _snd_pcm_hw_params_any(params); err = snd_pcm_hw_refine(substream, params); - format_mask = *hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); - kfree(params); if (err < 0) - return err; + goto error; + format_mask = *hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); for (fmt = 0; fmt < 32; ++fmt) { if (snd_mask_test(&format_mask, fmt)) { int f = snd_pcm_oss_format_to(fmt); @@ -1825,7 +1824,10 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file) formats |= f; } } - return formats; + + error: + kfree(params); + return err < 0 ? err : formats; } static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format) diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 51110252e3f3..7c36499491c5 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -3460,7 +3460,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream, area, substream->runtime->dma_area, substream->runtime->dma_addr, - area->vm_end - area->vm_start); + substream->runtime->dma_bytes); #endif /* CONFIG_X86 */ /* mmap with fault handler */ area->vm_ops = &snd_pcm_vm_ops_data_fault; diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 167b943469ab..73ee8476584d 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -270,12 +270,12 @@ static int seq_free_client1(struct snd_seq_client *client) if (!client) return 0; - snd_seq_delete_all_ports(client); - snd_seq_queue_client_leave(client->number); spin_lock_irqsave(&clients_lock, flags); clienttablock[client->number] = 1; clienttab[client->number] = NULL; spin_unlock_irqrestore(&clients_lock, flags); + snd_seq_delete_all_ports(client); + snd_seq_queue_client_leave(client->number); snd_use_lock_sync(&client->use_lock); snd_seq_queue_client_termination(client->number); if (client->pool) @@ -919,7 +919,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop) static int snd_seq_client_enqueue_event(struct snd_seq_client *client, struct snd_seq_event *event, struct file *file, int blocking, - int atomic, int hop) + int atomic, int hop, + struct mutex *mutexp) { struct snd_seq_event_cell *cell; int err; @@ -957,7 +958,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client, return -ENXIO; /* queue is not allocated */ /* allocate an event cell */ - err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file); + err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, + file, mutexp); if (err < 0) return err; @@ -1026,12 +1028,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf, return -ENXIO; /* allocate the pool now if the pool is not allocated yet */ + mutex_lock(&client->ioctl_mutex); if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) { - mutex_lock(&client->ioctl_mutex); err = snd_seq_pool_init(client->pool); - mutex_unlock(&client->ioctl_mutex); if (err < 0) - return -ENOMEM; + goto out; } /* only process whole events */ @@ -1082,7 +1083,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf, /* ok, enqueue it */ err = snd_seq_client_enqueue_event(client, &event, file, !(file->f_flags & O_NONBLOCK), - 0, 0); + 0, 0, &client->ioctl_mutex); if (err < 0) break; @@ 
-1093,6 +1094,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf, written += len; } + out: + mutex_unlock(&client->ioctl_mutex); return written ? written : err; } @@ -1924,6 +1927,9 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client, (! snd_seq_write_pool_allocated(client) || info.output_pool != client->pool->size)) { if (snd_seq_write_pool_allocated(client)) { + /* is the pool in use? */ + if (atomic_read(&client->pool->counter)) + return -EBUSY; /* remove all existing cells */ snd_seq_pool_mark_closing(client->pool); snd_seq_queue_client_leave_cells(client->number); @@ -2347,7 +2353,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev, if (! cptr->accept_output) result = -EPERM; else /* send it */ - result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop); + result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, + atomic, hop, NULL); snd_seq_client_unlock(cptr); return result; diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c index 3490d21ab9e7..9acbed1ac982 100644 --- a/sound/core/seq/seq_fifo.c +++ b/sound/core/seq/seq_fifo.c @@ -123,7 +123,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f, return -EINVAL; snd_use_lock_use(&f->use_lock); - err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */ + err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */ if (err < 0) { if ((err == -ENOMEM) || (err == -EAGAIN)) atomic_inc(&f->overflow); diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c index 5847c4475bf3..4c8cbcd89887 100644 --- a/sound/core/seq/seq_memory.c +++ b/sound/core/seq/seq_memory.c @@ -221,7 +221,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell) */ static int snd_seq_cell_alloc(struct snd_seq_pool *pool, struct snd_seq_event_cell **cellp, - int nonblock, struct file *file) + int nonblock, struct file *file, + struct mutex *mutexp) { struct snd_seq_event_cell *cell; unsigned long flags; @@ -245,7 +246,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool, set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&pool->output_sleep, &wait); spin_unlock_irq(&pool->lock); + if (mutexp) + mutex_unlock(mutexp); schedule(); + if (mutexp) + mutex_lock(mutexp); spin_lock_irq(&pool->lock); remove_wait_queue(&pool->output_sleep, &wait); /* interrupted? 
*/ @@ -288,7 +293,7 @@ __error: */ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, struct snd_seq_event_cell **cellp, int nonblock, - struct file *file) + struct file *file, struct mutex *mutexp) { int ncells, err; unsigned int extlen; @@ -305,7 +310,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, if (ncells >= pool->total_elements) return -ENOMEM; - err = snd_seq_cell_alloc(pool, &cell, nonblock, file); + err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp); if (err < 0) return err; @@ -331,7 +336,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, int size = sizeof(struct snd_seq_event); if (len < size) size = len; - err = snd_seq_cell_alloc(pool, &tmp, nonblock, file); + err = snd_seq_cell_alloc(pool, &tmp, nonblock, file, + mutexp); if (err < 0) goto __error; if (cell->event.data.ext.ptr == NULL) diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h index 32f959c17786..3abe306c394a 100644 --- a/sound/core/seq/seq_memory.h +++ b/sound/core/seq/seq_memory.h @@ -66,7 +66,8 @@ struct snd_seq_pool { void snd_seq_cell_free(struct snd_seq_event_cell *cell); int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event, - struct snd_seq_event_cell **cellp, int nonblock, struct file *file); + struct snd_seq_event_cell **cellp, int nonblock, + struct file *file, struct mutex *mutexp); /* return number of unused (free) cells */ static inline int snd_seq_unused_cells(struct snd_seq_pool *pool) diff --git a/sound/core/seq/seq_prioq.c b/sound/core/seq/seq_prioq.c index bc1c8488fc2a..2bc6759e4adc 100644 --- a/sound/core/seq/seq_prioq.c +++ b/sound/core/seq/seq_prioq.c @@ -87,7 +87,7 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo) if (f->cells > 0) { /* drain prioQ */ while (f->cells > 0) - snd_seq_cell_free(snd_seq_prioq_cell_out(f)); + snd_seq_cell_free(snd_seq_prioq_cell_out(f, NULL)); } kfree(f); @@ -214,8 +214,18 @@ int snd_seq_prioq_cell_in(struct snd_seq_prioq * f, return 0; } +/* return 1 if the current time >= event timestamp */ +static int event_is_ready(struct snd_seq_event *ev, void *current_time) +{ + if ((ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) == SNDRV_SEQ_TIME_STAMP_TICK) + return snd_seq_compare_tick_time(current_time, &ev->time.tick); + else + return snd_seq_compare_real_time(current_time, &ev->time.time); +} + /* dequeue cell from prioq */ -struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f) +struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f, + void *current_time) { struct snd_seq_event_cell *cell; unsigned long flags; @@ -227,6 +237,8 @@ struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f) spin_lock_irqsave(&f->lock, flags); cell = f->head; + if (cell && current_time && !event_is_ready(&cell->event, current_time)) + cell = NULL; if (cell) { f->head = cell->next; @@ -252,18 +264,6 @@ int snd_seq_prioq_avail(struct snd_seq_prioq * f) return f->cells; } - -/* peek at cell at the head of the prioq */ -struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq * f) -{ - if (f == NULL) { - pr_debug("ALSA: seq: snd_seq_prioq_cell_in() called with NULL prioq\n"); - return NULL; - } - return f->head; -} - - static inline int prioq_match(struct snd_seq_event_cell *cell, int client, int timestamp) { diff --git a/sound/core/seq/seq_prioq.h b/sound/core/seq/seq_prioq.h index d38bb78d9345..2c315ca10fc4 100644 --- a/sound/core/seq/seq_prioq.h +++ 
b/sound/core/seq/seq_prioq.h @@ -44,14 +44,12 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo); int snd_seq_prioq_cell_in(struct snd_seq_prioq *f, struct snd_seq_event_cell *cell); /* dequeue cell from prioq */ -struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f); +struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f, + void *current_time); /* return number of events available in prioq */ int snd_seq_prioq_avail(struct snd_seq_prioq *f); -/* peek at cell at the head of the prioq */ -struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq *f); - /* client left queue */ void snd_seq_prioq_leave(struct snd_seq_prioq *f, int client, int timestamp); diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c index 79e0c5604ef8..1a6dc4ff44a6 100644 --- a/sound/core/seq/seq_queue.c +++ b/sound/core/seq/seq_queue.c @@ -277,30 +277,20 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop) __again: /* Process tick queue... */ - while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) { - if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick, - &cell->event.time.tick)) { - cell = snd_seq_prioq_cell_out(q->tickq); - if (cell) - snd_seq_dispatch_event(cell, atomic, hop); - } else { - /* event remains in the queue */ + for (;;) { + cell = snd_seq_prioq_cell_out(q->tickq, + &q->timer->tick.cur_tick); + if (!cell) break; - } + snd_seq_dispatch_event(cell, atomic, hop); } - /* Process time queue... */ - while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) { - if (snd_seq_compare_real_time(&q->timer->cur_time, - &cell->event.time.time)) { - cell = snd_seq_prioq_cell_out(q->timeq); - if (cell) - snd_seq_dispatch_event(cell, atomic, hop); - } else { - /* event remains in the queue */ + for (;;) { + cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time); + if (!cell) break; - } + snd_seq_dispatch_event(cell, atomic, hop); } /* free lock */ diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c index cbd20cb8ca11..dc91002d1e0d 100644 --- a/sound/drivers/aloop.c +++ b/sound/drivers/aloop.c @@ -192,6 +192,11 @@ static inline void loopback_timer_stop(struct loopback_pcm *dpcm) dpcm->timer.expires = 0; } +static inline void loopback_timer_stop_sync(struct loopback_pcm *dpcm) +{ + del_timer_sync(&dpcm->timer); +} + #define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK) #define CABLE_VALID_CAPTURE (1 << SNDRV_PCM_STREAM_CAPTURE) #define CABLE_VALID_BOTH (CABLE_VALID_PLAYBACK|CABLE_VALID_CAPTURE) @@ -326,6 +331,8 @@ static int loopback_prepare(struct snd_pcm_substream *substream) struct loopback_cable *cable = dpcm->cable; int bps, salign; + loopback_timer_stop_sync(dpcm); + salign = (snd_pcm_format_width(runtime->format) * runtime->channels) / 8; bps = salign * runtime->rate; @@ -659,7 +666,9 @@ static void free_cable(struct snd_pcm_substream *substream) return; if (cable->streams[!substream->stream]) { /* other stream is still alive */ + spin_lock_irq(&cable->lock); cable->streams[substream->stream] = NULL; + spin_unlock_irq(&cable->lock); } else { /* free the cable */ loopback->cables[substream->number][dev] = NULL; @@ -699,7 +708,6 @@ static int loopback_open(struct snd_pcm_substream *substream) loopback->cables[substream->number][dev] = cable; } dpcm->cable = cable; - cable->streams[substream->stream] = dpcm; snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); @@ -731,6 +739,11 @@ static int loopback_open(struct snd_pcm_substream *substream) runtime->hw = 
loopback_pcm_hardware; else runtime->hw = cable->hw; + + spin_lock_irq(&cable->lock); + cable->streams[substream->stream] = dpcm; + spin_unlock_irq(&cable->lock); + unlock: if (err < 0) { free_cable(substream); @@ -745,7 +758,7 @@ static int loopback_close(struct snd_pcm_substream *substream) struct loopback *loopback = substream->private_data; struct loopback_pcm *dpcm = substream->runtime->private_data; - loopback_timer_stop(dpcm); + loopback_timer_stop_sync(dpcm); mutex_lock(&loopback->cable_lock); free_cable(substream); mutex_unlock(&loopback->cable_lock); diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c index b02a5e8cad44..30e4925bf6b0 100644 --- a/sound/firewire/digi00x/amdtp-dot.c +++ b/sound/firewire/digi00x/amdtp-dot.c @@ -28,6 +28,9 @@ */ #define MAX_MIDI_RX_BLOCKS 8 +/* 3 = MAX(DOT_MIDI_IN_PORTS, DOT_MIDI_OUT_PORTS) + 1. */ +#define MAX_MIDI_PORTS 3 + /* * The double-oh-three algorithm was discovered by Robin Gareus and Damien * Zammit in 2012, with reverse-engineering for Digi 003 Rack. @@ -42,10 +45,8 @@ struct amdtp_dot { unsigned int pcm_channels; struct dot_state state; - unsigned int midi_ports; - /* 2 = MAX(DOT_MIDI_IN_PORTS, DOT_MIDI_OUT_PORTS) */ - struct snd_rawmidi_substream *midi[2]; - int midi_fifo_used[2]; + struct snd_rawmidi_substream *midi[MAX_MIDI_PORTS]; + int midi_fifo_used[MAX_MIDI_PORTS]; int midi_fifo_limit; void (*transfer_samples)(struct amdtp_stream *s, @@ -124,8 +125,8 @@ int amdtp_dot_set_parameters(struct amdtp_stream *s, unsigned int rate, return -EBUSY; /* - * A first data channel is for MIDI conformant data channel, the rest is - * Multi Bit Linear Audio data channel. + * A first data channel is for MIDI messages, the rest is Multi Bit + * Linear Audio data channel. */ err = amdtp_stream_set_parameters(s, rate, pcm_channels + 1); if (err < 0) @@ -135,11 +136,6 @@ int amdtp_dot_set_parameters(struct amdtp_stream *s, unsigned int rate, p->pcm_channels = pcm_channels; - if (s->direction == AMDTP_IN_STREAM) - p->midi_ports = DOT_MIDI_IN_PORTS; - else - p->midi_ports = DOT_MIDI_OUT_PORTS; - /* * We do not know the actual MIDI FIFO size of most devices. Just * assume two bytes, i.e., one byte can be received over the bus while @@ -281,13 +277,25 @@ static void write_midi_messages(struct amdtp_stream *s, __be32 *buffer, b = (u8 *)&buffer[0]; len = 0; - if (port < p->midi_ports && + if (port < MAX_MIDI_PORTS && midi_ratelimit_per_packet(s, port) && p->midi[port] != NULL) len = snd_rawmidi_transmit(p->midi[port], b + 1, 2); if (len > 0) { - b[3] = (0x10 << port) | len; + /* + * Upper 4 bits of LSB represent port number. + * - 0000b: physical MIDI port 1. + * - 0010b: physical MIDI port 2. + * - 1110b: console MIDI port. + */ + if (port == 2) + b[3] = 0xe0; + else if (port == 1) + b[3] = 0x20; + else + b[3] = 0x00; + b[3] |= len; midi_use_bytes(s, port, len); } else { b[1] = 0; @@ -309,11 +317,22 @@ static void read_midi_messages(struct amdtp_stream *s, __be32 *buffer, for (f = 0; f < data_blocks; f++) { b = (u8 *)&buffer[0]; - port = b[3] >> 4; - len = b[3] & 0x0f; - if (port < p->midi_ports && p->midi[port] && len > 0) - snd_rawmidi_receive(p->midi[port], b + 1, len); + len = b[3] & 0x0f; + if (len > 0) { + /* + * Upper 4 bits of LSB represent port number. + * - 0000b: physical MIDI port 1. Use port 0. + * - 1110b: console MIDI port. Use port 2. 
+ */ + if (b[3] >> 4 > 0) + port = 2; + else + port = 0; + + if (port < MAX_MIDI_PORTS && p->midi[port]) + snd_rawmidi_receive(p->midi[port], b + 1, len); + } buffer += s->data_block_quadlets; } @@ -364,7 +383,7 @@ void amdtp_dot_midi_trigger(struct amdtp_stream *s, unsigned int port, { struct amdtp_dot *p = s->protocol; - if (port < p->midi_ports) + if (port < MAX_MIDI_PORTS) ACCESS_ONCE(p->midi[port]) = midi; } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 20512fe32a97..fbd00821e326 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -184,6 +184,10 @@ module_param(power_save, xint, 0644); MODULE_PARM_DESC(power_save, "Automatic power-saving timeout " "(in second, 0 = disable)."); +static bool pm_blacklist = true; +module_param(pm_blacklist, bool, 0644); +MODULE_PARM_DESC(pm_blacklist, "Enable power-management blacklist"); + /* reset the HD-audio controller in power save mode. * this may give more power-saving, but will take longer time to * wake up. @@ -2055,6 +2059,24 @@ out_free: return err; } +#ifdef CONFIG_PM +/* On some boards setting power_save to a non 0 value leads to clicking / + * popping sounds when ever we enter/leave powersaving mode. Ideally we would + * figure out how to avoid these sounds, but that is not always feasible. + * So we keep a list of devices where we disable powersaving as its known + * to causes problems on these devices. + */ +static struct snd_pci_quirk power_save_blacklist[] = { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ + SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0), + /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */ + SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0), + /* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */ + SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0), + {} +}; +#endif /* CONFIG_PM */ + /* number of codec slots for each chipset: 0 = default slots (i.e. 
4) */ static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = { [AZX_DRIVER_NVIDIA] = 8, @@ -2067,6 +2089,7 @@ static int azx_probe_continue(struct azx *chip) struct hdac_bus *bus = azx_bus(chip); struct pci_dev *pci = chip->pci; int dev = chip->dev_index; + int val; int err; hda->probe_continued = 1; @@ -2142,7 +2165,21 @@ static int azx_probe_continue(struct azx *chip) chip->running = 1; azx_add_card_list(chip); - snd_hda_set_power_save(&chip->bus, power_save * 1000); + + val = power_save; +#ifdef CONFIG_PM + if (pm_blacklist) { + const struct snd_pci_quirk *q; + + q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist); + if (q && val) { + dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n", + q->subvendor, q->subdevice); + val = 0; + } + } +#endif /* CONFIG_PM */ + snd_hda_set_power_save(&chip->bus, val * 1000); if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo) pm_runtime_put_noidle(&pci->dev); diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index c92b7ba344ef..9fae1d248318 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -849,6 +849,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC), SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK), + SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), + SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b302d056e5d3..8cb14e27988b 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -3261,8 +3261,12 @@ static void alc269_fixup_mic_mute_hook(void *private_data, int enabled) pinval = snd_hda_codec_get_pin_target(codec, spec->mute_led_nid); pinval &= ~AC_PINCTL_VREFEN; pinval |= enabled ? 
AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80; - if (spec->mute_led_nid) + if (spec->mute_led_nid) { + /* temporarily power up/down for setting VREF */ + snd_hda_power_up_pm(codec); snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval); + snd_hda_power_down_pm(codec); + } } /* Make sure the led works even in runtime suspend */ @@ -4722,6 +4726,16 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec, } } +/* disable DAC3 (0x06) selection on NID 0x17 as it has no volume amp control */ +static void alc295_fixup_disable_dac3(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + hda_nid_t conn[2] = { 0x02, 0x03 }; + snd_hda_override_conn_list(codec, 0x17, 2, conn); + } +} + /* Hook to update amp GPIO4 for automute */ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, struct hda_jack_callback *jack) @@ -4871,6 +4885,7 @@ enum { ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, ALC255_FIXUP_DELL_SPK_NOISE, ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC295_FIXUP_DISABLE_DAC3, ALC280_FIXUP_HP_HEADSET_MIC, ALC221_FIXUP_HP_FRONT_MIC, ALC292_FIXUP_TPT460, @@ -5560,6 +5575,10 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE, }, + [ALC295_FIXUP_DISABLE_DAC3] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc295_fixup_disable_dac3, + }, [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -5617,6 +5636,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), + SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3), SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), @@ -6701,6 +6721,7 @@ enum { ALC668_FIXUP_DELL_DISABLE_AAMIX, ALC668_FIXUP_DELL_XPS13, ALC662_FIXUP_ASUS_Nx50, + ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE, ALC668_FIXUP_ASUS_Nx51, }; @@ -6948,14 +6969,21 @@ static const struct hda_fixup alc662_fixups[] = { .chained = true, .chain_id = ALC662_FIXUP_BASS_1A }, + [ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_headset_mode_alc668, + .chain_id = ALC662_FIXUP_BASS_CHMAP + }, [ALC668_FIXUP_ASUS_Nx51] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { - {0x1a, 0x90170151}, /* bass speaker */ + { 0x19, 0x03a1913d }, /* use as headphone mic, without its own jack detect */ + { 0x1a, 0x90170151 }, /* bass speaker */ + { 0x1b, 0x03a1113c }, /* use as headset mic, without its own jack detect */ {} }, .chained = true, - .chain_id = ALC662_FIXUP_BASS_CHMAP, + .chain_id = ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE, }, }; diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c index a74c64c7053c..e83da42a8c03 100644 --- a/sound/soc/intel/atom/sst/sst_stream.c +++ b/sound/soc/intel/atom/sst/sst_stream.c @@ -221,7 +221,7 @@ int sst_send_byte_stream_mrfld(struct intel_sst_drv *sst_drv_ctx, sst_free_block(sst_drv_ctx, block); out: test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id); - return 0; + return ret; } /* diff --git 
a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c index 38d65a3529c4..44d560966e9c 100644 --- a/sound/soc/intel/boards/cht_bsw_rt5645.c +++ b/sound/soc/intel/boards/cht_bsw_rt5645.c @@ -96,6 +96,7 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_MIC("Int Mic", NULL), + SND_SOC_DAPM_MIC("Int Analog Mic", NULL), SND_SOC_DAPM_SPK("Ext Spk", NULL), SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, platform_clock_control, SND_SOC_DAPM_POST_PMD), @@ -106,6 +107,8 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = { {"IN1N", NULL, "Headset Mic"}, {"DMIC L1", NULL, "Int Mic"}, {"DMIC R1", NULL, "Int Mic"}, + {"IN2P", NULL, "Int Analog Mic"}, + {"IN2N", NULL, "Int Analog Mic"}, {"Headphone", NULL, "HPOL"}, {"Headphone", NULL, "HPOR"}, {"Ext Spk", NULL, "SPOL"}, @@ -119,6 +122,9 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = { {"Headphone", NULL, "Platform Clock"}, {"Headset Mic", NULL, "Platform Clock"}, {"Int Mic", NULL, "Platform Clock"}, + {"Int Analog Mic", NULL, "Platform Clock"}, + {"Int Analog Mic", NULL, "micbias1"}, + {"Int Analog Mic", NULL, "micbias2"}, {"Ext Spk", NULL, "Platform Clock"}, }; @@ -147,6 +153,7 @@ static const struct snd_kcontrol_new cht_mc_controls[] = { SOC_DAPM_PIN_SWITCH("Headphone"), SOC_DAPM_PIN_SWITCH("Headset Mic"), SOC_DAPM_PIN_SWITCH("Int Mic"), + SOC_DAPM_PIN_SWITCH("Int Analog Mic"), SOC_DAPM_PIN_SWITCH("Ext Spk"), }; diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index b4844f78266f..f6c3be192cc9 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c @@ -280,7 +280,7 @@ static int probe_codec(struct hdac_ext_bus *ebus, int addr) struct hdac_bus *bus = ebus_to_hbus(ebus); unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) | (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID; - unsigned int res; + unsigned int res = -1; mutex_lock(&bus->cmd_mutex); snd_hdac_bus_send_cmd(bus, cmd); diff --git a/sound/soc/msm/apq8096-auto.c b/sound/soc/msm/apq8096-auto.c index d6473cccf496..0de8b0237aae 100644 --- a/sound/soc/msm/apq8096-auto.c +++ b/sound/soc/msm/apq8096-auto.c @@ -5122,20 +5122,22 @@ static struct snd_soc_dai_link apq8096_common_dai_links[] = { .be_id = MSM_FRONTEND_DAI_LSM7, }, { - .name = "Listen 8 Audio Service", - .stream_name = "Listen 8 Audio Service", - .cpu_dai_name = "LSM8", - .platform_name = "msm-lsm-client", + .name = "MSM8996 LowLatency2", + .stream_name = "MultiMedia22", + .cpu_dai_name = "MultiMedia22", + .platform_name = "msm-pcm-dsp.1", .dynamic = 1, - .dpcm_capture = 1, - .trigger = { SND_SOC_DPCM_TRIGGER_POST, - SND_SOC_DPCM_TRIGGER_POST }, - .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, - .ignore_suspend = 1, - .ignore_pmdown_time = 1, + .async_ops = ASYNC_DPCM_SND_SOC_PREPARE, + .dpcm_playback = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM8, + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .ignore_suspend = 1, + /* this dainlink has playback support */ + .ignore_pmdown_time = 1, + .be_id = MSM_FRONTEND_DAI_MULTIMEDIA22, + .ops = &apq8096_ll_ops, }, { .name = "MSM8996 LowLatency Loopback", diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c index 26b40d2081f0..8098db80194d 100644 --- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c @@ -91,86 +91,6 @@ enum { 
RATE_MAX_NUM_OF_AUX_PCM_RATES, }; -enum { - IDX_PRIMARY_TDM_RX_0, - IDX_PRIMARY_TDM_RX_1, - IDX_PRIMARY_TDM_RX_2, - IDX_PRIMARY_TDM_RX_3, - IDX_PRIMARY_TDM_RX_4, - IDX_PRIMARY_TDM_RX_5, - IDX_PRIMARY_TDM_RX_6, - IDX_PRIMARY_TDM_RX_7, - IDX_PRIMARY_TDM_TX_0, - IDX_PRIMARY_TDM_TX_1, - IDX_PRIMARY_TDM_TX_2, - IDX_PRIMARY_TDM_TX_3, - IDX_PRIMARY_TDM_TX_4, - IDX_PRIMARY_TDM_TX_5, - IDX_PRIMARY_TDM_TX_6, - IDX_PRIMARY_TDM_TX_7, - IDX_SECONDARY_TDM_RX_0, - IDX_SECONDARY_TDM_RX_1, - IDX_SECONDARY_TDM_RX_2, - IDX_SECONDARY_TDM_RX_3, - IDX_SECONDARY_TDM_RX_4, - IDX_SECONDARY_TDM_RX_5, - IDX_SECONDARY_TDM_RX_6, - IDX_SECONDARY_TDM_RX_7, - IDX_SECONDARY_TDM_TX_0, - IDX_SECONDARY_TDM_TX_1, - IDX_SECONDARY_TDM_TX_2, - IDX_SECONDARY_TDM_TX_3, - IDX_SECONDARY_TDM_TX_4, - IDX_SECONDARY_TDM_TX_5, - IDX_SECONDARY_TDM_TX_6, - IDX_SECONDARY_TDM_TX_7, - IDX_TERTIARY_TDM_RX_0, - IDX_TERTIARY_TDM_RX_1, - IDX_TERTIARY_TDM_RX_2, - IDX_TERTIARY_TDM_RX_3, - IDX_TERTIARY_TDM_RX_4, - IDX_TERTIARY_TDM_RX_5, - IDX_TERTIARY_TDM_RX_6, - IDX_TERTIARY_TDM_RX_7, - IDX_TERTIARY_TDM_TX_0, - IDX_TERTIARY_TDM_TX_1, - IDX_TERTIARY_TDM_TX_2, - IDX_TERTIARY_TDM_TX_3, - IDX_TERTIARY_TDM_TX_4, - IDX_TERTIARY_TDM_TX_5, - IDX_TERTIARY_TDM_TX_6, - IDX_TERTIARY_TDM_TX_7, - IDX_QUATERNARY_TDM_RX_0, - IDX_QUATERNARY_TDM_RX_1, - IDX_QUATERNARY_TDM_RX_2, - IDX_QUATERNARY_TDM_RX_3, - IDX_QUATERNARY_TDM_RX_4, - IDX_QUATERNARY_TDM_RX_5, - IDX_QUATERNARY_TDM_RX_6, - IDX_QUATERNARY_TDM_RX_7, - IDX_QUATERNARY_TDM_TX_0, - IDX_QUATERNARY_TDM_TX_1, - IDX_QUATERNARY_TDM_TX_2, - IDX_QUATERNARY_TDM_TX_3, - IDX_QUATERNARY_TDM_TX_4, - IDX_QUATERNARY_TDM_TX_5, - IDX_QUATERNARY_TDM_TX_6, - IDX_QUATERNARY_TDM_TX_7, - IDX_TDM_MAX, -}; - -enum { - IDX_GROUP_PRIMARY_TDM_RX, - IDX_GROUP_PRIMARY_TDM_TX, - IDX_GROUP_SECONDARY_TDM_RX, - IDX_GROUP_SECONDARY_TDM_TX, - IDX_GROUP_TERTIARY_TDM_RX, - IDX_GROUP_TERTIARY_TDM_TX, - IDX_GROUP_QUATERNARY_TDM_RX, - IDX_GROUP_QUATERNARY_TDM_TX, - IDX_GROUP_TDM_MAX, -}; - struct msm_dai_q6_dai_data { DECLARE_BITMAP(status_mask, STATUS_MAX); DECLARE_BITMAP(hwfree_status, STATUS_MAX); diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c index 5d3d24b058d3..9af5de2952d4 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-loopback-v2.c @@ -847,7 +847,7 @@ static int msm_pcm_channel_mixer_output_map_ctl_put( } chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]); - for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) chmixer_pspd->out_ch_map[i] = ucontrol->value.integer.value[i]; @@ -878,7 +878,7 @@ static int msm_pcm_channel_mixer_output_map_ctl_get( } chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]); - for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) ucontrol->value.integer.value[i] = chmixer_pspd->out_ch_map[i]; return 0; @@ -908,7 +908,7 @@ static int msm_pcm_channel_mixer_input_map_ctl_put( } chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]); - for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) chmixer_pspd->in_ch_map[i] = ucontrol->value.integer.value[i]; return 0; @@ -938,7 +938,7 @@ static int msm_pcm_channel_mixer_input_map_ctl_get( } chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]); - for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) ucontrol->value.integer.value[i] = chmixer_pspd->in_ch_map[i]; 
return 0; @@ -969,13 +969,13 @@ static int msm_pcm_channel_mixer_weight_ctl_put( } chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]); - if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL) { + if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL_V2) { pr_err("%s: invalid channel number %d\n", __func__, channel); return -EINVAL; } channel--; - for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) chmixer_pspd->channel_weight[channel][i] = ucontrol->value.integer.value[i]; return 0; @@ -1005,14 +1005,14 @@ static int msm_pcm_channel_mixer_weight_ctl_get( return -EINVAL; } - if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL) { + if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL_V2) { pr_err("%s: invalid channel number %d\n", __func__, channel); return -EINVAL; } channel--; chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]); - for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) ucontrol->value.integer.value[i] = chmixer_pspd->channel_weight[channel][i]; return 0; @@ -1462,7 +1462,7 @@ static int msm_pcm_add_channel_mixer_controls(struct snd_soc_pcm_runtime *rtd) pr_err("%s: pcm add channel mixer output map controls failed:%d\n", __func__, ret); - for (i = 1; i <= PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 1; i <= PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) ret |= msm_pcm_add_channel_mixer_weight_controls(rtd, i); if (ret) pr_err("%s: pcm add channel mixer weight controls failed:%d\n", diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c index a0364bbdfeb9..11b86cb5ff5c 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c @@ -2263,7 +2263,7 @@ static int msm_pcm_channel_mixer_output_map_ctl_put( } chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]); - for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) chmixer_pspd->out_ch_map[i] = ucontrol->value.integer.value[i]; @@ -2294,7 +2294,7 @@ static int msm_pcm_channel_mixer_output_map_ctl_get( } chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]); - for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) ucontrol->value.integer.value[i] = chmixer_pspd->out_ch_map[i]; return 0; @@ -2324,7 +2324,7 @@ static int msm_pcm_channel_mixer_input_map_ctl_put( } chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]); - for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) chmixer_pspd->in_ch_map[i] = ucontrol->value.integer.value[i]; return 0; @@ -2354,7 +2354,7 @@ static int msm_pcm_channel_mixer_input_map_ctl_get( } chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]); - for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) ucontrol->value.integer.value[i] = chmixer_pspd->in_ch_map[i]; return 0; @@ -2385,13 +2385,13 @@ static int msm_pcm_channel_mixer_weight_ctl_put( } chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]); - if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL) { + if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL_V2) { pr_err("%s: invalid channel number %d\n", __func__, channel); return -EINVAL; } channel--; - for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) chmixer_pspd->channel_weight[channel][i] = ucontrol->value.integer.value[i]; return 0; @@ 
-2421,14 +2421,14 @@ static int msm_pcm_channel_mixer_weight_ctl_get( return -EINVAL; } - if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL) { + if (channel <= 0 || channel > PCM_FORMAT_MAX_NUM_CHANNEL_V2) { pr_err("%s: invalid channel number %d\n", __func__, channel); return -EINVAL; } channel--; chmixer_pspd = &(pdata->chmixer_pspd[fe_id][session_type]); - for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 0; i < PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) ucontrol->value.integer.value[i] = chmixer_pspd->channel_weight[channel][i]; return 0; @@ -2878,7 +2878,7 @@ static int msm_pcm_add_channel_mixer_controls(struct snd_soc_pcm_runtime *rtd) pr_err("%s: pcm add channel mixer output map controls failed:%d\n", __func__, ret); - for (i = 1; i <= PCM_FORMAT_MAX_NUM_CHANNEL; i++) + for (i = 1; i <= PCM_FORMAT_MAX_NUM_CHANNEL_V2; i++) ret |= msm_pcm_add_channel_mixer_weight_controls(rtd, i); if (ret) pr_err("%s: pcm add channel mixer weight controls failed:%d\n", diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c index 2b712bee0bc1..8a28c4fa6746 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c @@ -5844,6 +5844,9 @@ static const struct snd_kcontrol_new pri_tdm_rx_0_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_0, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_PRI_TDM_RX_0, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_0, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -5901,6 +5904,9 @@ static const struct snd_kcontrol_new pri_tdm_rx_1_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_1, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_PRI_TDM_RX_1, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_1, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -5958,6 +5964,9 @@ static const struct snd_kcontrol_new pri_tdm_rx_2_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_2, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_PRI_TDM_RX_2, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_2, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6015,6 +6024,9 @@ static const struct snd_kcontrol_new pri_tdm_rx_3_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_PRI_TDM_RX_3, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_PRI_TDM_RX_3, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_PRI_TDM_RX_3, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6123,6 +6135,9 @@ static const struct 
snd_kcontrol_new sec_tdm_rx_0_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_0, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SEC_TDM_RX_0, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_0, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6180,6 +6195,9 @@ static const struct snd_kcontrol_new sec_tdm_rx_1_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_1, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SEC_TDM_RX_1, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_1, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6237,6 +6255,9 @@ static const struct snd_kcontrol_new sec_tdm_rx_2_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_2, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SEC_TDM_RX_2, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_2, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6294,6 +6315,9 @@ static const struct snd_kcontrol_new sec_tdm_rx_3_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SEC_TDM_RX_3, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SEC_TDM_RX_3, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_SEC_TDM_RX_3, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6402,6 +6426,9 @@ static const struct snd_kcontrol_new tert_tdm_rx_0_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_0, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_TERT_TDM_RX_0, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_0, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6510,6 +6537,9 @@ static const struct snd_kcontrol_new tert_tdm_rx_1_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_1, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_TERT_TDM_RX_1, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_1, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6567,6 +6597,9 @@ static const struct snd_kcontrol_new tert_tdm_rx_2_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_2, 
MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_TERT_TDM_RX_2, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_2, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6624,6 +6657,9 @@ static const struct snd_kcontrol_new tert_tdm_rx_3_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_3, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_TERT_TDM_RX_3, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_3, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6681,6 +6717,9 @@ static const struct snd_kcontrol_new tert_tdm_rx_4_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_TERT_TDM_RX_4, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_TERT_TDM_RX_4, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_TERT_TDM_RX_4, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6741,6 +6780,9 @@ static const struct snd_kcontrol_new quat_tdm_rx_0_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_0, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_QUAT_TDM_RX_0, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_0, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6852,6 +6894,9 @@ static const struct snd_kcontrol_new quat_tdm_rx_1_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_1, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_QUAT_TDM_RX_1, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_1, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6912,6 +6957,9 @@ static const struct snd_kcontrol_new quat_tdm_rx_2_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_2, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_QUAT_TDM_RX_2, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_2, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -6972,6 +7020,9 @@ static const struct snd_kcontrol_new quat_tdm_rx_3_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_QUAT_TDM_RX_3, MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + 
SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_QUAT_TDM_RX_3, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), SOC_SINGLE_EXT("MultiMedia26", MSM_BACKEND_DAI_QUAT_TDM_RX_3, MSM_FRONTEND_DAI_MULTIMEDIA26, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), @@ -13780,6 +13831,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"}, {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Audio Mixer"}, @@ -13800,6 +13852,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"PRI_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"}, {"PRI_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"PRI_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"PRI_TDM_RX_1 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"PRI_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"PRI_TDM_RX_1", NULL, "PRI_TDM_RX_1 Audio Mixer"}, @@ -13820,6 +13873,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"PRI_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"}, {"PRI_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"PRI_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"PRI_TDM_RX_2 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"PRI_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"PRI_TDM_RX_2", NULL, "PRI_TDM_RX_2 Audio Mixer"}, @@ -13840,6 +13894,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"PRI_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"}, {"PRI_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"PRI_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"PRI_TDM_RX_3 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"PRI_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"PRI_TDM_RX_3", NULL, "PRI_TDM_RX_3 Audio Mixer"}, @@ -13878,6 +13933,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"}, {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0 Audio Mixer"}, @@ -13898,6 +13954,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"SEC_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"}, {"SEC_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"SEC_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"SEC_TDM_RX_1 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"SEC_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"SEC_TDM_RX_1", NULL, "SEC_TDM_RX_1 Audio Mixer"}, @@ -13918,6 +13975,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"SEC_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"}, {"SEC_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"SEC_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"SEC_TDM_RX_2 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"SEC_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"SEC_TDM_RX_2", NULL, "SEC_TDM_RX_2 Audio Mixer"}, @@ -13938,6 +13996,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"SEC_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"}, {"SEC_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"SEC_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"SEC_TDM_RX_3 Audio Mixer", "MultiMedia22", "MM_DL22"}, 
{"SEC_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"SEC_TDM_RX_3", NULL, "SEC_TDM_RX_3 Audio Mixer"}, @@ -13976,6 +14035,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"TERT_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"}, {"TERT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"TERT_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"TERT_TDM_RX_0 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"TERT_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"TERT_TDM_RX_0", NULL, "TERT_TDM_RX_0 Audio Mixer"}, @@ -14014,6 +14074,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"TERT_TDM_RX_1 Audio Mixer", "MultiMedia15", "MM_DL15"}, {"TERT_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"TERT_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"TERT_TDM_RX_1 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"TERT_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"TERT_TDM_RX_1", NULL, "TERT_TDM_RX_1 Audio Mixer"}, @@ -14034,6 +14095,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"TERT_TDM_RX_2 Audio Mixer", "MultiMedia15", "MM_DL15"}, {"TERT_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"TERT_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"TERT_TDM_RX_2 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"TERT_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"TERT_TDM_RX_2", NULL, "TERT_TDM_RX_2 Audio Mixer"}, @@ -14054,6 +14116,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"TERT_TDM_RX_3 Audio Mixer", "MultiMedia15", "MM_DL15"}, {"TERT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"TERT_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"TERT_TDM_RX_3 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"TERT_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"TERT_TDM_RX_3", NULL, "TERT_TDM_RX_3 Audio Mixer"}, @@ -14074,6 +14137,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia15", "MM_DL15"}, {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"TERT_TDM_RX_4 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"TERT_TDM_RX_4", NULL, "TERT_TDM_RX_4 Audio Mixer"}, @@ -14095,47 +14159,10 @@ static const struct snd_soc_dapm_route intercon[] = { {"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia20", "MM_DL20"}, {"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"QUAT_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"QUAT_TDM_RX_0", NULL, "QUAT_TDM_RX_0 Audio Mixer"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"}, - {"PRI_TDM_RX_0 Audio Mixer", 
"MultiMedia16", "MM_DL16"}, - {"PRI_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"}, - {"PRI_TDM_RX_0", NULL, "PRI_TDM_RX_0 Audio Mixer"}, - - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia1", "MM_DL1"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia2", "MM_DL2"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia3", "MM_DL3"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia4", "MM_DL4"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia5", "MM_DL5"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia6", "MM_DL6"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia7", "MM_DL7"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia8", "MM_DL8"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia9", "MM_DL9"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia10", "MM_DL10"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia11", "MM_DL11"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia12", "MM_DL12"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia13", "MM_DL13"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia14", "MM_DL14"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia15", "MM_DL15"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia16", "MM_DL16"}, - {"SEC_TDM_RX_0 Audio Mixer", "MultiMedia26", "MM_DL26"}, - {"SEC_TDM_RX_0", NULL, "SEC_TDM_RX_0 Audio Mixer"}, - {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia1", "MM_DL1"}, {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia2", "MM_DL2"}, {"QUAT_TDM_TX_0 Audio Mixer", "MultiMedia3", "MM_DL3"}, @@ -14172,6 +14199,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia20", "MM_DL20"}, {"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"QUAT_TDM_RX_1 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"QUAT_TDM_RX_1", NULL, "QUAT_TDM_RX_1 Audio Mixer"}, @@ -14193,6 +14221,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia20", "MM_DL20"}, {"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"QUAT_TDM_RX_2 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"QUAT_TDM_RX_2", NULL, "QUAT_TDM_RX_2 Audio Mixer"}, @@ -14214,6 +14243,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia16", "MM_DL16"}, {"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia20", "MM_DL20"}, {"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia22", "MM_DL22"}, {"QUAT_TDM_RX_3 Audio Mixer", "MultiMedia26", "MM_DL26"}, {"QUAT_TDM_RX_3", NULL, "QUAT_TDM_RX_3 Audio Mixer"}, diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c index 4b76f1b8af1d..24d4199ac30b 100644 --- a/sound/soc/msm/qdsp6v2/q6adm.c +++ b/sound/soc/msm/qdsp6v2/q6adm.c @@ -454,7 +454,7 @@ int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id, int channel_index) { struct adm_cmd_set_pspd_mtmx_strtr_params_v5 *adm_params = NULL; - struct param_hdr_v3 data_v5 = {0,}; + struct param_hdr_v1 data_v5 = {0,}; int ret = 0, port_idx, sz = 0, param_size = 0; u16 *adm_pspd_params; u16 *ptr; @@ -511,7 +511,7 @@ int adm_programable_channel_mixer(int port_id, int copp_idx, int session_id, data_v5.param_size = param_size; adm_params->payload_size = sizeof(struct default_chmixer_param_id_coeff) + - sizeof(struct param_hdr_v3) + data_v5.param_size; + sizeof(struct param_hdr_v1) + data_v5.param_size; adm_pspd_params = (u16 *)((u8 *)adm_params + sizeof(struct 
adm_cmd_set_pspd_mtmx_strtr_params_v5)); memcpy(adm_pspd_params, &data_v5, sizeof(data_v5)); diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c index 352ea9257832..84ab632d9b9c 100644 --- a/sound/soc/msm/qdsp6v2/q6afe.c +++ b/sound/soc/msm/qdsp6v2/q6afe.c @@ -178,6 +178,42 @@ done: return ret; } +static atomic_t tdm_gp_en_ref[IDX_GROUP_TDM_MAX]; + +static int afe_get_tdm_group_idx(u16 group_id) +{ + int gp_idx = -1; + + switch (group_id) { + case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_RX: + gp_idx = IDX_GROUP_PRIMARY_TDM_RX; + break; + case AFE_GROUP_DEVICE_ID_PRIMARY_TDM_TX: + gp_idx = IDX_GROUP_PRIMARY_TDM_TX; + break; + case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_RX: + gp_idx = IDX_GROUP_SECONDARY_TDM_RX; + break; + case AFE_GROUP_DEVICE_ID_SECONDARY_TDM_TX: + gp_idx = IDX_GROUP_SECONDARY_TDM_TX; + break; + case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_RX: + gp_idx = IDX_GROUP_TERTIARY_TDM_RX; + break; + case AFE_GROUP_DEVICE_ID_TERTIARY_TDM_TX: + gp_idx = IDX_GROUP_TERTIARY_TDM_TX; + break; + case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_RX: + gp_idx = IDX_GROUP_QUATERNARY_TDM_RX; + break; + case AFE_GROUP_DEVICE_ID_QUATERNARY_TDM_TX: + gp_idx = IDX_GROUP_QUATERNARY_TDM_TX; + break; + } + + return gp_idx; +} + int afe_get_topology(int port_id) { int topology; @@ -356,6 +392,7 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv) return -EINVAL; } if (data->opcode == RESET_EVENTS) { + int i = 0; pr_debug("%s: reset event = %d %d apr[%pK]\n", __func__, data->reset_event, data->reset_proc, this_afe.apr); @@ -398,6 +435,12 @@ static int32_t afe_callback(struct apr_client_data *data, void *priv) this_afe.rx_cb = NULL; } + /* + * reset TDM group enable ref cnt + */ + for (i = 0; i < IDX_GROUP_TDM_MAX; i++) + atomic_set(&tdm_gp_en_ref[i], 0); + return 0; } afe_callback_debug_print(data); @@ -3864,11 +3907,36 @@ int afe_port_group_enable(u16 group_id, { struct afe_group_device_enable group_enable = {0}; struct param_hdr_v3 param_hdr = {0}; - int ret; + int ret = 0; + int gp_idx; pr_debug("%s: group id: 0x%x enable: %d\n", __func__, group_id, enable); + gp_idx = afe_get_tdm_group_idx(group_id); + + if ((gp_idx >= 0) && (gp_idx < IDX_GROUP_TDM_MAX)) { + + atomic_t *gp_ref = &tdm_gp_en_ref[gp_idx]; + + if (enable) + atomic_inc(gp_ref); + else + atomic_dec(gp_ref); + + if ((enable) && (atomic_read(gp_ref) > 1)) { + pr_err("%s: this TDM group is enabled already %d refs_cnt %d\n", + __func__, group_id, atomic_read(gp_ref)); + goto rtn; + } + + if ((!enable) && (atomic_read(gp_ref) > 0)) { + pr_err("%s: this TDM group will be disabled in last call %d refs_cnt %d\n", + __func__, group_id, atomic_read(gp_ref)); + goto rtn; + } + } + ret = afe_q6_interface_prepare(); if (ret != 0) { pr_err("%s: Q6 interface prepare failed %d\n", __func__, ret); @@ -3896,6 +3964,8 @@ int afe_port_group_enable(u16 group_id, pr_err("%s: AFE_PARAM_ID_GROUP_DEVICE_ENABLE failed %d\n", __func__, ret); +rtn: + return ret; } @@ -6556,6 +6626,10 @@ static int __init afe_init(void) pr_err("%s: could not init cal data! 
%d\n", __func__, ret); config_debug_fs_init(); + + for (i = 0; i < IDX_GROUP_TDM_MAX; i++) + atomic_set(&tdm_gp_en_ref[i], 0); + return 0; } diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c index b6615affe571..fde974d52bb2 100644 --- a/sound/soc/nuc900/nuc900-ac97.c +++ b/sound/soc/nuc900/nuc900-ac97.c @@ -67,7 +67,7 @@ static unsigned short nuc900_ac97_read(struct snd_ac97 *ac97, /* polling the AC_R_FINISH */ while (!(AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_R_FINISH) - && timeout--) + && --timeout) mdelay(1); if (!timeout) { @@ -121,7 +121,7 @@ static void nuc900_ac97_write(struct snd_ac97 *ac97, unsigned short reg, /* polling the AC_W_FINISH */ while ((AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_W_FINISH) - && timeout--) + && --timeout) mdelay(1); if (!timeout) diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index 38aae96267c9..df79d7c846ea 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c @@ -143,6 +143,15 @@ static int rsnd_ssi_master_clk_start(struct rsnd_ssi *ssi, for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) { /* + * It will set SSIWSR.CONT here, but SSICR.CKDV = 000 + * with it is not allowed. (SSIWSR.WS_MODE with + * SSICR.CKDV = 000 is not allowed either). + * Skip it. See SSICR.CKDV + */ + if (j == 0) + continue; + + /* * this driver is assuming that * system word is 64fs (= 2 x 32bit) * see rsnd_ssi_init() @@ -444,6 +453,13 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod, struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); u32 *buf = (u32 *)(runtime->dma_area + rsnd_dai_pointer_offset(io, 0)); + int shift = 0; + + switch (runtime->sample_bits) { + case 32: + shift = 8; + break; + } /* * 8/16/32 data can be assesse to TDR/RDR register @@ -451,9 +467,9 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod, * see rsnd_ssi_init() */ if (rsnd_io_is_play(io)) - rsnd_mod_write(mod, SSITDR, *buf); + rsnd_mod_write(mod, SSITDR, (*buf) << shift); else - *buf = rsnd_mod_read(mod, SSIRDR); + *buf = (rsnd_mod_read(mod, SSIRDR) >> shift); elapsed = rsnd_dai_pointer_update(io, sizeof(*buf)); } diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 8a59d4782a0f..69bf5cf1e91e 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -3277,4 +3277,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"), } }, +{ + /* + * Bower's & Wilkins PX headphones only support the 48 kHz sample rate + * even though it advertises more. The capture interface doesn't work + * even on windows. 
+ */ + USB_DEVICE(0x19b5, 0x0021), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_AUDIO_STANDARD_MIXER, + }, + /* Capture */ + { + .ifnum = 1, + .type = QUIRK_IGNORE_INTERFACE, + }, + /* Playback */ + { + .ifnum = 2, + .type = QUIRK_AUDIO_FIXED_ENDPOINT, + .data = &(const struct audioformat) { + .formats = SNDRV_PCM_FMTBIT_S16_LE, + .channels = 2, + .iface = 2, + .altsetting = 1, + .altset_idx = 1, + .attributes = UAC_EP_CS_ATTR_FILL_MAX | + UAC_EP_CS_ATTR_SAMPLE_RATE, + .endpoint = 0x03, + .ep_attr = USB_ENDPOINT_XFER_ISOC, + .rates = SNDRV_PCM_RATE_48000, + .rate_min = 48000, + .rate_max = 48000, + .nr_rates = 1, + .rate_table = (unsigned int[]) { + 48000 + } + } + }, + } + } +}, + #undef USB_DEVICE_VENDOR_SPEC diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c index 132afc97676c..9d4ac90ca87e 100644 --- a/tools/perf/builtin-probe.c +++ b/tools/perf/builtin-probe.c @@ -405,9 +405,9 @@ static int perf_del_probe_events(struct strfilter *filter) } if (ret == -ENOENT && ret2 == -ENOENT) - pr_debug("\"%s\" does not hit any event.\n", str); - /* Note that this is silently ignored */ - ret = 0; + pr_warning("\"%s\" does not hit any event.\n", str); + else + ret = 0; error: if (kfd >= 0) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index ebe7115c751a..da8afc121118 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -1152,6 +1152,10 @@ static struct syscall_fmt { { .name = "mlockall", .errmsg = true, .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, { .name = "mmap", .hexret = true, +/* The standard mmap maps to old_mmap on s390x */ +#if defined(__s390x__) + .alias = "old_mmap", +#endif .arg_scnprintf = { [0] = SCA_HEX, /* addr */ [2] = SCA_MMAP_PROT, /* prot */ [3] = SCA_MMAP_FLAGS, /* flags */ diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c index a767a6400c5c..6ea4fcfaab36 100644 --- a/tools/perf/tests/code-reading.c +++ b/tools/perf/tests/code-reading.c @@ -182,6 +182,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode, unsigned char buf2[BUFSZ]; size_t ret_len; u64 objdump_addr; + const char *objdump_name; + char decomp_name[KMOD_DECOMP_LEN]; int ret; pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); @@ -242,9 +244,25 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode, state->done[state->done_cnt++] = al.map->start; } + objdump_name = al.map->dso->long_name; + if (dso__needs_decompress(al.map->dso)) { + if (dso__decompress_kmodule_path(al.map->dso, objdump_name, + decomp_name, + sizeof(decomp_name)) < 0) { + pr_debug("decompression failed\n"); + return -1; + } + + objdump_name = decomp_name; + } + /* Read the object code using objdump */ objdump_addr = map__rip_2objdump(al.map, al.addr); - ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len); + ret = read_via_objdump(objdump_name, objdump_addr, buf2, len); + + if (dso__needs_decompress(al.map->dso)) + unlink(objdump_name); + if (ret > 0) { /* * The kernel maps are inaccurate - assume objdump is right in diff --git a/tools/perf/tests/kmod-path.c b/tools/perf/tests/kmod-path.c index 08c433b4bf4f..25e80c02230b 100644 --- a/tools/perf/tests/kmod-path.c +++ b/tools/perf/tests/kmod-path.c @@ -60,6 +60,7 @@ int test__kmod_path__parse(void) M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_KERNEL, true); M("/xxxx/xxxx/x-x.ko", 
PERF_RECORD_MISC_USER, false); +#ifdef HAVE_ZLIB_SUPPORT /* path alloc_name alloc_ext kmod comp name ext */ T("/xxxx/xxxx/x.ko.gz", true , true , true, true, "[x]", "gz"); T("/xxxx/xxxx/x.ko.gz", false , true , true, true, NULL , "gz"); @@ -95,6 +96,7 @@ int test__kmod_path__parse(void) M("x.ko.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true); M("x.ko.gz", PERF_RECORD_MISC_KERNEL, true); M("x.ko.gz", PERF_RECORD_MISC_USER, false); +#endif /* path alloc_name alloc_ext kmod comp name ext */ T("[test_module]", true , true , true, false, "[test_module]", NULL); diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 26cba64345e3..46af9dde11e2 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -234,8 +234,8 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, if (machine__is_default_guest(machine)) return 0; - snprintf(filename, sizeof(filename), "%s/proc/%d/maps", - machine->root_dir, pid); + snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps", + machine->root_dir, pid, pid); fp = fopen(filename, "r"); if (fp == NULL) { diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 43838003c1a1..304f5d710143 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1258,8 +1258,16 @@ static int __event_process_build_id(struct build_id_event *bev, dso__set_build_id(dso, &bev->build_id); - if (!is_kernel_module(filename, cpumode)) - dso->kernel = dso_type; + if (dso_type != DSO_TYPE_USER) { + struct kmod_path m = { .name = NULL, }; + + if (!kmod_path__parse_name(&m, filename) && m.kmod) + dso__set_short_name(dso, strdup(m.name), true); + else + dso->kernel = dso_type; + + free(m.name); + } build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c index b1b9e2385f4b..5e58149c4df2 100644 --- a/tools/perf/util/ordered-events.c +++ b/tools/perf/util/ordered-events.c @@ -79,7 +79,7 @@ static union perf_event *dup_event(struct ordered_events *oe, static void free_dup_event(struct ordered_events *oe, union perf_event *event) { - if (oe->copy_on_queue) { + if (event && oe->copy_on_queue) { oe->cur_alloc_size -= event->header.size; free(event); } @@ -150,6 +150,7 @@ void ordered_events__delete(struct ordered_events *oe, struct ordered_event *eve list_move(&event->list, &oe->cache); oe->nr_events--; free_dup_event(oe, event->event); + event->event = NULL; } int ordered_events__queue(struct ordered_events *oe, union perf_event *event, diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 03875f9154e7..0195b7e8c54a 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -2349,6 +2349,14 @@ static int get_new_event_name(char *buf, size_t len, const char *base, out: free(nbase); + + /* Final validation */ + if (ret >= 0 && !is_c_func_name(buf)) { + pr_warning("Internal error: \"%s\" is an invalid event name.\n", + buf); + ret = -EINVAL; + } + return ret; } diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 010ff659b82f..4596496f6c0f 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -135,8 +135,14 @@ struct perf_session *perf_session__new(struct perf_data_file *file, if (perf_session__open(session) < 0) goto out_close; - perf_session__set_id_hdr_size(session); - perf_session__set_comm_exec(session); + /* + * set session attributes that are present in perf.data + * but not in pipe-mode. 
+ */ + if (!file->is_pipe) { + perf_session__set_id_hdr_size(session); + perf_session__set_comm_exec(session); + } } } else { session->machines.host.env = &perf_env; @@ -151,7 +157,11 @@ struct perf_session *perf_session__new(struct perf_data_file *file, pr_warning("Cannot read kernel map\n"); } - if (tool && tool->ordering_requires_timestamps && + /* + * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is + * processed, so perf_evlist__sample_id_all is not meaningful here. + */ + if ((!file || !file->is_pipe) && tool && tool->ordering_requires_timestamps && tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) { dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); tool->ordered_events = false; @@ -1411,6 +1421,7 @@ static int __perf_session__process_pipe_events(struct perf_session *session) buf = malloc(cur_size); if (!buf) return -errno; + ordered_events__set_copy_on_queue(oe, true); more: event = buf; err = readn(fd, event, sizeof(struct perf_event_header)); diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 2d8ccd4d9e1b..87312056f75d 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -604,6 +604,9 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf, static int64_t sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) { + if (!left->branch_info || !right->branch_info) + return cmp_null(left->branch_info, right->branch_info); + return left->branch_info->flags.cycles - right->branch_info->flags.cycles; } @@ -611,6 +614,8 @@ sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right) static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { + if (!he->branch_info) + return scnprintf(bf, size, "%-.*s", width, "N/A"); if (he->branch_info->flags.cycles == 0) return repsep_snprintf(bf, size, "%-*s", width, "-"); return repsep_snprintf(bf, size, "%-*hd", width, diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index 2dcfe9a7c8d0..60edec383281 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c @@ -37,6 +37,14 @@ static int __report_module(struct addr_location *al, u64 ip, return 0; mod = dwfl_addrmodule(ui->dwfl, ip); + if (mod) { + Dwarf_Addr s; + + dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL); + if (s != al->map->start) + mod = 0; + } + if (!mod) mod = dwfl_report_elf(ui->dwfl, dso->short_name, dso->long_name, -1, al->map->start, diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 47b1e36c7ea0..9adc9af8b048 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -162,7 +162,7 @@ int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size) size -= ret; off_in += ret; - off_out -= ret; + off_out += ret; } munmap(ptr, off_in + size); diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh index 856a1f327b3f..61f9b1dbbd9b 100755 --- a/tools/testing/selftests/firmware/fw_filesystem.sh +++ b/tools/testing/selftests/firmware/fw_filesystem.sh @@ -28,7 +28,10 @@ test_finish() if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout fi - echo -n "$OLD_PATH" >/sys/module/firmware_class/parameters/path + if [ "$OLD_FWPATH" = "" ]; then + OLD_FWPATH=" " + fi + echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path rm -f "$FW" rmdir "$FWPATH" } diff --git 
a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c index 42d4c8caad81..de8dc82e2567 100644 --- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c +++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c @@ -45,12 +45,12 @@ int test_body(void) printf("Check DSCR TM context switch: "); fflush(stdout); for (;;) { - rv = 1; asm __volatile__ ( /* set a known value into the DSCR */ "ld 3, %[dscr1];" "mtspr %[sprn_dscr], 3;" + "li %[rv], 1;" /* start and suspend a transaction */ TBEGIN "beq 1f;" diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh index 3f81a1095206..50a6371b2b2e 100755 --- a/tools/testing/selftests/rcutorture/bin/configinit.sh +++ b/tools/testing/selftests/rcutorture/bin/configinit.sh @@ -51,7 +51,7 @@ then mkdir $builddir fi else - echo Bad build directory: \"$builddir\" + echo Bad build directory: \"$buildloc\" exit 2 fi fi diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c index d075ea0e5ca1..ade443a88421 100644 --- a/tools/testing/selftests/x86/entry_from_vm86.c +++ b/tools/testing/selftests/x86/entry_from_vm86.c @@ -95,6 +95,31 @@ asm ( "int3\n\t" "vmcode_int80:\n\t" "int $0x80\n\t" + "vmcode_popf_hlt:\n\t" + "push %ax\n\t" + "popf\n\t" + "hlt\n\t" + "vmcode_umip:\n\t" + /* addressing via displacements */ + "smsw (2052)\n\t" + "sidt (2054)\n\t" + "sgdt (2060)\n\t" + /* addressing via registers */ + "mov $2066, %bx\n\t" + "smsw (%bx)\n\t" + "mov $2068, %bx\n\t" + "sidt (%bx)\n\t" + "mov $2074, %bx\n\t" + "sgdt (%bx)\n\t" + /* register operands, only for smsw */ + "smsw %ax\n\t" + "mov %ax, (2080)\n\t" + "int3\n\t" + "vmcode_umip_str:\n\t" + "str %eax\n\t" + "vmcode_umip_sldt:\n\t" + "sldt %eax\n\t" + "int3\n\t" ".size vmcode, . - vmcode\n\t" "end_vmcode:\n\t" ".code32\n\t" @@ -103,7 +128,8 @@ asm ( extern unsigned char vmcode[], end_vmcode[]; extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[], - vmcode_sti[], vmcode_int3[], vmcode_int80[]; + vmcode_sti[], vmcode_int3[], vmcode_int80[], vmcode_popf_hlt[], + vmcode_umip[], vmcode_umip_str[], vmcode_umip_sldt[]; /* Returns false if the test was skipped. 
*/ static bool do_test(struct vm86plus_struct *v86, unsigned long eip, @@ -153,13 +179,75 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip, (VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) { printf("[OK]\tReturned correctly\n"); } else { - printf("[FAIL]\tIncorrect return reason\n"); + printf("[FAIL]\tIncorrect return reason (started at eip = 0x%lx, ended at eip = 0x%lx)\n", eip, v86->regs.eip); nerrs++; } return true; } +void do_umip_tests(struct vm86plus_struct *vm86, unsigned char *test_mem) +{ + struct table_desc { + unsigned short limit; + unsigned long base; + } __attribute__((packed)); + + /* Initialize variables with arbitrary values */ + struct table_desc gdt1 = { .base = 0x3c3c3c3c, .limit = 0x9999 }; + struct table_desc gdt2 = { .base = 0x1a1a1a1a, .limit = 0xaeae }; + struct table_desc idt1 = { .base = 0x7b7b7b7b, .limit = 0xf1f1 }; + struct table_desc idt2 = { .base = 0x89898989, .limit = 0x1313 }; + unsigned short msw1 = 0x1414, msw2 = 0x2525, msw3 = 3737; + + /* UMIP -- exit with INT3 unless kernel emulation did not trap #GP */ + do_test(vm86, vmcode_umip - vmcode, VM86_TRAP, 3, "UMIP tests"); + + /* Results from displacement-only addressing */ + msw1 = *(unsigned short *)(test_mem + 2052); + memcpy(&idt1, test_mem + 2054, sizeof(idt1)); + memcpy(&gdt1, test_mem + 2060, sizeof(gdt1)); + + /* Results from register-indirect addressing */ + msw2 = *(unsigned short *)(test_mem + 2066); + memcpy(&idt2, test_mem + 2068, sizeof(idt2)); + memcpy(&gdt2, test_mem + 2074, sizeof(gdt2)); + + /* Results when using register operands */ + msw3 = *(unsigned short *)(test_mem + 2080); + + printf("[INFO]\tResult from SMSW:[0x%04x]\n", msw1); + printf("[INFO]\tResult from SIDT: limit[0x%04x]base[0x%08lx]\n", + idt1.limit, idt1.base); + printf("[INFO]\tResult from SGDT: limit[0x%04x]base[0x%08lx]\n", + gdt1.limit, gdt1.base); + + if (msw1 != msw2 || msw1 != msw3) + printf("[FAIL]\tAll the results of SMSW should be the same.\n"); + else + printf("[PASS]\tAll the results from SMSW are identical.\n"); + + if (memcmp(&gdt1, &gdt2, sizeof(gdt1))) + printf("[FAIL]\tAll the results of SGDT should be the same.\n"); + else + printf("[PASS]\tAll the results from SGDT are identical.\n"); + + if (memcmp(&idt1, &idt2, sizeof(idt1))) + printf("[FAIL]\tAll the results of SIDT should be the same.\n"); + else + printf("[PASS]\tAll the results from SIDT are identical.\n"); + + sethandler(SIGILL, sighandler, 0); + do_test(vm86, vmcode_umip_str - vmcode, VM86_SIGNAL, 0, + "STR instruction"); + clearhandler(SIGILL); + + sethandler(SIGILL, sighandler, 0); + do_test(vm86, vmcode_umip_sldt - vmcode, VM86_SIGNAL, 0, + "SLDT instruction"); + clearhandler(SIGILL); +} + int main(void) { struct vm86plus_struct v86; @@ -180,6 +268,9 @@ int main(void) v86.regs.ds = load_addr / 16; v86.regs.es = load_addr / 16; + /* Use the end of the page as our stack. */ + v86.regs.esp = 4096; + assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */ /* #BR -- should deliver SIG??? 
*/ @@ -211,6 +302,23 @@ int main(void) v86.regs.eflags &= ~X86_EFLAGS_IF; do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set"); + /* POPF with VIP set but IF clear: should not trap */ + v86.regs.eflags = X86_EFLAGS_VIP; + v86.regs.eax = 0; + do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP set and IF clear"); + + /* POPF with VIP set and IF set: should trap */ + v86.regs.eflags = X86_EFLAGS_VIP; + v86.regs.eax = X86_EFLAGS_IF; + do_test(&v86, vmcode_popf_hlt - vmcode, VM86_STI, 0, "POPF with VIP and IF set"); + + /* POPF with VIP clear and IF set: should not trap */ + v86.regs.eflags = 0; + v86.regs.eax = X86_EFLAGS_IF; + do_test(&v86, vmcode_popf_hlt - vmcode, VM86_UNKNOWN, 0, "POPF with VIP clear and IF set"); + + v86.regs.eflags = 0; + /* INT3 -- should cause #BP */ do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3"); @@ -218,6 +326,9 @@ int main(void) v86.regs.eax = (unsigned int)-1; do_test(&v86, vmcode_int80 - vmcode, VM86_INTx, 0x80, "int80"); + /* UMIP -- should exit with INTx 0x80 unless UMIP was not disabled */ + do_umip_tests(&v86, addr); + /* Execute a null pointer */ v86.regs.cs = 0; v86.regs.ss = 0; @@ -231,7 +342,7 @@ int main(void) clearhandler(SIGSEGV); /* Make sure nothing explodes if we fork. */ - if (fork() > 0) + if (fork() == 0) return 0; return (nerrs == 0 ? 0 : 1); diff --git a/tools/usb/usbip/src/usbipd.c b/tools/usb/usbip/src/usbipd.c index 2a7cd2b8d966..8c5b0faba229 100644 --- a/tools/usb/usbip/src/usbipd.c +++ b/tools/usb/usbip/src/usbipd.c @@ -451,7 +451,7 @@ static void set_signal(void) sigaction(SIGTERM, &act, NULL); sigaction(SIGINT, &act, NULL); act.sa_handler = SIG_IGN; - sigaction(SIGCLD, &act, NULL); + sigaction(SIGCHLD, &act, NULL); } static const char *pid_file; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index d080f06fd8d9..b814ae6822b6 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -902,8 +902,7 @@ int __kvm_set_memory_region(struct kvm *kvm, /* Check for overlaps */ r = -EEXIST; kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { - if ((slot->id >= KVM_USER_MEM_SLOTS) || - (slot->id == id)) + if (slot->id == id) continue; if (!((base_gfn + npages <= slot->base_gfn) || (base_gfn >= slot->base_gfn + slot->npages))) |
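
Note on the tools/perf/util/util.c hunk above: copyfile_offset() previously did "off_out -= ret", so every chunk after the first was written backwards over the start of the destination. The fix advances both offsets by the bytes actually transferred. A minimal sketch of the same pattern using plain pread()/pwrite() follows; the helper name copy_range() and the buffer size are illustrative, this is not the perf implementation itself.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Illustrative helper (not perf's copyfile_offset()): copy `size` bytes
 * from `ifd` at `off_in` to `ofd` at `off_out`.  The point of the fix is
 * that BOTH offsets must move forward by the bytes actually transferred.
 */
int copy_range(int ifd, off_t off_in, int ofd, off_t off_out, size_t size)
{
	char buf[4096];

	while (size) {
		size_t chunk = size < sizeof(buf) ? size : sizeof(buf);
		ssize_t rd = pread(ifd, buf, chunk, off_in);

		if (rd <= 0)
			return -1;
		if (pwrite(ofd, buf, rd, off_out) != rd)
			return -1;

		size    -= rd;
		off_in  += rd;	/* the input offset advances ...            */
		off_out += rd;	/* ... and so must the output offset, or    *
				 * later chunks clobber the destination     */
	}
	return 0;
}

int main(int argc, char **argv)
{
	int ifd, ofd;

	if (argc != 4) {
		fprintf(stderr, "usage: %s <src> <dst> <nbytes>\n", argv[0]);
		return 1;
	}
	ifd = open(argv[1], O_RDONLY);
	ofd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (ifd < 0 || ofd < 0)
		return 1;
	return copy_range(ifd, 0, ofd, 0, (size_t)atoll(argv[3])) ? 1 : 0;
}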

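Note on the final virt/kvm/kvm_main.c hunk: __kvm_set_memory_region() now skips only the slot with the same id during the overlap scan, and two GFN ranges are treated as non-overlapping exactly when one ends at or before the other begins. The predicate can be sanity-checked in isolation; the sketch below uses a hypothetical struct range rather than KVM's memslot types.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for a GFN range, for illustration only. */
struct range {
	uint64_t base;		/* first frame number */
	uint64_t npages;	/* number of frames   */
};

/* Same test as the patch: overlap unless one range ends before the other starts. */
static bool ranges_overlap(struct range a, struct range b)
{
	return !(a.base + a.npages <= b.base || a.base >= b.base + b.npages);
}

int main(void)
{
	struct range slot = { .base = 0x100, .npages = 0x10 };	/* [0x100, 0x110) */

	assert(!ranges_overlap((struct range){ 0x0F0, 0x10 }, slot)); /* ends at slot start: ok   */
	assert( ranges_overlap((struct range){ 0x0F0, 0x11 }, slot)); /* one page of overlap      */
	assert(!ranges_overlap((struct range){ 0x110, 0x10 }, slot)); /* starts at slot end: ok   */
	return 0;
}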