diff options
305 files changed, 15068 insertions, 1416 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt index d442cf02a816..41cbb91351bb 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm.txt @@ -86,6 +86,9 @@ SoCs: - MSM8998 compatible = "qcom,msm8998" +- MSM8998_9x55 + compatible = "qcom,msm8998-9x55" + - MSMHAMSTER compatible = "qcom,msmhamster" @@ -270,6 +273,8 @@ compatible = "qcom,msm8998-rumi" compatible = "qcom,msm8998-cdp" compatible = "qcom,msm8998-mtp" compatible = "qcom,msm8998-qrd" +compatible = "qcom,msm8998-9x55-cdp" +compatible = "qcom,msm8998-9x55-mtp" compatible = "qcom,msmhamster-rumi" compatible = "qcom,msmhamster-cdp" compatible = "qcom,msmhamster-mtp" diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt index de5ab2c37967..2ef119e74bda 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt @@ -1,18 +1,20 @@ Qualcomm Technologies, Inc. IPC Router MHI Transport Required properties: --compatible: should be "qcom,ipc_router_mhi_xprt" --qcom,out-chan-id: MHI Channel ID for the transmit path --qcom,in-chan-id: MHI Channel ID for the receive path --qcom,xprt-remote: string that defines the edge of the transport (PIL Name) +-compatible: should be "qcom,ipc_router_mhi_xprt". +-qcom,mhi: phandle of MHI Device to connect to. +-qcom,out-chan-id: MHI Channel ID for the transmit path. +-qcom,in-chan-id: MHI Channel ID for the receive path. +-qcom,xprt-remote: string that defines the edge of the transport(PIL Name). 
-qcom,xprt-linkid: unique integer to identify the tier to which the link belongs to in the network and is used to avoid the - routing loops while forwarding the broadcast messages --qcom,xprt-version: unique version ID used by MHI transport header + routing loops while forwarding the broadcast messages. +-qcom,xprt-version: unique version ID used by MHI transport header. Example: qcom,ipc_router_external_modem_xprt2 { compatible = "qcom,ipc_router_mhi_xprt"; + qcom,mhi = <&mhi_wlan>; qcom,out-chan-id = <34>; qcom,in-chan-id = <35>; qcom,xprt-remote = "external-modem"; diff --git a/Documentation/devicetree/bindings/cnss/cnss-wlan.txt b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt index 6d63d1123f4c..6aa3bfe4b1d8 100644 --- a/Documentation/devicetree/bindings/cnss/cnss-wlan.txt +++ b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt @@ -11,8 +11,9 @@ the WLAN enable GPIO, 3.3V fixed voltage regulator resources. It also provides the reserved RAM dump memory location and size. Required properties: - - compatible: "qcom,cnss" - - wlan-en-gpio: WLAN_EN GPIO signal specified by QCA6174 specifications + - compatible: "qcom,cnss" for QCA6174 device + "qcom,cnss-qca6290" for QCA6290 device + - wlan-en-gpio: WLAN_EN GPIO signal specified by the chip specifications - vdd-wlan-supply: phandle to the regulator device tree node - pinctrl-names: Names corresponding to the numbered pinctrl states - pinctrl-<n>: Pinctrl states as described in @@ -44,6 +45,13 @@ Optional properties: which should be drived depending on platforms - qcom,is-dual-wifi-enabled: Boolean property to control wlan enable(wlan-en) gpio on dual-wifi platforms. + - vdd-wlan-en-supply: WLAN_EN fixed regulator specified by QCA6174 specifications. + - qcom,wlan-en-vreg-support: Boolean property to decide the whether the WLAN_EN pin + is a gpio or fixed regulator. + - qcom,mhi: phandle to indicate the device which needs MHI support. 
+ - qcom,cap-tsf-gpio: WLAN_TSF_CAPTURED GPIO signal specified by the chip + specifications, should be drived depending on + products Example: @@ -60,4 +68,6 @@ Example: pinctrl-0 = <&cnss_default>; qcom,wlan-rc-num = <0>; qcom,wlan-smmu-iova-address = <0 0x10000000>; + qcom,mhi = <&mhi_wlan>; + qcom,cap-tsf-gpio = <&tlmm 126 1>; }; diff --git a/Documentation/devicetree/bindings/cnss/icnss.txt b/Documentation/devicetree/bindings/cnss/icnss.txt index c801e8486f87..700a8f7b077e 100644 --- a/Documentation/devicetree/bindings/cnss/icnss.txt +++ b/Documentation/devicetree/bindings/cnss/icnss.txt @@ -28,6 +28,7 @@ Optional properties: - qcom,icnss-vadc: VADC handle for vph_pwr read APIs. - qcom,icnss-adc_tm: VADC handle for vph_pwr notification APIs. - qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass + - qcom,wlan-msa-fixed-region: phandle, specifier pairs to children of /reserved-memory Example: @@ -54,6 +55,7 @@ Example: <0 140 0 /* CE10 */ >, <0 141 0 /* CE11 */ >; qcom,wlan-msa-memory = <0x200000>; + qcom,wlan-msa-fixed-region = <&wlan_msa_mem>; qcom,smmu-s1-bypass; vdd-0.8-cx-mx-supply = <&pm8998_l5>; qcom,vdd-0.8-cx-mx-config = <800000 800000 2400 1000>; diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt index 38e056cdc0ee..db21a2b58c2b 100644 --- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt +++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt @@ -1311,6 +1311,13 @@ Optional properties: - pinctrl-x: Defines pinctrl state for each pin group. + - qcom,msm-cpudai-tdm-clk-attribute: Clock attribute for tdm. 
+ 0 - Clk invalid attribute + 1 - Clk attribute couple no + 2 - Clk attribute couple dividend + 3 - Clk attribute couple divisor + 4 - Clk attribute invert couple no + Example: qcom,msm-dai-tdm-quat-rx { diff --git a/Documentation/vm/page_owner.txt b/Documentation/vm/page_owner.txt index 8f3ce9b3aa11..ffff1439076a 100644 --- a/Documentation/vm/page_owner.txt +++ b/Documentation/vm/page_owner.txt @@ -28,10 +28,11 @@ with page owner and page owner is disabled in runtime due to no enabling boot option, runtime overhead is marginal. If disabled in runtime, it doesn't require memory to store owner information, so there is no runtime memory overhead. And, page owner inserts just two unlikely branches into -the page allocator hotpath and if it returns false then allocation is -done like as the kernel without page owner. These two unlikely branches -would not affect to allocation performance. Following is the kernel's -code size change due to this facility. +the page allocator hotpath and if not enabled, then allocation is done +like as the kernel without page owner. These two unlikely branches should +not affect to allocation performance, especially if the static keys jump +label patching functionality is available. Following is the kernel's code +size change due to this facility. - Without page owner text data bss dec hex filename @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 74 +SUBLEVEL = 76 EXTRAVERSION = NAME = Blurry Fish Butt @@ -637,6 +637,12 @@ endif # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) +# check for 'asm goto' +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) + KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO + KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO +endif + ifdef CONFIG_READABLE_ASM # Disable optimizations that make assembler listings hard to read. 
# reorder blocks reorders the control in the function @@ -792,12 +798,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=date-time) # use the deterministic mode of AR if available KBUILD_ARFLAGS := $(call ar-option,D) -# check for 'asm goto' -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) - KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO - KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO -endif - include scripts/Makefile.kasan include scripts/Makefile.extrawarn include scripts/Makefile.ubsan diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 6f50f672efbd..de8ac998604d 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -54,14 +54,14 @@ timer@0200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x0200 0x100>; - interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>; clocks = <&clk_periph>; }; local-timer@0600 { compatible = "arm,cortex-a9-twd-timer"; reg = <0x0600 0x100>; - interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>; clocks = <&clk_periph>; }; diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile index c938988d6634..297d6535382e 100644 --- a/arch/arm/boot/dts/qcom/Makefile +++ b/arch/arm/boot/dts/qcom/Makefile @@ -170,7 +170,10 @@ dtb-$(CONFIG_ARCH_MSM8998) += msm8998-sim.dtb \ apq8098-v2.1-mediabox.dtb \ msm8998-v2.1-interposer-sdm660-cdp.dtb \ msm8998-v2.1-interposer-sdm660-mtp.dtb \ - msm8998-v2.1-interposer-sdm660-qrd.dtb + msm8998-v2.1-interposer-sdm660-qrd.dtb \ + msm8998-9x55-rcm.dtb \ + msm8998-9x55-cdp.dtb \ + msm8998-9x55-mtp.dtb endif dtb-$(CONFIG_ARCH_MSMHAMSTER) += msmhamster-rumi.dtb diff --git a/arch/arm/boot/dts/qcom/dsi-adv7533-1024-600p.dtsi b/arch/arm/boot/dts/qcom/dsi-adv7533-1024-600p.dtsi new file mode 100644 index 000000000000..5e382f307530 --- /dev/null +++ b/arch/arm/boot/dts/qcom/dsi-adv7533-1024-600p.dtsi @@ -0,0 +1,75 @@ +/* Copyright (c) 
2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&mdss_mdp { + +dsi_adv7533_1024_600p: qcom,mdss_dsi_adv7533_1024_600p { + label = "adv7533 1024x600p video mode dsi panel"; + qcom,mdss-dsi-panel-name = "dsi_adv7533_1024_600p"; + qcom,mdss-dsi-panel-controller = <&mdss_dsi0>; + qcom,mdss-dsi-panel-type = "dsi_video_mode"; + qcom,mdss-dsi-panel-destination = "display_1"; + qcom,mdss-dsi-panel-framerate = <60>; + qcom,mdss-dsi-virtual-channel-id = <0>; + qcom,mdss-dsi-stream = <0>; + qcom,mdss-dsi-panel-width = <1024>; + qcom,mdss-dsi-panel-height = <600>; + qcom,mdss-dsi-h-front-porch = <110>; + qcom,mdss-dsi-h-back-porch = <220>; + qcom,mdss-dsi-h-pulse-width = <40>; + qcom,mdss-dsi-h-sync-skew = <0>; + qcom,mdss-dsi-v-back-porch = <20>; + qcom,mdss-dsi-v-front-porch = <5>; + qcom,mdss-dsi-v-pulse-width = <5>; + qcom,mdss-dsi-h-left-border = <0>; + qcom,mdss-dsi-h-right-border = <0>; + qcom,mdss-dsi-v-top-border = <0>; + qcom,mdss-dsi-v-bottom-border = <0>; + qcom,mdss-dsi-bpp = <24>; + qcom,mdss-dsi-underflow-color = <0xff>; + qcom,mdss-dsi-border-color = <0>; + qcom,mdss-dsi-on-command = [ + 05 01 00 00 c8 00 02 11 00 + 05 01 00 00 0a 00 02 29 00]; + qcom,mdss-dsi-off-command = [05 01 00 00 00 00 02 28 00 + 05 01 00 00 00 00 02 10 00]; + qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; + qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-h-sync-pulse = <1>; + qcom,mdss-dsi-traffic-mode = "non_burst_sync_pulse"; + qcom,mdss-dsi-bllp-eof-power-mode; + qcom,mdss-dsi-bllp-power-mode; + 
qcom,mdss-dsi-lane-0-state; + qcom,mdss-dsi-lane-1-state; + qcom,mdss-dsi-lane-2-state; + qcom,mdss-dsi-panel-timings = [ + AB 1A 10 00 3E 43 16 1E 15 03 04 00]; + qcom,mdss-dsi-t-clk-post = <0x03>; + qcom,mdss-dsi-t-clk-pre = <0x20>; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-dma-trigger = "trigger_sw"; + qcom,mdss-dsi-mdp-trigger = "none"; + qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>; + qcom,mdss-pan-physical-width-dimension = <160>; + qcom,mdss-pan-physical-height-dimension = <90>; + qcom,mdss-dsi-force-clock-lane-hs; + qcom,mdss-dsi-always-on; + qcom,mdss-dsi-panel-timings-phy-v2 = [1c 19 02 03 01 03 04 a0 + 1c 19 02 03 01 03 04 a0 + 1c 19 02 03 01 03 04 a0 + 1c 19 02 03 01 03 04 a0 + 1c 08 02 03 01 03 04 a0]; + qcom,dba-panel; + qcom,bridge-name = "adv7533"; + }; +}; diff --git a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi index 0f18ba5c94c7..9cd117ce4e0c 100644 --- a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi +++ b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi @@ -250,9 +250,8 @@ <0xd900 0x100>; reg-names = "qpnp-wled-ctrl-base", "qpnp-wled-sink-base"; - interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>, - <0x3 0xd8 0x2 IRQ_TYPE_EDGE_RISING>; - interrupt-names = "ovp-irq", "sc-irq"; + interrupts = <0x3 0xd8 0x1 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "ovp-irq"; linux,name = "wled"; linux,default-trigger = "bkl-trigger"; qcom,fdbk-output = "auto"; @@ -268,7 +267,6 @@ qcom,fs-curr-ua = <25000>; qcom,cons-sync-write-delay-us = <1000>; qcom,led-strings-list = [00 01 02]; - qcom,en-ext-pfet-sc-pro; qcom,loop-auto-gm-en; qcom,pmic-revid = <&pm660l_revid>; status = "ok"; diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi index 3b55215c7e5d..9dbec1e87fda 100644 --- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi @@ -547,7 +547,7 @@ #include "msm8996-sde-display.dtsi" -&mdss_mdp 
{ +&sde_kms { qcom,mdss-pref-prim-intf = "dsi"; qcom,sde-plane-id-map { qcom,sde-plane-id@0 { @@ -869,6 +869,9 @@ qcom,ntn-rst-delay-msec = <100>; qcom,ntn-rc-num = <1>; + qcom,ntn-bus-num = <1>; + qcom,ntn-mdio-bus-id = <1>; + qcom,ntn-phy-addr = <7>; qcom,msm-bus,name = "ntn"; qcom,msm-bus,num-cases = <2>; @@ -1238,7 +1241,7 @@ qcom,vin-sel = <2>; /* 1.8 */ qcom,out-strength = <1>; qcom,src-sel = <0>; /* GPIO */ - qcom,master-en = <0>; /* Disable GPIO */ + qcom,master-en = <1>; /* Enable GPIO */ status = "okay"; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi index 4fe6f0d67fbe..ad14bfd00cd1 100644 --- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi @@ -333,7 +333,7 @@ }; }; -&mdss_mdp { +&sde_kms { qcom,mdss-pref-prim-intf = "dsi"; qcom,sde-plane-id-map { qcom,sde-plane-id@0 { @@ -635,6 +635,8 @@ qcom,ntn-rst-delay-msec = <100>; qcom,ntn-rc-num = <1>; qcom,ntn-bus-num = <1>; + qcom,ntn-mdio-bus-id = <1>; + qcom,ntn-phy-addr = <7>; qcom,msm-bus,name = "ntn"; qcom,msm-bus,num-cases = <2>; @@ -649,6 +651,7 @@ qcom,ntn-rst-delay-msec = <100>; qcom,ntn-rc-num = <2>; qcom,ntn-bus-num = <1>; + qcom,ntn-mdio-bus-id = <2>; qcom,msm-bus,name = "ntn"; qcom,msm-bus,num-cases = <2>; diff --git a/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi index 18a0f29e4d8a..94be1082c2be 100644 --- a/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -24,6 +24,7 @@ #include "dsi-panel-nt35597-dsc-wqxga-cmd.dtsi" #include "dsi-panel-hx8379a-truly-fwvga-video.dtsi" #include "dsi-panel-r69007-dualdsi-wqxga-cmd.dtsi" +#include "dsi-adv7533-1024-600p.dtsi" #include "dsi-adv7533-720p.dtsi" #include "dsi-adv7533-1080p.dtsi" #include "dsi-panel-nt35950-dsc-4k-cmd.dtsi" diff --git a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi index ab10a71d1fd7..0bd9b02f3d2e 100644 --- a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -343,7 +343,7 @@ qcom,mdss-pref-prim-intf = "dsi"; }; -&mdss_hdmi { +&sde_hdmi { status = "ok"; }; diff --git a/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi index 061301f1c479..1c81bc433374 100644 --- a/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi @@ -94,8 +94,8 @@ label = "dsi_dual_sharp_video"; qcom,display-type = "primary"; - qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>; - qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>; + qcom,dsi-ctrl = <&sde_dsi0 &sde_dsi1>; + qcom,dsi-phy = <&sde_dsi_phy0 &sde_dsi_phy1>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -118,8 +118,8 @@ label = "single_dsi_sim"; qcom,display-type = "primary"; - qcom,dsi-ctrl = <&mdss_dsi0>; - qcom,dsi-phy = <&mdss_dsi_phy0>; + qcom,dsi-ctrl = <&sde_dsi0>; + qcom,dsi-phy = <&sde_dsi_phy0>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, 
<&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -140,8 +140,8 @@ label = "single_dsi_toshiba_720p"; qcom,display-type = "primary"; - qcom,dsi-ctrl = <&mdss_dsi0>; - qcom,dsi-phy = <&mdss_dsi_phy0>; + qcom,dsi-ctrl = <&sde_dsi0>; + qcom,dsi-phy = <&sde_dsi_phy0>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -161,8 +161,8 @@ label = "single_dsi_jdi_1080p"; qcom,display-type = "primary"; - qcom,dsi-ctrl = <&mdss_dsi0>; - qcom,dsi-phy = <&mdss_dsi_phy0>; + qcom,dsi-ctrl = <&sde_dsi0>; + qcom,dsi-phy = <&sde_dsi_phy0>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -180,8 +180,8 @@ label = "single_dsi_sharp_1080p"; qcom,display-type = "primary"; - qcom,dsi-ctrl = <&mdss_dsi0>; - qcom,dsi-phy = <&mdss_dsi_phy0>; + qcom,dsi-ctrl = <&sde_dsi0>; + qcom,dsi-phy = <&sde_dsi_phy0>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -209,8 +209,8 @@ qcom,display-type = "primary"; /* dsi1/dsi0 swapped due to IMGSWAP */ - qcom,dsi-ctrl = <&mdss_dsi1 &mdss_dsi0>; - qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>; + qcom,dsi-ctrl = <&sde_dsi1 &sde_dsi0>; + qcom,dsi-phy = <&sde_dsi_phy0 &sde_dsi_phy1>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -231,8 +231,8 @@ label = "dsi_dual_nt35597_video"; qcom,display-type = "primary"; - qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>; - qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>; + qcom,dsi-ctrl = <&sde_dsi0 &sde_dsi1>; + qcom,dsi-phy = <&sde_dsi_phy0 &sde_dsi_phy1>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -253,8 +253,8 @@ label = "dsi_adv_7533_1"; qcom,display-type = 
"secondary"; - qcom,dsi-ctrl = <&mdss_dsi0>; - qcom,dsi-phy = <&mdss_dsi_phy0>; + qcom,dsi-ctrl = <&sde_dsi0>; + qcom,dsi-phy = <&sde_dsi_phy0>; clocks = <&clock_mmss clk_ext_byte0_clk_src>, <&clock_mmss clk_ext_pclk0_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -269,8 +269,8 @@ label = "dsi_adv_7533_2"; qcom,display-type = "tertiary"; - qcom,dsi-ctrl = <&mdss_dsi1>; - qcom,dsi-phy = <&mdss_dsi_phy1>; + qcom,dsi-ctrl = <&sde_dsi1>; + qcom,dsi-phy = <&sde_dsi_phy1>; clocks = <&clock_mmss clk_ext_byte1_clk_src>, <&clock_mmss clk_ext_pclk1_clk_src>; clock-names = "src_byte_clk", "src_pixel_clk"; @@ -297,8 +297,8 @@ }; }; -&mdss_mdp { - connectors = <&mdss_hdmi &sde_hdmi &dsi_adv_7533_1 &dsi_adv_7533_2>; +&sde_kms { + connectors = <&sde_hdmi_tx &sde_hdmi &dsi_adv_7533_1 &dsi_adv_7533_2>; }; &dsi_dual_sharp_video { diff --git a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi index f0fa5dcb2224..6d0e5c4cb920 100644 --- a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi @@ -11,7 +11,7 @@ */ &soc { - mdss_mdp: qcom,mdss_mdp@900000 { + sde_kms: qcom,sde_kms@900000 { compatible = "qcom,sde-kms"; reg = <0x00900000 0x90000>, <0x009b0000 0x1040>, @@ -180,8 +180,8 @@ }; }; - smmu_mdp_unsec: qcom,smmu_mdp_unsec_cb { - compatible = "qcom,smmu_mdp_unsec"; + smmu_kms_unsec: qcom,smmu_kms_unsec_cb { + compatible = "qcom,smmu_kms_unsec"; iommus = <&mdp_smmu 0>; }; @@ -211,7 +211,7 @@ }; }; - mdss_dsi0: qcom,mdss_dsi_ctrl0@994000 { + sde_dsi0: qcom,sde_dsi_ctrl0@994000 { compatible = "qcom,dsi-ctrl-hw-v1.4"; label = "dsi-ctrl-0"; cell-index = <0>; @@ -246,7 +246,7 @@ <22 512 0 0>, <22 512 0 1000>; - interrupt-parent = <&mdss_mdp>; + interrupt-parent = <&sde_kms>; interrupts = <4 0>; qcom,core-supply-entries { #address-cells = <1>; @@ -287,7 +287,7 @@ }; }; - mdss_dsi1: qcom,mdss_dsi_ctrl1@996000 { + sde_dsi1: qcom,sde_dsi_ctrl1@996000 { compatible = "qcom,dsi-ctrl-hw-v1.4"; label = "dsi-ctrl-1"; 
cell-index = <1>; @@ -321,7 +321,7 @@ <22 512 0 0>, <22 512 0 1000>; - interrupt-parent = <&mdss_mdp>; + interrupt-parent = <&sde_kms>; interrupts = <5 0>; qcom,core-supply-entries { #address-cells = <1>; @@ -361,7 +361,7 @@ }; }; - mdss_dsi_phy0: qcom,mdss_dsi_phy0@994400 { + sde_dsi_phy0: qcom,sde_dsi_phy0@994400 { compatible = "qcom,dsi-phy-v4.0"; label = "dsi-phy-0"; cell-index = <0>; @@ -420,7 +420,7 @@ }; }; - mdss_dsi_phy1: qcom,mdss_dsi_phy1@996400 { + sde_dsi_phy1: qcom,sde_dsi_phy1@996400 { compatible = "qcom,dsi-phy-v4.0"; label = "dsi-phy-1"; cell-index = <1>; @@ -479,7 +479,7 @@ }; }; - mdss_hdmi: qcom,hdmi_tx@9a0000 { + sde_hdmi_tx: qcom,hdmi_tx_8996@9a0000 { compatible = "qcom,hdmi-tx-8996"; reg = <0x009a0000 0x50c>, @@ -499,7 +499,7 @@ "core_clk", "alt_iface_clk", "extp_clk"; - interrupt-parent = <&mdss_mdp>; + interrupt-parent = <&sde_kms>; interrupts = <8 0>; hpd-gdsc-supply = <&gdsc_mdss>; qcom,hdmi-tx-hpd-gpio = <&pm8994_mpps 4 0>; @@ -511,23 +511,8 @@ &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>; - hdmi_audio: qcom,msm-hdmi-audio-rx { + sde_hdmi_audio: qcom,sde-hdmi-audio-rx { compatible = "qcom,msm-hdmi-audio-codec-rx"; }; }; }; - -/* dummy nodes for compatibility with 8996 mdss dtsi */ -&soc { - mdss_dsi: qcom,mdss_dsi_dummy { - /* dummy node for backward compatibility */ - }; - - mdss_hdmi_tx: qcom,mdss_hdmi_tx_dummy { - /* dummy node for backward compatibility */ - }; - - mdss_fb2: qcom,mdss_fb2_dummy { - /* dummy node for backward compatibility */ - }; -}; diff --git a/arch/arm/boot/dts/qcom/msm8996-v2.dtsi b/arch/arm/boot/dts/qcom/msm8996-v2.dtsi index 9725bc3ee530..698c0193a164 100644 --- a/arch/arm/boot/dts/qcom/msm8996-v2.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-v2.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -480,7 +480,7 @@ gdsc-venus-supply = <&gdsc_venus>; }; -&mdss_hdmi { +&sde_hdmi_tx { hpd-gdsc-venus-supply = <&gdsc_venus>; }; diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi index 80b3437beac6..dcd884b4a745 100644 --- a/arch/arm/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996.dtsi @@ -242,6 +242,7 @@ #include "msm8996-ion.dtsi" #include "msm8996-sde.dtsi" +#include "msm8996-mdss.dtsi" #include "msm8996-mdss-pll.dtsi" #include "msm8996-smp2p.dtsi" #include "msm8996-ipcrouter.dtsi" diff --git a/arch/arm/boot/dts/qcom/msm8996v3-auto.dtsi b/arch/arm/boot/dts/qcom/msm8996v3-auto.dtsi index 32adb9a36dd4..355062adf7ef 100644 --- a/arch/arm/boot/dts/qcom/msm8996v3-auto.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996v3-auto.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016 - 2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -167,3 +167,40 @@ < 560000000 7 >, < 624000000 7 >; }; + +&soc { + ipa_hw: qcom,ipa@680000 { + compatible = "qcom,ipa"; + reg = <0x680000 0x4effc>, + <0x684000 0x26934>; + reg-names = "ipa-base", "bam-base"; + interrupts = <0 333 0>, + <0 432 0>; + interrupt-names = "ipa-irq", "bam-irq"; + qcom,ipa-hw-ver = <5>; /* IPA core version = IPAv2.5 */ + qcom,ipa-hw-mode = <0>; + qcom,ee = <0>; + qcom,use-ipa-tethering-bridge; + qcom,ipa-bam-remote-mode; + qcom,modem-cfg-emb-pipe-flt; + clocks = <&clock_gcc clk_ipa_clk>; + clock-names = "core_clk"; + qcom,use-dma-zone; + qcom,msm-bus,name = "ipa"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <2>; + qcom,msm-bus,vectors-KBps = + <90 512 0 0>, <90 585 0 0>, /* No vote */ + <90 512 80000 640000>, <90 585 80000 640000>, /* SVS */ + <90 512 206000 960000>, <90 585 206000 960000>; /* PERF */ + qcom,bus-vector-names = "MIN", "SVS", "PERF"; + }; + + qcom,rmnet-ipa { + compatible = "qcom,rmnet-ipa"; + qcom,rmnet-ipa-ssr; + qcom,ipa-loaduC; + qcom,ipa-advertise-sg-support; + }; +}; + diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55-cdp.dts b/arch/arm/boot/dts/qcom/msm8998-9x55-cdp.dts new file mode 100644 index 000000000000..cf167897bb89 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msm8998-9x55-cdp.dts @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +/dts-v1/; + +#include "msm8998-9x55.dtsi" +#include "msm8998-mdss-panels.dtsi" +#include "msm8998-cdp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. MSM8998-9x55 CDP"; + compatible = "qcom,msm8998-9x55-cdp", "qcom,msm8998-9x55", "qcom,cdp"; + qcom,board-id= <1 2>; +}; diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55-mtp.dts b/arch/arm/boot/dts/qcom/msm8998-9x55-mtp.dts new file mode 100644 index 000000000000..a95e9e4f272f --- /dev/null +++ b/arch/arm/boot/dts/qcom/msm8998-9x55-mtp.dts @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "msm8998-9x55.dtsi" +#include "msm8998-mdss-panels.dtsi" +#include "msm8998-mtp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. MSM8998-9x55 MTP"; + compatible = "qcom,msm8998-9x55-mtp", "qcom,msm8998-9x55", "qcom,mtp"; + qcom,board-id= <8 6>; +}; diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55-rcm.dts b/arch/arm/boot/dts/qcom/msm8998-9x55-rcm.dts new file mode 100644 index 000000000000..094ecbc50061 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msm8998-9x55-rcm.dts @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; + +#include "msm8998-9x55.dtsi" +#include "msm8998-mdss-panels.dtsi" +#include "msm8998-cdp.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. MSM8998-9x55 RCM"; + compatible = "qcom,msm8998-9x55-cdp", "qcom,msm8998-9x55", "qcom,cdp"; + qcom,board-id= <0x21 2>; +}; diff --git a/arch/arm/boot/dts/qcom/msm8998-9x55.dtsi b/arch/arm/boot/dts/qcom/msm8998-9x55.dtsi new file mode 100644 index 000000000000..be947507e398 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msm8998-9x55.dtsi @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + + +#include "skeleton64.dtsi" +#include "msm8998-v2.1.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. 
MSM8998-9x55"; + compatible = "qcom,msm8998-9x55"; + qcom,msm-id = <292 0x0>; + interrupt-parent = <&intc>; + + soc: soc { }; +}; diff --git a/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi index 6ff62544b03c..0859fd638a00 100644 --- a/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi @@ -149,6 +149,16 @@ status = "okay"; }; + gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */ + qcom,mode = <0>; /* Input */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; + /* GPIO 21 (NFC_CLK_REQ) */ gpio@d400 { qcom,mode = <0>; diff --git a/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi index bafe29211ef0..3827b1bbf8ba 100644 --- a/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi @@ -150,6 +150,16 @@ status = "okay"; }; + gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */ + qcom,mode = <0>; /* Input */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; + /* GPIO 21 (NFC_CLK_REQ) */ gpio@d400 { qcom,mode = <0>; diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi index 97c4c5b1d455..c4a428635231 100644 --- a/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi @@ -143,6 +143,16 @@ qcom,out-strength = <1>; }; + gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */ + qcom,mode = <0>; /* Input */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; + /* GPIO 21 (NFC_CLK_REQ) */ gpio@d400 { qcom,mode = <0>; 
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi index a3eb3e5ab0d0..3c6b23d9581c 100644 --- a/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi +++ b/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi @@ -139,6 +139,16 @@ status = "okay"; }; + gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */ + qcom,mode = <0>; /* Input */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "okay"; + }; + /* GPIO 21 (NFC_CLK_REQ) */ gpio@d400 { qcom,mode = <0>; diff --git a/arch/arm/boot/dts/qcom/sdm630-pm.dtsi b/arch/arm/boot/dts/qcom/sdm630-pm.dtsi index f39f8b880690..f10477bab8cf 100644 --- a/arch/arm/boot/dts/qcom/sdm630-pm.dtsi +++ b/arch/arm/boot/dts/qcom/sdm630-pm.dtsi @@ -24,7 +24,7 @@ qcom,vctl-timeout-us = <500>; qcom,vctl-port = <0x0>; qcom,phase-port = <0x1>; - qcom,saw2-avs-ctl = <0x1010031>; + qcom,saw2-avs-ctl = <0x101c031>; qcom,saw2-avs-limit = <0x4580458>; qcom,pfm-port = <0x2>; }; @@ -40,7 +40,7 @@ qcom,vctl-timeout-us = <500>; qcom,vctl-port = <0x0>; qcom,phase-port = <0x1>; - qcom,saw2-avs-ctl = <0x1010031>; + qcom,saw2-avs-ctl = <0x101c031>; qcom,saw2-avs-limit = <0x4580458>; qcom,pfm-port = <0x2>; }; diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi index 56a6d5105568..cff3aa809488 100644 --- a/arch/arm/boot/dts/qcom/sdm630.dtsi +++ b/arch/arm/boot/dts/qcom/sdm630.dtsi @@ -321,16 +321,22 @@ #size-cells = <2>; ranges; - removed_region0: removed_region0@85800000 { + wlan_msa_guard: wlan_msa_guard@85600000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x85800000 0x0 0x700000>; + reg = <0x0 0x85600000 0x0 0x100000>; }; - removed_region1: removed_region1@86000000 { + wlan_msa_mem: wlan_msa_mem@85700000 { compatible = "removed-dma-pool"; no-map; - reg = <0x0 0x86000000 0x0 0x2f00000>; + reg = <0x0 0x85700000 0x0 0x100000>; + }; + + removed_region: 
removed_region0@85800000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0x0 0x85800000 0x0 0x3700000>; }; modem_fw_mem: modem_fw_region@8ac00000 { @@ -1298,9 +1304,18 @@ compatible = "qcom,clk-cpu-osm-sdm630"; reg = <0x179c0000 0x4000>, <0x17916000 0x1000>, <0x17816000 0x1000>, <0x179d1000 0x1000>, - <0x00784130 0x8>; + <0x00784130 0x8>, <0x17914800 0x800>, + <0x17814800 0x800>; reg-names = "osm", "pwrcl_pll", "perfcl_pll", - "apcs_common", "perfcl_efuse"; + "apcs_common", "perfcl_efuse", + "pwrcl_acd", "perfcl_acd"; + + qcom,acdtd-val = <0x0000a111 0x0000a111>; + qcom,acdcr-val = <0x002c5ffd 0x002c5ffd>; + qcom,acdsscr-val = <0x00000901 0x00000901>; + qcom,acdextint0-val = <0x2cf9ae8 0x2cf9ae8>; + qcom,acdextint1-val = <0x2cf9afe 0x2cf9afe>; + qcom,acdautoxfer-val = <0x00000015 0x00000015>; vdd-pwrcl-supply = <&apc0_pwrcl_vreg>; vdd-perfcl-supply = <&apc1_perfcl_vreg>; @@ -1674,6 +1689,7 @@ qcom,vdd-1.3-rfa-config = <1200000 1370000>; qcom,vdd-3.3-ch0-config = <3200000 3400000>; qcom,wlan-msa-memory = <0x100000>; + qcom,wlan-msa-fixed-region = <&wlan_msa_mem>; qcom,smmu-s1-bypass; }; diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi index 3debda8c084d..f14c9a32c2f9 100644 --- a/arch/arm/boot/dts/qcom/sdm660.dtsi +++ b/arch/arm/boot/dts/qcom/sdm660.dtsi @@ -319,6 +319,18 @@ #size-cells = <2>; ranges; + wlan_msa_guard: wlan_msa_guard@85600000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0x0 0x85600000 0x0 0x100000>; + }; + + wlan_msa_mem: wlan_msa_mem@85700000 { + compatible = "removed-dma-pool"; + no-map; + reg = <0x0 0x85700000 0x0 0x100000>; + }; + removed_regions: removed_regions@85800000 { compatible = "removed-dma-pool"; no-map; @@ -1928,6 +1940,7 @@ qcom,vdd-1.3-rfa-config = <1200000 1370000>; qcom,vdd-3.3-ch0-config = <3200000 3400000>; qcom,wlan-msa-memory = <0x100000>; + qcom,wlan-msa-fixed-region = <&wlan_msa_mem>; qcom,smmu-s1-bypass; }; @@ -2140,6 +2153,7 @@ qcom,qsee-ce-hw-instance = <0>; 
qcom,disk-encrypt-pipe-pair = <2>; qcom,support-fde; + qcom,fde-key-size; qcom,no-clock-support; qcom,msm-bus,name = "qseecom-noc"; qcom,msm-bus,num-cases = <4>; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 221b11bb50e3..f353849d9388 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1197,15 +1197,15 @@ void __init sanity_check_meminfo(void) high_memory = __va(arm_lowmem_limit - 1) + 1; + if (!memblock_limit) + memblock_limit = arm_lowmem_limit; + /* * Round the memblock limit down to a pmd size. This * helps to ensure that we will allocate memory from the * last full pmd, which should be mapped. */ - if (memblock_limit) - memblock_limit = round_down(memblock_limit, PMD_SIZE); - if (!memblock_limit) - memblock_limit = arm_lowmem_limit; + memblock_limit = round_down(memblock_limit, PMD_SIZE); memblock_set_current_limit(memblock_limit); } diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig index 5692f0dcd65e..894cb466b075 100644 --- a/arch/arm64/configs/msmcortex-perf_defconfig +++ b/arch/arm64/configs/msmcortex-perf_defconfig @@ -16,7 +16,6 @@ CONFIG_RCU_NOCB_CPU_ALL=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 -CONFIG_CGROUP_DEBUG=y CONFIG_CGROUP_FREEZER=y CONFIG_CPUSETS=y CONFIG_CGROUP_CPUACCT=y diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index aee323b13802..0a11cd502dbc 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -22,9 +22,9 @@ #define ACPI_MADT_GICC_LENGTH \ (acpi_gbl_FADT.header.revision < 6 ? 
76 : 80) -#define BAD_MADT_GICC_ENTRY(entry, end) \ - (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \ - (entry)->header.length != ACPI_MADT_GICC_LENGTH) +#define BAD_MADT_GICC_ENTRY(entry, end) \ + (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \ + (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end)) /* Basic configuration for ACPI */ #ifdef CONFIG_ACPI diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c index 3cedd1f95e0f..8ae4067a5eda 100644 --- a/arch/mips/ath79/common.c +++ b/arch/mips/ath79/common.c @@ -76,14 +76,14 @@ void ath79_ddr_set_pci_windows(void) { BUG_ON(!ath79_ddr_pci_win_base); - __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0); - __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 1); - __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 2); - __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 3); - __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 4); - __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 5); - __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 6); - __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 7); + __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0x0); + __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 0x4); + __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 0x8); + __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 0xc); + __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 0x10); + __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 0x14); + __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 0x18); + __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 0x1c); } EXPORT_SYMBOL_GPL(ath79_ddr_set_pci_windows); diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 7791840cf22c..db07793f7b43 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -11,6 +11,7 @@ #include <asm/asm.h> #include 
<asm/asmmacro.h> #include <asm/compiler.h> +#include <asm/irqflags.h> #include <asm/regdef.h> #include <asm/mipsregs.h> #include <asm/stackframe.h> @@ -137,6 +138,7 @@ work_pending: andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS beqz t0, work_notifysig work_resched: + TRACE_IRQS_OFF jal schedule local_irq_disable # make sure need_resched and @@ -173,6 +175,7 @@ syscall_exit_work: beqz t0, work_pending # trace bit set? local_irq_enable # could let syscall_trace_leave() # call schedule() instead + TRACE_IRQS_ON move a0, sp jal syscall_trace_leave b resume_userspace diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index f63a289977cc..0b3e58a3189f 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -55,7 +55,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); * state. Actually per-core rather than per-CPU. */ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); -static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc); /* Indicates online CPUs coupled with the current CPU */ static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); @@ -625,7 +624,6 @@ static int __init cps_gen_core_entries(unsigned cpu) { enum cps_pm_state state; unsigned core = cpu_data[cpu].core; - unsigned dlinesz = cpu_data[cpu].dcache.linesz; void *entry_fn, *core_rc; for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { @@ -645,16 +643,11 @@ static int __init cps_gen_core_entries(unsigned cpu) } if (!per_cpu(ready_count, core)) { - core_rc = kmalloc(dlinesz * 2, GFP_KERNEL); + core_rc = kmalloc(sizeof(u32), GFP_KERNEL); if (!core_rc) { pr_err("Failed allocate core %u ready_count\n", core); return -ENOMEM; } - per_cpu(ready_count_alloc, core) = core_rc; - - /* Ensure ready_count is aligned to a cacheline boundary */ - core_rc += dlinesz - 1; - core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1)); per_cpu(ready_count, core) = core_rc; } diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 
99a402231f4d..31ca2edd7218 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -194,6 +194,8 @@ void show_stack(struct task_struct *task, unsigned long *sp) { struct pt_regs regs; mm_segment_t old_fs = get_fs(); + + regs.cp0_status = KSU_KERNEL; if (sp) { regs.regs[29] = (unsigned long)sp; regs.regs[31] = 0; diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index dfb04fcedb04..48d6349fd9d7 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c @@ -107,31 +107,31 @@ static struct rt2880_pmx_group mt7620a_pinmux_data[] = { }; static struct rt2880_pmx_func pwm1_grp_mt7628[] = { - FUNC("sdcx", 3, 19, 1), + FUNC("sdxc d6", 3, 19, 1), FUNC("utif", 2, 19, 1), FUNC("gpio", 1, 19, 1), - FUNC("pwm", 0, 19, 1), + FUNC("pwm1", 0, 19, 1), }; static struct rt2880_pmx_func pwm0_grp_mt7628[] = { - FUNC("sdcx", 3, 18, 1), + FUNC("sdxc d7", 3, 18, 1), FUNC("utif", 2, 18, 1), FUNC("gpio", 1, 18, 1), - FUNC("pwm", 0, 18, 1), + FUNC("pwm0", 0, 18, 1), }; static struct rt2880_pmx_func uart2_grp_mt7628[] = { - FUNC("sdcx", 3, 20, 2), + FUNC("sdxc d5 d4", 3, 20, 2), FUNC("pwm", 2, 20, 2), FUNC("gpio", 1, 20, 2), - FUNC("uart", 0, 20, 2), + FUNC("uart2", 0, 20, 2), }; static struct rt2880_pmx_func uart1_grp_mt7628[] = { - FUNC("sdcx", 3, 45, 2), + FUNC("sw_r", 3, 45, 2), FUNC("pwm", 2, 45, 2), FUNC("gpio", 1, 45, 2), - FUNC("uart", 0, 45, 2), + FUNC("uart1", 0, 45, 2), }; static struct rt2880_pmx_func i2c_grp_mt7628[] = { @@ -143,21 +143,21 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = { static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) }; static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) }; -static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 15, 38) }; +static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) }; static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; static struct rt2880_pmx_func sd_mode_grp_mt7628[] = { 
FUNC("jtag", 3, 22, 8), FUNC("utif", 2, 22, 8), FUNC("gpio", 1, 22, 8), - FUNC("sdcx", 0, 22, 8), + FUNC("sdxc", 0, 22, 8), }; static struct rt2880_pmx_func uart0_grp_mt7628[] = { FUNC("-", 3, 12, 2), FUNC("-", 2, 12, 2), FUNC("gpio", 1, 12, 2), - FUNC("uart", 0, 12, 2), + FUNC("uart0", 0, 12, 2), }; static struct rt2880_pmx_func i2s_grp_mt7628[] = { @@ -171,7 +171,7 @@ static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = { FUNC("-", 3, 6, 1), FUNC("refclk", 2, 6, 1), FUNC("gpio", 1, 6, 1), - FUNC("spi", 0, 6, 1), + FUNC("spi cs1", 0, 6, 1), }; static struct rt2880_pmx_func spis_grp_mt7628[] = { @@ -188,28 +188,44 @@ static struct rt2880_pmx_func gpio_grp_mt7628[] = { FUNC("gpio", 0, 11, 1), }; -#define MT7628_GPIO_MODE_MASK 0x3 - -#define MT7628_GPIO_MODE_PWM1 30 -#define MT7628_GPIO_MODE_PWM0 28 -#define MT7628_GPIO_MODE_UART2 26 -#define MT7628_GPIO_MODE_UART1 24 -#define MT7628_GPIO_MODE_I2C 20 -#define MT7628_GPIO_MODE_REFCLK 18 -#define MT7628_GPIO_MODE_PERST 16 -#define MT7628_GPIO_MODE_WDT 14 -#define MT7628_GPIO_MODE_SPI 12 -#define MT7628_GPIO_MODE_SDMODE 10 -#define MT7628_GPIO_MODE_UART0 8 -#define MT7628_GPIO_MODE_I2S 6 -#define MT7628_GPIO_MODE_CS1 4 -#define MT7628_GPIO_MODE_SPIS 2 -#define MT7628_GPIO_MODE_GPIO 0 +static struct rt2880_pmx_func wled_kn_grp_mt7628[] = { + FUNC("rsvd", 3, 35, 1), + FUNC("rsvd", 2, 35, 1), + FUNC("gpio", 1, 35, 1), + FUNC("wled_kn", 0, 35, 1), +}; + +static struct rt2880_pmx_func wled_an_grp_mt7628[] = { + FUNC("rsvd", 3, 44, 1), + FUNC("rsvd", 2, 44, 1), + FUNC("gpio", 1, 44, 1), + FUNC("wled_an", 0, 44, 1), +}; + +#define MT7628_GPIO_MODE_MASK 0x3 + +#define MT7628_GPIO_MODE_WLED_KN 48 +#define MT7628_GPIO_MODE_WLED_AN 32 +#define MT7628_GPIO_MODE_PWM1 30 +#define MT7628_GPIO_MODE_PWM0 28 +#define MT7628_GPIO_MODE_UART2 26 +#define MT7628_GPIO_MODE_UART1 24 +#define MT7628_GPIO_MODE_I2C 20 +#define MT7628_GPIO_MODE_REFCLK 18 +#define MT7628_GPIO_MODE_PERST 16 +#define MT7628_GPIO_MODE_WDT 14 +#define 
MT7628_GPIO_MODE_SPI 12 +#define MT7628_GPIO_MODE_SDMODE 10 +#define MT7628_GPIO_MODE_UART0 8 +#define MT7628_GPIO_MODE_I2S 6 +#define MT7628_GPIO_MODE_CS1 4 +#define MT7628_GPIO_MODE_SPIS 2 +#define MT7628_GPIO_MODE_GPIO 0 static struct rt2880_pmx_group mt7628an_pinmux_data[] = { - GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK, + GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK, 1, MT7628_GPIO_MODE_PWM1), - GRP_G("pmw1", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK, + GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK, 1, MT7628_GPIO_MODE_PWM0), GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK, 1, MT7628_GPIO_MODE_UART2), @@ -233,6 +249,10 @@ static struct rt2880_pmx_group mt7628an_pinmux_data[] = { 1, MT7628_GPIO_MODE_SPIS), GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK, 1, MT7628_GPIO_MODE_GPIO), + GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_WLED_AN), + GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, + 1, MT7628_GPIO_MODE_WLED_KN), { 0 } }; @@ -439,7 +459,7 @@ void __init ralink_clk_init(void) ralink_clk_add("10000c00.uartlite", periph_rate); ralink_clk_add("10180000.wmac", xtal_rate); - if (IS_ENABLED(CONFIG_USB) && is_mt76x8()) { + if (IS_ENABLED(CONFIG_USB) && !is_mt76x8()) { /* * When the CPU goes into sleep mode, the BUS clock will be * too low for USB to function properly. 
Adjust the busses diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c index 15506a1ff22a..9dd67749c592 100644 --- a/arch/mips/ralink/rt288x.c +++ b/arch/mips/ralink/rt288x.c @@ -109,5 +109,5 @@ void prom_soc_init(struct ralink_soc_info *soc_info) soc_info->mem_size_max = RT2880_MEM_SIZE_MAX; rt2880_pinmux_data = rt2880_pinmux_data_act; - ralink_soc == RT2880_SOC; + ralink_soc = RT2880_SOC; } diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 98949b0df00a..6696c1986844 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -304,9 +304,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity) * * For pHyp, we have to enable IO for log retrieval. Otherwise, * 0xFF's is always returned from PCI config space. + * + * When the @severity is EEH_LOG_PERM, the PE is going to be + * removed. Prior to that, the drivers for devices included in + * the PE will be closed. The drivers rely on working IO path + * to bring the devices to quiet state. Otherwise, PCI traffic + * from those devices after they are removed is like to cause + * another unexpected EEH error. */ if (!(pe->type & EEH_PE_PHB)) { - if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG)) + if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) || + severity == EEH_LOG_PERM) eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); /* diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 7c053f281406..1138fec3dd65 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -514,6 +514,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) #endif #endif + /* + * jprobes use jprobe_return() which skips the normal return + * path of the function, and this messes up the accounting of the + * function graph tracer. + * + * Pause function graph tracing while performing the jprobe function. 
+ */ + pause_graph_tracing(); + return 1; } @@ -536,6 +545,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) * saved regs... */ memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); + /* It's OK to start function graph tracing again */ + unpause_graph_tracing(); preempt_enable_no_resched(); return 1; } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 3c3a367b6e59..396dc44e783b 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2693,6 +2693,27 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) return -EINVAL; } + /* + * Don't allow entry with a suspended transaction, because + * the guest entry/exit code will lose it. + * If the guest has TM enabled, save away their TM-related SPRs + * (they will get restored by the TM unavailable interrupt). + */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && + (current->thread.regs->msr & MSR_TM)) { + if (MSR_TM_ACTIVE(current->thread.regs->msr)) { + run->exit_reason = KVM_EXIT_FAIL_ENTRY; + run->fail_entry.hardware_entry_failure_reason = 0; + return -EINVAL; + } + current->thread.tm_tfhar = mfspr(SPRN_TFHAR); + current->thread.tm_tfiar = mfspr(SPRN_TFIAR); + current->thread.tm_texasr = mfspr(SPRN_TEXASR); + current->thread.regs->msr &= ~MSR_TM; + } +#endif + kvmppc_core_prepare_to_enter(vcpu); /* No need to go into the guest when all we'll do is come back out */ diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 4c48b487698c..0b48ce40d351 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S @@ -179,6 +179,16 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) b slb_finish_load 8: /* invalid EA */ + /* + * It's possible the bad EA is too large to fit in the SLB cache, which + * would mean we'd fail to invalidate it on context switch. So mark the + * SLB cache as full so we force a full flush. 
We also set cr7+eq to + * mark the address as a kernel address, so slb_finish_load() skips + * trying to insert it into the SLB cache. + */ + li r9,SLB_CACHE_ENTRIES + 1 + sth r9,PACASLBCACHEPTR(r13) + crset 4*cr7+eq li r10,0 /* BAD_VSID */ li r9,0 /* BAD_VSID */ li r11,SLB_VSID_USER /* flags don't much matter */ diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index d7697ab802f6..8e136b88cdf4 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -15,7 +15,9 @@ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ asm volatile( \ " lctlg %1,%2,%0\n" \ - : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ + : \ + : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \ + : "memory"); \ } #define __ctl_store(array, low, high) { \ diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index e9cd7befcb76..19d14ac23ef9 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -221,6 +221,9 @@ struct x86_emulate_ops { void (*get_cpuid)(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); + + unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt); + void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags); }; typedef u32 __attribute__((vector_size(16))) sse128_t; @@ -290,7 +293,6 @@ struct x86_emulate_ctxt { /* interruptibility state, as a result of execution of STI or MOV SS */ int interruptibility; - int emul_flags; bool perm_ok; /* do not check permissions if true */ bool ud; /* inject an #UD if host doesn't support insn */ diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 1dcea225977d..04b2f3cad7ba 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2531,7 +2531,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) u64 smbase; int ret; - if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0) + if 
((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) return emulate_ud(ctxt); /* @@ -2580,11 +2580,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) return X86EMUL_UNHANDLEABLE; } - if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) + if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) ctxt->ops->set_nmi_mask(ctxt, false); - ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK; - ctxt->emul_flags &= ~X86EMUL_SMM_MASK; + ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) & + ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK)); return X86EMUL_CONTINUE; } @@ -5296,6 +5296,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; + unsigned emul_flags; ctxt->mem_read.pos = 0; @@ -5310,6 +5311,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) goto done; } + emul_flags = ctxt->ops->get_hflags(ctxt); if (unlikely(ctxt->d & (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || @@ -5343,7 +5345,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) fetch_possible_mmx_operand(ctxt, &ctxt->dst); } - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) @@ -5372,7 +5374,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) goto done; } - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) @@ -5426,7 +5428,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) special_insn: - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { + 
if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c index ab38af4f4947..23a7c7ba377a 100644 --- a/arch/x86/kvm/pmu_intel.c +++ b/arch/x86/kvm/pmu_intel.c @@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) ((u64)1 << edx.split.bit_width_fixed) - 1; } - pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | + pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); pmu->global_ctrl_mask = ~pmu->global_ctrl; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 50ca8f409a7c..bbaa11f4e74b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2264,7 +2264,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr) if (!(vmcs12->exception_bitmap & (1u << nr))) return 0; - nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); return 1; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6c82792487e9..8e526c6fd784 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4844,6 +4844,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, if (var.unusable) { memset(desc, 0, sizeof(*desc)); + if (base3) + *base3 = 0; return false; } @@ -4999,6 +5001,16 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked); } +static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) +{ + return emul_to_vcpu(ctxt)->arch.hflags; +} + +static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags) +{ + kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags); +} + static const struct x86_emulate_ops emulate_ops = { .read_gpr 
= emulator_read_gpr, .write_gpr = emulator_write_gpr, @@ -5038,6 +5050,8 @@ static const struct x86_emulate_ops emulate_ops = { .intercept = emulator_intercept, .get_cpuid = emulator_get_cpuid, .set_nmi_mask = emulator_set_nmi_mask, + .get_hflags = emulator_get_hflags, + .set_hflags = emulator_set_hflags, }; static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) @@ -5090,7 +5104,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK); - ctxt->emul_flags = vcpu->arch.hflags; init_decode_cache(ctxt); vcpu->arch.emulate_regs_need_sync_from_vcpu = false; @@ -5486,8 +5499,6 @@ restart: unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; - if (vcpu->arch.hflags != ctxt->emul_flags) - kvm_set_hflags(vcpu, ctxt->emul_flags); kvm_rip_write(vcpu, ctxt->eip); if (r == EMULATE_DONE) kvm_vcpu_check_singlestep(vcpu, rflags, &r); @@ -5974,7 +5985,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) kvm_x86_ops->patch_hypercall(vcpu, instruction); - return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); + return emulator_write_emulated(ctxt, rip, instruction, 3, + &ctxt->exception); } static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index ef05755a1900..7ed47b1e6f42 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs) * We were not able to extract an address from the instruction, * probably because there was something invalid in it. 
*/ - if (info->si_addr == (void *)-1) { + if (info->si_addr == (void __user *)-1) { err = -EINVAL; goto err_out; } @@ -525,15 +525,7 @@ int mpx_handle_bd_fault(void) if (!kernel_managing_mpx_tables(current->mm)) return -EINVAL; - if (do_mpx_bt_fault()) { - force_sig(SIGSEGV, current); - /* - * The force_sig() is essentially "handling" this - * exception, so we do not pass up the error - * from do_mpx_bt_fault(). - */ - } - return 0; + return do_mpx_bt_fault(); } /* diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 5fb6adaaa796..5a760fd66bec 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -134,8 +134,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask, { struct flush_tlb_info info; - if (end == 0) - end = start + PAGE_SIZE; info.flush_mm = mm; info.flush_start = start; info.flush_end = end; @@ -264,7 +262,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) - flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); + flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE); preempt_enable(); } diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 0c89ab992012..a84172106e0f 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -212,6 +212,7 @@ struct fastrpc_channel_ctx { struct device *dev; struct fastrpc_session_ctx session[NUM_SESSIONS]; struct completion work; + struct completion workport; struct notifier_block nb; struct kref kref; int channel; @@ -318,7 +319,6 @@ static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = { .channel = SMD_APPS_DSPS, .link.link_info.edge = "dsps", .link.link_info.transport = "smem", - .vmid = VMID_SSC_Q6, }, { .name = "cdsprpc-smd", @@ -1475,6 +1475,7 @@ static void fastrpc_init(struct fastrpc_apps *me) me->channel = &gcinfo[0]; for (i = 0; i < NUM_CHANNELS; i++) { init_completion(&me->channel[i].work); + init_completion(&me->channel[i].workport); me->channel[i].sesscount = 0; } } @@ 
-2136,7 +2137,7 @@ void fastrpc_glink_notify_state(void *handle, const void *priv, unsigned event) switch (event) { case GLINK_CONNECTED: link->port_state = FASTRPC_LINK_CONNECTED; - complete(&me->channel[cid].work); + complete(&me->channel[cid].workport); break; case GLINK_LOCAL_DISCONNECTED: link->port_state = FASTRPC_LINK_DISCONNECTED; @@ -2286,8 +2287,7 @@ static void fastrpc_glink_close(void *chan, int cid) return; link = &gfa.channel[cid].link; - if (link->port_state == FASTRPC_LINK_CONNECTED || - link->port_state == FASTRPC_LINK_CONNECTING) { + if (link->port_state == FASTRPC_LINK_CONNECTED) { link->port_state = FASTRPC_LINK_DISCONNECTING; glink_close(chan); } @@ -2485,8 +2485,9 @@ static int fastrpc_channel_open(struct fastrpc_file *fl) if (err) goto bail; - VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work, - RPC_TIMEOUT)); + VERIFY(err, + wait_for_completion_timeout(&me->channel[cid].workport, + RPC_TIMEOUT)); if (err) { me->channel[cid].chan = 0; goto bail; diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c index 20e617ed0770..e37609abf7bf 100644 --- a/drivers/char/diag/diag_masks.c +++ b/drivers/char/diag/diag_masks.c @@ -796,7 +796,9 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len, for (i = 0; i < NUM_PERIPHERALS; i++) { if (!diag_check_update(i)) continue; + mutex_lock(&driver->md_session_lock); diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last); + mutex_unlock(&driver->md_session_lock); } end: return write_len; @@ -856,7 +858,9 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len, for (i = 0; i < NUM_PERIPHERALS; i++) { if (!diag_check_update(i)) continue; + mutex_lock(&driver->md_session_lock); diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID); + mutex_unlock(&driver->md_session_lock); } return write_len; @@ -950,7 +954,9 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len, for (i = 0; i < NUM_PERIPHERALS; i++) { if 
(!diag_check_update(i)) continue; + mutex_lock(&driver->md_session_lock); diag_send_event_mask_update(i); + mutex_unlock(&driver->md_session_lock); } return write_len; @@ -997,7 +1003,9 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len, for (i = 0; i < NUM_PERIPHERALS; i++) { if (!diag_check_update(i)) continue; + mutex_lock(&driver->md_session_lock); diag_send_event_mask_update(i); + mutex_unlock(&driver->md_session_lock); } memcpy(dest_buf, &header, sizeof(header)); write_len += sizeof(header); @@ -1251,7 +1259,9 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len, for (i = 0; i < NUM_PERIPHERALS; i++) { if (!diag_check_update(i)) continue; + mutex_lock(&driver->md_session_lock); diag_send_log_mask_update(i, req->equip_id); + mutex_unlock(&driver->md_session_lock); } end: return write_len; @@ -1302,7 +1312,9 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len, for (i = 0; i < NUM_PERIPHERALS; i++) { if (!diag_check_update(i)) continue; + mutex_lock(&driver->md_session_lock); diag_send_log_mask_update(i, ALL_EQUIP_ID); + mutex_unlock(&driver->md_session_lock); } return write_len; @@ -1966,9 +1978,11 @@ void diag_send_updates_peripheral(uint8_t peripheral) diag_send_feature_mask_update(peripheral); if (driver->time_sync_enabled) diag_send_time_sync_update(peripheral); + mutex_lock(&driver->md_session_lock); diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID); diag_send_log_mask_update(peripheral, ALL_EQUIP_ID); diag_send_event_mask_update(peripheral); + mutex_unlock(&driver->md_session_lock); diag_send_real_time_update(peripheral, driver->real_time_mode[DIAG_LOCAL_PROC]); diag_send_peripheral_buffering_mode( diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c index 06b83f5230bf..a27f12883c8d 100644 --- a/drivers/char/diag/diag_memorydevice.c +++ b/drivers/char/diag/diag_memorydevice.c @@ -129,37 +129,6 @@ void diag_md_close_all() 
diag_ws_reset(DIAG_WS_MUX); } -static int diag_md_get_peripheral(int ctxt) -{ - int peripheral; - - if (driver->num_pd_session) { - peripheral = GET_PD_CTXT(ctxt); - switch (peripheral) { - case UPD_WLAN: - case UPD_AUDIO: - case UPD_SENSORS: - break; - case DIAG_ID_MPSS: - case DIAG_ID_LPASS: - case DIAG_ID_CDSP: - default: - peripheral = - GET_BUF_PERIPHERAL(ctxt); - if (peripheral > NUM_PERIPHERALS) - peripheral = -EINVAL; - break; - } - } else { - /* Account for Apps data as well */ - peripheral = GET_BUF_PERIPHERAL(ctxt); - if (peripheral > NUM_PERIPHERALS) - peripheral = -EINVAL; - } - - return peripheral; -} - int diag_md_write(int id, unsigned char *buf, int len, int ctx) { int i; diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c index d6f6ea7af8ea..8cc803eef552 100644 --- a/drivers/char/diag/diag_mux.c +++ b/drivers/char/diag/diag_mux.c @@ -153,12 +153,15 @@ int diag_mux_write(int proc, unsigned char *buf, int len, int ctx) upd = PERIPHERAL_CDSP; break; case UPD_WLAN: - if (!driver->num_pd_session) + if (!driver->pd_logging_mode[0]) upd = PERIPHERAL_MODEM; break; case UPD_AUDIO: + if (!driver->pd_logging_mode[1]) + upd = PERIPHERAL_LPASS; + break; case UPD_SENSORS: - if (!driver->num_pd_session) + if (!driver->pd_logging_mode[2]) upd = PERIPHERAL_LPASS; break; default: diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c index 8ab22ee3bc3c..afaedc99a4e7 100644 --- a/drivers/char/diag/diagchar_core.c +++ b/drivers/char/diag/diagchar_core.c @@ -456,6 +456,7 @@ static void diag_close_logging_process(const int pid) { int i, j; int session_mask; + uint32_t p_mask; struct diag_md_session_t *session_info = NULL; struct diag_logging_mode_param_t params; @@ -475,6 +476,9 @@ static void diag_close_logging_process(const int pid) session_mask = session_info->peripheral_mask; diag_md_session_close(session_info); + p_mask = + diag_translate_kernel_to_user_mask(session_mask); + for (i = 0; i < NUM_MD_SESSIONS; i++) if 
(MD_PERIPHERAL_MASK(i) & session_mask) diag_mux_close_peripheral(DIAG_LOCAL_PROC, i); @@ -482,19 +486,17 @@ static void diag_close_logging_process(const int pid) params.req_mode = USB_MODE; params.mode_param = 0; params.pd_mask = 0; - params.peripheral_mask = - diag_translate_kernel_to_user_mask(session_mask); + params.peripheral_mask = p_mask; if (driver->num_pd_session > 0) { - for (i = UPD_WLAN; ((i < NUM_MD_SESSIONS) && - (session_mask & MD_PERIPHERAL_MASK(i))); - i++) { - j = i - UPD_WLAN; - driver->pd_session_clear[j] = 1; - driver->pd_logging_mode[j] = 0; - driver->num_pd_session -= 1; - params.pd_mask = - diag_translate_kernel_to_user_mask(session_mask); + for (i = UPD_WLAN; (i < NUM_MD_SESSIONS); i++) { + if (session_mask & MD_PERIPHERAL_MASK(i)) { + j = i - UPD_WLAN; + driver->pd_session_clear[j] = 1; + driver->pd_logging_mode[j] = 0; + driver->num_pd_session -= 1; + params.pd_mask = p_mask; + } } } @@ -1020,6 +1022,11 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len, else hdlc_disabled = driver->hdlc_disabled; if (hdlc_disabled) { + if (len < 4) { + pr_err("diag: In %s, invalid len: %d of non_hdlc pkt", + __func__, len); + return -EBADMSG; + } payload = *(uint16_t *)(buf + 2); if (payload > DIAG_MAX_HDLC_BUF_SIZE) { pr_err("diag: Dropping packet, payload size is %d\n", @@ -1028,11 +1035,21 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len, } driver->hdlc_encode_buf_len = payload; /* - * Adding 4 bytes for start (1 byte), version (1 byte) and - * payload (2 bytes) + * Adding 5 bytes for start (1 byte), version (1 byte), + * payload (2 bytes) and end (1 byte) */ - memcpy(driver->hdlc_encode_buf, buf + 4, payload); - goto send_data; + if (len == (payload + 5)) { + /* + * Adding 4 bytes for start (1 byte), version (1 byte) + * and payload (2 bytes) + */ + memcpy(driver->hdlc_encode_buf, buf + 4, payload); + goto send_data; + } else { + pr_err("diag: In %s, invalid len: %d of non_hdlc pkt", + __func__, len); + return 
-EBADMSG; + } } if (hdlc_flag) { diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c index e209039bed5a..9ac1ad62ffe0 100644 --- a/drivers/char/diag/diagfwd_peripheral.c +++ b/drivers/char/diag/diagfwd_peripheral.c @@ -216,6 +216,45 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len) return buf->len; } +int diag_md_get_peripheral(int ctxt) +{ + int peripheral; + + if (driver->num_pd_session) { + peripheral = GET_PD_CTXT(ctxt); + switch (peripheral) { + case UPD_WLAN: + if (!driver->pd_logging_mode[0]) + peripheral = PERIPHERAL_MODEM; + break; + case UPD_AUDIO: + if (!driver->pd_logging_mode[1]) + peripheral = PERIPHERAL_LPASS; + break; + case UPD_SENSORS: + if (!driver->pd_logging_mode[2]) + peripheral = PERIPHERAL_LPASS; + break; + case DIAG_ID_MPSS: + case DIAG_ID_LPASS: + case DIAG_ID_CDSP: + default: + peripheral = + GET_BUF_PERIPHERAL(ctxt); + if (peripheral > NUM_PERIPHERALS) + peripheral = -EINVAL; + break; + } + } else { + /* Account for Apps data as well */ + peripheral = GET_BUF_PERIPHERAL(ctxt); + if (peripheral > NUM_PERIPHERALS) + peripheral = -EINVAL; + } + + return peripheral; +} + static void diagfwd_data_process_done(struct diagfwd_info *fwd_info, struct diagfwd_buf_t *buf, int len) { @@ -245,13 +284,15 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info, mutex_lock(&driver->hdlc_disable_mutex); mutex_lock(&fwd_info->data_mutex); - peripheral = GET_PD_CTXT(buf->ctxt); - if (peripheral == DIAG_ID_MPSS) - peripheral = PERIPHERAL_MODEM; - if (peripheral == DIAG_ID_LPASS) - peripheral = PERIPHERAL_LPASS; - if (peripheral == DIAG_ID_CDSP) - peripheral = PERIPHERAL_CDSP; + peripheral = diag_md_get_peripheral(buf->ctxt); + if (peripheral < 0) { + pr_err("diag:%s:%d invalid peripheral = %d\n", + __func__, __LINE__, peripheral); + mutex_unlock(&fwd_info->data_mutex); + mutex_unlock(&driver->hdlc_disable_mutex); + diag_ws_release(); + return; + } session_info = 
diag_md_session_get_peripheral(peripheral); diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h index 037eeebdeb35..eda70dcfdcd9 100644 --- a/drivers/char/diag/diagfwd_peripheral.h +++ b/drivers/char/diag/diagfwd_peripheral.h @@ -105,6 +105,9 @@ void diagfwd_early_open(uint8_t peripheral); void diagfwd_late_open(struct diagfwd_info *fwd_info); void diagfwd_close(uint8_t peripheral, uint8_t type); + +int diag_md_get_peripheral(int ctxt); + int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type, void *ctxt, struct diag_peripheral_ops *ops, struct diagfwd_info **fwd_ctxt); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 31e8ae916ba0..be0b09a0fb44 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1864,7 +1864,7 @@ static void config_work_handler(struct work_struct *work) { struct ports_device *portdev; - portdev = container_of(work, struct ports_device, control_work); + portdev = container_of(work, struct ports_device, config_work); if (!use_multiport(portdev)) { struct virtio_device *vdev; struct port *port; diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index d6d425773fa4..5b2db3c6568f 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c @@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) rate = clk_get_rate(s3c_freq->hclk); if (rate < 133 * 1000 * 1000) { pr_err("cpufreq: HCLK not at 133MHz\n"); - clk_put(s3c_freq->hclk); ret = -EINVAL; goto err_armclk; } diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c index f99a421c0ee4..7dd8d3d3f6ad 100644 --- a/drivers/crypto/msm/ice.c +++ b/drivers/crypto/msm/ice.c @@ -441,7 +441,7 @@ static int qcom_ice_enable(struct ice_device *ice_dev) (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) { reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS); if ((reg & 0x80000000) != 0x0) { - 
pr_err("%s: Bypass failed for ice = %p", + pr_err("%s: Bypass failed for ice = %pK", __func__, (void *)ice_dev); BUG(); } @@ -467,7 +467,7 @@ static int qcom_ice_verify_ice(struct ice_device *ice_dev) } ice_dev->ice_hw_version = rev; - dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%p\n", + dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%pK\n", maj_rev, min_rev, step_rev, ice_dev->mmio); @@ -1256,7 +1256,7 @@ static void qcom_ice_debug(struct platform_device *pdev) goto out; } - pr_err("%s: =========== REGISTER DUMP (%p)===========\n", + pr_err("%s: =========== REGISTER DUMP (%pK)===========\n", ice_dev->ice_instance_type, ice_dev); pr_err("%s: ICE Control: 0x%08x | ICE Reset: 0x%08x\n", @@ -1570,7 +1570,7 @@ struct platform_device *qcom_ice_get_pdevice(struct device_node *node) struct ice_device *ice_dev = NULL; if (!node) { - pr_err("%s: invalid node %p", __func__, node); + pr_err("%s: invalid node %pK", __func__, node); goto out; } @@ -1587,13 +1587,14 @@ struct platform_device *qcom_ice_get_pdevice(struct device_node *node) list_for_each_entry(ice_dev, &ice_devices, list) { if (ice_dev->pdev->of_node == node) { - pr_info("%s: found ice device %p\n", __func__, ice_dev); + pr_info("%s: found ice device %pK\n", __func__, + ice_dev); break; } } ice_pdev = to_platform_device(ice_dev->pdev); - pr_info("%s: matching platform device %p\n", __func__, ice_pdev); + pr_info("%s: matching platform device %pK\n", __func__, ice_pdev); out: return ice_pdev; } @@ -1632,7 +1633,7 @@ static int enable_ice_setup(struct ice_device *ice_dev) } ret = regulator_enable(ice_dev->reg); if (ret) { - pr_err("%s:%p: Could not enable regulator\n", + pr_err("%s:%pK: Could not enable regulator\n", __func__, ice_dev); goto out; } @@ -1640,7 +1641,7 @@ static int enable_ice_setup(struct ice_device *ice_dev) /* Setup Clocks */ if (qcom_ice_enable_clocks(ice_dev, true)) { - pr_err("%s:%p:%s Could not enable clocks\n", __func__, + pr_err("%s:%pK:%s Could not enable clocks\n", 
__func__, ice_dev, ice_dev->ice_instance_type); goto out_reg; } @@ -1652,7 +1653,7 @@ static int enable_ice_setup(struct ice_device *ice_dev) ret = qcom_ice_set_bus_vote(ice_dev, vote); if (ret) { - pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret); + pr_err("%s:%pK: failed %d\n", __func__, ice_dev, ret); goto out_clocks; } @@ -1684,19 +1685,19 @@ static int disable_ice_setup(struct ice_device *ice_dev) /* Setup Bus Vote */ vote = qcom_ice_get_bus_vote(ice_dev, "MIN"); if (vote < 0) { - pr_err("%s:%p: Unable to get bus vote\n", __func__, ice_dev); + pr_err("%s:%pK: Unable to get bus vote\n", __func__, ice_dev); goto out_disable_clocks; } ret = qcom_ice_set_bus_vote(ice_dev, vote); if (ret) - pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret); + pr_err("%s:%pK: failed %d\n", __func__, ice_dev, ret); out_disable_clocks: /* Setup Clocks */ if (qcom_ice_enable_clocks(ice_dev, false)) - pr_err("%s:%p:%s Could not disable clocks\n", __func__, + pr_err("%s:%pK:%s Could not disable clocks\n", __func__, ice_dev, ice_dev->ice_instance_type); /* Setup Regulator */ @@ -1707,7 +1708,7 @@ out_disable_clocks: } ret = regulator_disable(ice_dev->reg); if (ret) { - pr_err("%s:%p: Could not disable regulator\n", + pr_err("%s:%pK: Could not disable regulator\n", __func__, ice_dev); goto out; } diff --git a/drivers/crypto/msm/ota_crypto.c b/drivers/crypto/msm/ota_crypto.c index a568bf46f09f..96297fe7eaad 100644 --- a/drivers/crypto/msm/ota_crypto.c +++ b/drivers/crypto/msm/ota_crypto.c @@ -172,7 +172,7 @@ static int qcota_release(struct inode *inode, struct file *file) podev = file->private_data; if (podev != NULL && podev->magic != OTA_MAGIC) { - pr_err("%s: invalid handle %p\n", + pr_err("%s: invalid handle %pK\n", __func__, podev); } @@ -444,7 +444,7 @@ static long qcota_ioctl(struct file *file, podev = file->private_data; if (podev == NULL || podev->magic != OTA_MAGIC) { - pr_err("%s: invalid handle %p\n", + pr_err("%s: invalid handle %pK\n", __func__, podev); return -ENOENT; 
} diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c index ee7e735761e2..4ab8ca143f6c 100644 --- a/drivers/crypto/msm/qce50.c +++ b/drivers/crypto/msm/qce50.c @@ -1,6 +1,6 @@ /* Qualcomm Crypto Engine driver. * - * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -294,11 +294,11 @@ static int _probe_ce_engine(struct qce_device *pce_dev) pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE; dev_info(pce_dev->pdev, - "CE device = 0x%x\n, " - "IO base, CE = 0x%p\n, " + "CE device = 0x%x\n" + "IO base, CE = 0x%pK\n" "Consumer (IN) PIPE %d, " "Producer (OUT) PIPE %d\n" - "IO base BAM = 0x%p\n" + "IO base BAM = 0x%pK\n" "BAM IRQ %d\n" "Engines Availability = 0x%x\n", pce_dev->ce_bam_info.ce_device, @@ -1160,7 +1160,7 @@ static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info) #define QCE_WRITE_REG(val, addr) \ { \ - pr_info(" [0x%p] 0x%x\n", addr, (uint32_t)val); \ + pr_info(" [0x%pK] 0x%x\n", addr, (uint32_t)val); \ writel_relaxed(val, addr); \ } @@ -2730,7 +2730,7 @@ static int qce_sps_init_ep_conn(struct qce_device *pce_dev, sps_event->callback = NULL; } - pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%p\n", + pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%pK\n", is_producer ? 
"PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)", (uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base); goto out; @@ -2895,7 +2895,7 @@ static int qce_sps_get_bam(struct qce_device *pce_dev) bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL; bam.options |= SPS_BAM_CACHED_WP; pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr); - pr_debug("bam virtual base=0x%p\n", bam.virt_addr); + pr_debug("bam virtual base=0x%pK\n", bam.virt_addr); /* Register CE Peripheral BAM device to SPS driver */ rc = sps_register_bam_device(&bam, &pbam->handle); @@ -2998,7 +2998,7 @@ static void print_notify_debug(struct sps_event_notify *notify) phys_addr_t addr = DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags, notify->data.transfer.iovec.addr); - pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%p\n", + pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%pK\n", notify->event_id, &addr, notify->data.transfer.iovec.size, notify->data.transfer.iovec.flags, diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c index beeb99e479c7..20bf034bb193 100644 --- a/drivers/crypto/msm/qcedev.c +++ b/drivers/crypto/msm/qcedev.c @@ -201,7 +201,7 @@ static int qcedev_release(struct inode *inode, struct file *file) handle = file->private_data; podev = handle->cntl; if (podev != NULL && podev->magic != QCEDEV_MAGIC) { - pr_err("%s: invalid handle %p\n", + pr_err("%s: invalid handle %pK\n", __func__, podev); } kzfree(handle); @@ -1607,7 +1607,7 @@ long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg) podev = handle->cntl; qcedev_areq.handle = handle; if (podev == NULL || podev->magic != QCEDEV_MAGIC) { - pr_err("%s: invalid handle %p\n", + pr_err("%s: invalid handle %pK\n", __func__, podev); return -ENOENT; } diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c index b5c5dc035c66..5b364f053b1b 100644 --- a/drivers/crypto/msm/qcrypto.c +++ b/drivers/crypto/msm/qcrypto.c @@ -262,7 +262,7 @@ static void 
qcrypto_free_req_control(struct crypto_engine *pce, preq->arsp = NULL; /* free req */ if (xchg(&preq->in_use, false) == false) { - pr_warn("request info %p free already\n", preq); + pr_warn("request info %pK free already\n", preq); } else { atomic_dec(&pce->req_count); } @@ -1720,7 +1720,7 @@ static void _qce_ahash_complete(void *cookie, unsigned char *digest, } #ifdef QCRYPTO_DEBUG - dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %p ret %d\n", + dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %pK ret %d\n", areq, ret); #endif if (digest) { @@ -1779,7 +1779,7 @@ static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb, } #ifdef QCRYPTO_DEBUG - dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n", + dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %pK ret %d\n", areq, ret); #endif if (iv) @@ -2479,7 +2479,7 @@ static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req) BUG_ON(crypto_tfm_alg_type(req->base.tfm) != CRYPTO_ALG_TYPE_ABLKCIPHER); #ifdef QCRYPTO_DEBUG - dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req); + dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %pK\n", req); #endif if ((ctx->enc_key_len == AES_KEYSIZE_192) && @@ -2509,7 +2509,7 @@ static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req) BUG_ON(crypto_tfm_alg_type(req->base.tfm) != CRYPTO_ALG_TYPE_ABLKCIPHER); #ifdef QCRYPTO_DEBUG - dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req); + dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %pK\n", req); #endif if ((ctx->enc_key_len == AES_KEYSIZE_192) && @@ -2539,7 +2539,7 @@ static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req) BUG_ON(crypto_tfm_alg_type(req->base.tfm) != CRYPTO_ALG_TYPE_ABLKCIPHER); #ifdef QCRYPTO_DEBUG - dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req); + dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %pK\n", req); #endif if ((ctx->enc_key_len == AES_KEYSIZE_192) && @@ -2727,7 
+2727,7 @@ static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req) BUG_ON(crypto_tfm_alg_type(req->base.tfm) != CRYPTO_ALG_TYPE_ABLKCIPHER); #ifdef QCRYPTO_DEBUG - dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req); + dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %pK\n", req); #endif if ((ctx->enc_key_len == AES_KEYSIZE_192) && @@ -2757,7 +2757,7 @@ static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req) BUG_ON(crypto_tfm_alg_type(req->base.tfm) != CRYPTO_ALG_TYPE_ABLKCIPHER); #ifdef QCRYPTO_DEBUG - dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req); + dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %pK\n", req); #endif if ((ctx->enc_key_len == AES_KEYSIZE_192) && @@ -2787,7 +2787,7 @@ static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req) BUG_ON(crypto_tfm_alg_type(req->base.tfm) != CRYPTO_ALG_TYPE_ABLKCIPHER); #ifdef QCRYPTO_DEBUG - dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req); + dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %pK\n", req); #endif if ((ctx->enc_key_len == AES_KEYSIZE_192) && @@ -3353,7 +3353,7 @@ static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req) #ifdef QCRYPTO_DEBUG dev_info(&ctx->pengine->pdev->dev, - "_qcrypto_aead_encrypt_aes_cbc: %p\n", req); + "_qcrypto_aead_encrypt_aes_cbc: %pK\n", req); #endif rctx = aead_request_ctx(req); @@ -3384,7 +3384,7 @@ static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req) #ifdef QCRYPTO_DEBUG dev_info(&ctx->pengine->pdev->dev, - "_qcrypto_aead_decrypt_aes_cbc: %p\n", req); + "_qcrypto_aead_decrypt_aes_cbc: %pK\n", req); #endif rctx = aead_request_ctx(req); rctx->aead = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 51a9942cdb40..f4cae5357e40 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -681,6 +681,10 @@ int 
amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", adev->clock.default_dispclk / 100); adev->clock.default_dispclk = 60000; + } else if (adev->clock.default_dispclk <= 60000) { + DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n", + adev->clock.default_dispclk / 100); + adev->clock.default_dispclk = 62500; } adev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 25a3e2485cc2..2bc17a907ecf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -124,6 +124,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, } break; } + + if (!(*out_ring && (*out_ring)->adev)) { + DRM_ERROR("Ring %d is not initialized on IP %d\n", + ring, ip_type); + return -EINVAL; + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index 49aa35016653..247b088990dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c @@ -164,7 +164,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); @@ -177,7 +177,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) { int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 
b92139e9b9d8..b5c64edeb668 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -113,7 +113,11 @@ struct ast_private { struct ttm_bo_kmap_obj cache_kmap; int next_cursor; bool support_wide_screen; - bool DisableP2A; + enum { + ast_use_p2a, + ast_use_dt, + ast_use_defaults + } config_mode; enum ast_tx_chip tx_chip_type; u8 dp501_maxclk; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 6c021165ca67..498a94069e6b 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -62,13 +62,84 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast, return ret; } +static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) +{ + struct device_node *np = dev->pdev->dev.of_node; + struct ast_private *ast = dev->dev_private; + uint32_t data, jregd0, jregd1; + + /* Defaults */ + ast->config_mode = ast_use_defaults; + *scu_rev = 0xffffffff; + + /* Check if we have device-tree properties */ + if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", + scu_rev)) { + /* We do, disable P2A access */ + ast->config_mode = ast_use_dt; + DRM_INFO("Using device-tree for configuration\n"); + return; + } + + /* Not all families have a P2A bridge */ + if (dev->pdev->device != PCI_CHIP_AST2000) + return; + + /* + * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge + * is disabled. 
We force using P2A if VGA only mode bit + * is set D[7] + */ + jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { + /* Double check it's actually working */ + data = ast_read32(ast, 0xf004); + if (data != 0xFFFFFFFF) { + /* P2A works, grab silicon revision */ + ast->config_mode = ast_use_p2a; + + DRM_INFO("Using P2A bridge for configuration\n"); + + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + *scu_rev = ast_read32(ast, 0x1207c); + return; + } + } + + /* We have a P2A bridge but it's disabled */ + DRM_INFO("P2A bridge disabled, using default configuration\n"); +} static int ast_detect_chip(struct drm_device *dev, bool *need_post) { struct ast_private *ast = dev->dev_private; - uint32_t data, jreg; + uint32_t jreg, scu_rev; + + /* + * If VGA isn't enabled, we need to enable now or subsequent + * access to the scratch registers will fail. 
We also inform + * our caller that it needs to POST the chip + * (Assumption: VGA not enabled -> need to POST) + */ + if (!ast_is_vga_enabled(dev)) { + ast_enable_vga(dev); + DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); + *need_post = true; + } else + *need_post = false; + + + /* Enable extended register access */ + ast_enable_mmio(dev); ast_open_key(ast); + /* Find out whether P2A works or whether to use device-tree */ + ast_detect_config_mode(dev, &scu_rev); + + /* Identify chipset */ if (dev->pdev->device == PCI_CHIP_AST1180) { ast->chip = AST1100; DRM_INFO("AST 1180 detected\n"); @@ -80,12 +151,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->chip = AST2300; DRM_INFO("AST 2300 detected\n"); } else if (dev->pdev->revision >= 0x10) { - uint32_t data; - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - - data = ast_read32(ast, 0x1207c); - switch (data & 0x0300) { + switch (scu_rev & 0x0300) { case 0x0200: ast->chip = AST1100; DRM_INFO("AST 1100 detected\n"); @@ -110,26 +176,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } } - /* - * If VGA isn't enabled, we need to enable now or subsequent - * access to the scratch registers will fail. 
We also inform - * our caller that it needs to POST the chip - * (Assumption: VGA not enabled -> need to POST) - */ - if (!ast_is_vga_enabled(dev)) { - ast_enable_vga(dev); - ast_enable_mmio(dev); - DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); - *need_post = true; - } else - *need_post = false; - - /* Check P2A Access */ - ast->DisableP2A = true; - data = ast_read32(ast, 0xf004); - if (data != 0xFFFFFFFF) - ast->DisableP2A = false; - /* Check if we support wide screen */ switch (ast->chip) { case AST1180: @@ -146,17 +192,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->support_wide_screen = true; else { ast->support_wide_screen = false; - if (ast->DisableP2A == false) { - /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x1207c); - data &= 0x300; - if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ - ast->support_wide_screen = true; - if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ - ast->support_wide_screen = true; - } + if (ast->chip == AST2300 && + (scu_rev & 0x300) == 0x0) /* ast1300 */ + ast->support_wide_screen = true; + if (ast->chip == AST2400 && + (scu_rev & 0x300) == 0x100) /* ast1400 */ + ast->support_wide_screen = true; } break; } @@ -220,85 +261,102 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) static int ast_get_dram_info(struct drm_device *dev) { + struct device_node *np = dev->pdev->dev.of_node; struct ast_private *ast = dev->dev_private; - uint32_t data, data2; - uint32_t denum, num, div, ref_pll; + uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; + uint32_t denum, num, div, ref_pll, dsel; - if (ast->DisableP2A) - { + switch (ast->config_mode) { + case ast_use_dt: + /* + * If some properties are missing, use reasonable + * defaults for AST2400 + */ + if (of_property_read_u32(np, "aspeed,mcr-configuration", + &mcr_cfg)) + mcr_cfg = 0x00000577; + if (of_property_read_u32(np, 
"aspeed,mcr-scu-mpll", + &mcr_scu_mpll)) + mcr_scu_mpll = 0x000050C0; + if (of_property_read_u32(np, "aspeed,mcr-scu-strap", + &mcr_scu_strap)) + mcr_scu_strap = 0; + break; + case ast_use_p2a: + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + mcr_cfg = ast_read32(ast, 0x10004); + mcr_scu_mpll = ast_read32(ast, 0x10120); + mcr_scu_strap = ast_read32(ast, 0x10170); + break; + case ast_use_defaults: + default: ast->dram_bus_width = 16; ast->dram_type = AST_DRAM_1Gx16; ast->mclk = 396; + return 0; } - else - { - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x10004); - - if (data & 0x40) - ast->dram_bus_width = 16; - else - ast->dram_bus_width = 32; - if (ast->chip == AST2300 || ast->chip == AST2400) { - switch (data & 0x03) { - case 0: - ast->dram_type = AST_DRAM_512Mx16; - break; - default: - case 1: - ast->dram_type = AST_DRAM_1Gx16; - break; - case 2: - ast->dram_type = AST_DRAM_2Gx16; - break; - case 3: - ast->dram_type = AST_DRAM_4Gx16; - break; - } - } else { - switch (data & 0x0c) { - case 0: - case 4: - ast->dram_type = AST_DRAM_512Mx16; - break; - case 8: - if (data & 0x40) - ast->dram_type = AST_DRAM_1Gx16; - else - ast->dram_type = AST_DRAM_512Mx32; - break; - case 0xc: - ast->dram_type = AST_DRAM_1Gx32; - break; - } - } + if (mcr_cfg & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; - data = ast_read32(ast, 0x10120); - data2 = ast_read32(ast, 0x10170); - if (data2 & 0x2000) - ref_pll = 14318; - else - ref_pll = 12000; - - denum = data & 0x1f; - num = (data & 0x3fe0) >> 5; - data = (data & 0xc000) >> 14; - switch (data) { - case 3: - div = 0x4; + if (ast->chip == AST2300 || ast->chip == AST2400) { + switch (mcr_cfg & 0x03) { + case 0: + ast->dram_type = AST_DRAM_512Mx16; break; - case 2: + default: case 1: - div = 0x2; + ast->dram_type = AST_DRAM_1Gx16; break; - default: - div = 0x1; + case 2: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 3: + 
ast->dram_type = AST_DRAM_4Gx16; + break; + } + } else { + switch (mcr_cfg & 0x0c) { + case 0: + case 4: + ast->dram_type = AST_DRAM_512Mx16; + break; + case 8: + if (mcr_cfg & 0x40) + ast->dram_type = AST_DRAM_1Gx16; + else + ast->dram_type = AST_DRAM_512Mx32; + break; + case 0xc: + ast->dram_type = AST_DRAM_1Gx32; break; } - ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); } + + if (mcr_scu_strap & 0x2000) + ref_pll = 14318; + else + ref_pll = 12000; + + denum = mcr_scu_mpll & 0x1f; + num = (mcr_scu_mpll & 0x3fe0) >> 5; + dsel = (mcr_scu_mpll & 0xc000) >> 14; + switch (dsel) { + case 3: + div = 0x4; + break; + case 2: + case 1: + div = 0x2; + break; + default: + div = 0x1; + break; + } + ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); return 0; } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 270e8fb2803f..c7c58becb25d 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -375,17 +375,14 @@ void ast_post_gpu(struct drm_device *dev) ast_enable_mmio(dev); ast_set_def_ext_reg(dev); - if (ast->DisableP2A == false) - { + if (ast->config_mode == ast_use_p2a) { if (ast->chip == AST2300 || ast->chip == AST2400) ast_init_dram_2300(dev); else ast_init_dram_reg(dev); ast_init_3rdtx(dev); - } - else - { + } else { if (ast->tx_chip_type != AST_TX_NONE) ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ } diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c index 2ee75a2c1039..1b295949122b 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c @@ -556,13 +556,13 @@ static const struct file_operations sde_hdmi_hdcp_state_fops = { .read = _sde_hdmi_hdcp_state_read, }; -static u64 _sde_hdmi_clip_valid_pclk(struct drm_display_mode *mode, u64 pclk_in) +static u64 _sde_hdmi_clip_valid_pclk(struct hdmi *hdmi, u64 pclk_in) { u32 pclk_delta, pclk; u64 pclk_clip = 
pclk_in; /* as per standard, 0.5% of deviation is allowed */ - pclk = mode->clock * HDMI_KHZ_TO_HZ; + pclk = hdmi->pixclock; pclk_delta = pclk * 5 / 1000; if (pclk_in < (pclk - pclk_delta)) @@ -700,7 +700,6 @@ static void sde_hdmi_tx_hdcp_cb_work(struct work_struct *work) static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm) { struct hdmi *hdmi = display->ctrl.ctrl; - struct drm_display_mode *current_mode = &display->mode; u64 cur_pclk, dst_pclk; u64 clip_pclk; int rc = 0; @@ -725,7 +724,7 @@ static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm) dst_pclk = cur_pclk * (1000000000 + ppm); do_div(dst_pclk, 1000000000); - clip_pclk = _sde_hdmi_clip_valid_pclk(current_mode, dst_pclk); + clip_pclk = _sde_hdmi_clip_valid_pclk(hdmi, dst_pclk); /* update pclk */ if (clip_pclk != cur_pclk) { @@ -1893,11 +1892,6 @@ struct drm_msm_ext_panel_hdr_metadata *hdr_meta) return; } - if (!connector->hdr_supported) { - SDE_ERROR("Sink does not support HDR\n"); - return; - } - /* Setup Packet header and payload */ packet_header = type_code | (version << 8) | (length << 16); hdmi_write(hdmi, HDMI_GENERIC0_HDR, packet_header); @@ -2131,9 +2125,13 @@ static int sde_hdmi_tx_check_capability(struct sde_hdmi *sde_hdmi) } } - SDE_DEBUG("%s: Features <HDMI:%s, HDCP:%s>\n", __func__, + if (sde_hdmi->hdmi_tx_major_version >= HDMI_TX_VERSION_4) + sde_hdmi->dc_feature_supported = true; + + SDE_DEBUG("%s: Features <HDMI:%s, HDCP:%s, Deep Color:%s>\n", __func__, hdmi_disabled ? "OFF" : "ON", - hdcp_disabled ? "OFF" : "ON"); + hdcp_disabled ? "OFF" : "ON", + sde_hdmi->dc_feature_supported ? 
"ON" : "OFF"); if (hdmi_disabled) { DEV_ERR("%s: HDMI disabled\n", __func__); @@ -2308,8 +2306,14 @@ int sde_hdmi_pre_kickoff(struct drm_connector *connector, struct msm_display_kickoff_params *params) { - sde_hdmi_panel_set_hdr_infoframe(display, - params->hdr_metadata); + if (!connector || !display || !params) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + if (connector->hdr_supported) + sde_hdmi_panel_set_hdr_infoframe(display, + params->hdr_metadata); return 0; } diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h index 6b9bcfec031b..743d34d05e57 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h @@ -33,6 +33,9 @@ #include "sde_hdmi_util.h" #include "sde_hdcp.h" +#ifndef MIN +#define MIN(x, y) (((x) < (y)) ? (x) : (y)) +#endif #ifdef HDMI_DEBUG_ENABLE #define SDE_HDMI_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args) #else @@ -110,6 +113,8 @@ enum hdmi_tx_feature_type { * @client_notify_pending: If there is client notification pending. * @irq_domain: IRQ domain structure. * @pll_update_enable: if it's allowed to update HDMI PLL ppm. + * @dc_enable: If deep color is enabled. Only DC_30 so far. + * @dc_feature_supported: If deep color feature is supported. * @notifier: CEC notifider to convey physical address information. * @root: Debug fs root entry. 
*/ @@ -161,6 +166,8 @@ struct sde_hdmi { struct irq_domain *irq_domain; struct cec_notifier *notifier; bool pll_update_enable; + bool dc_enable; + bool dc_feature_supported; struct delayed_work hdcp_cb_work; struct dss_io_data io[HDMI_TX_MAX_IO]; @@ -188,6 +195,8 @@ enum hdmi_tx_scdc_access_type { #define HDMI_KHZ_TO_HZ 1000 #define HDMI_MHZ_TO_HZ 1000000 +#define HDMI_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO 2 +#define HDMI_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO 1 /* Maximum pixel clock rates for hdmi tx */ #define HDMI_DEFAULT_MAX_PCLK_RATE 148500 diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c index 24d2320683e4..a4c3c2e7ce46 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c @@ -345,7 +345,9 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi, return 0; } - if (mode->clock > HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ) { + /* use actual clock instead of mode clock */ + if (hdmi->pixclock > + HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ * HDMI_KHZ_TO_HZ) { scrambler_on = true; tmds_clock_ratio = 1; } else { @@ -402,6 +404,38 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi, return rc; } +static void _sde_hdmi_bridge_setup_deep_color(struct hdmi *hdmi) +{ + struct drm_connector *connector = hdmi->connector; + struct sde_connector *c_conn = to_sde_connector(connector); + struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display; + u32 hdmi_ctrl_reg, vbi_pkt_reg; + + SDE_DEBUG("Deep Color: %s\n", display->dc_enable ? 
"On" : "Off"); + + if (display->dc_enable) { + hdmi_ctrl_reg = hdmi_read(hdmi, REG_HDMI_CTRL); + + /* GC CD override */ + hdmi_ctrl_reg |= BIT(27); + + /* enable deep color for RGB888/YUV444/YUV420 30 bits */ + hdmi_ctrl_reg |= BIT(24); + hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl_reg); + /* Enable GC_CONT and GC_SEND in General Control Packet + * (GCP) register so that deep color data is + * transmitted to the sink on every frame, allowing + * the sink to decode the data correctly. + * + * GC_CONT: 0x1 - Send GCP on every frame + * GC_SEND: 0x1 - Enable GCP Transmission + */ + vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL); + vbi_pkt_reg |= BIT(5) | BIT(4); + hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg); + } +} + static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge) { struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge); @@ -543,6 +577,10 @@ static void _sde_hdmi_bridge_set_avi_infoframe(struct hdmi *hdmi, struct hdmi_avi_infoframe info; drm_hdmi_avi_infoframe_from_display_mode(&info, mode); + + if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420) + info.colorspace = HDMI_COLORSPACE_YUV420; + hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe)); checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1]; @@ -660,31 +698,77 @@ static inline void _sde_hdmi_save_mode(struct hdmi *hdmi, drm_mode_copy(&display->mode, mode); } +static u32 _sde_hdmi_choose_best_format(struct hdmi *hdmi, + struct drm_display_mode *mode) +{ + /* + * choose priority: + * 1. DC + RGB + * 2. DC + YUV + * 3. RGB + * 4. 
YUV + */ + int dc_format; + struct drm_connector *connector = hdmi->connector; + + dc_format = sde_hdmi_sink_dc_support(connector, mode); + if (dc_format & MSM_MODE_FLAG_RGB444_DC_ENABLE) + return (MSM_MODE_FLAG_COLOR_FORMAT_RGB444 + | MSM_MODE_FLAG_RGB444_DC_ENABLE); + else if (dc_format & MSM_MODE_FLAG_YUV420_DC_ENABLE) + return (MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420 + | MSM_MODE_FLAG_YUV420_DC_ENABLE); + else if (mode->flags & DRM_MODE_FLAG_SUPPORTS_RGB) + return MSM_MODE_FLAG_COLOR_FORMAT_RGB444; + else if (mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV) + return MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420; + + SDE_ERROR("Can't get available best display format\n"); + + return MSM_MODE_FLAG_COLOR_FORMAT_RGB444; +} + static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = sde_hdmi_bridge->hdmi; + struct drm_connector *connector = hdmi->connector; + struct sde_connector *c_conn = to_sde_connector(connector); + struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display; int hstart, hend, vstart, vend; uint32_t frame_ctrl; + u32 div = 0; mode = adjusted_mode; - hdmi->pixclock = mode->clock * 1000; + display->dc_enable = mode->private_flags & + (MSM_MODE_FLAG_RGB444_DC_ENABLE | + MSM_MODE_FLAG_YUV420_DC_ENABLE); + /* compute pixclock as per color format and bit depth */ + hdmi->pixclock = sde_hdmi_calc_pixclk( + mode->clock * HDMI_KHZ_TO_HZ, + mode->private_flags, + display->dc_enable); + SDE_DEBUG("Actual PCLK: %lu, Mode PCLK: %d\n", + hdmi->pixclock, mode->clock); + + if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420) + div = 1; - hstart = mode->htotal - mode->hsync_start; - hend = mode->htotal - mode->hsync_start + mode->hdisplay; + hstart = (mode->htotal - mode->hsync_start) >> div; + hend = (mode->htotal - mode->hsync_start + mode->hdisplay) >> div; vstart = mode->vtotal - mode->vsync_start - 1; 
vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1; - DRM_DEBUG( + SDE_DEBUG( "htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d", mode->htotal, mode->vtotal, hstart, hend, vstart, vend); hdmi_write(hdmi, REG_HDMI_TOTAL, - SDE_HDMI_TOTAL_H_TOTAL(mode->htotal - 1) | + SDE_HDMI_TOTAL_H_TOTAL((mode->htotal >> div) - 1) | SDE_HDMI_TOTAL_V_TOTAL(mode->vtotal - 1)); hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC, @@ -734,6 +818,22 @@ static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge, _sde_hdmi_save_mode(hdmi, mode); _sde_hdmi_bridge_setup_scrambler(hdmi, mode); + _sde_hdmi_bridge_setup_deep_color(hdmi); +} + +static bool _sde_hdmi_bridge_mode_fixup(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge); + struct hdmi *hdmi = sde_hdmi_bridge->hdmi; + + adjusted_mode->private_flags |= + _sde_hdmi_choose_best_format(hdmi, adjusted_mode); + SDE_DEBUG("Adjusted mode private flags: 0x%x\n", + adjusted_mode->private_flags); + + return true; } static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = { @@ -742,6 +842,7 @@ static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = { .disable = _sde_hdmi_bridge_disable, .post_disable = _sde_hdmi_bridge_post_disable, .mode_set = _sde_hdmi_bridge_mode_set, + .mode_fixup = _sde_hdmi_bridge_mode_fixup, }; diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c index a7887d2c84b0..ba47b7702efd 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c @@ -825,3 +825,76 @@ int sde_hdmi_hdcp2p2_read_rxstatus(void *hdmi_display) } return rc; } + +unsigned long sde_hdmi_calc_pixclk(unsigned long pixel_freq, + u32 out_format, bool dc_enable) +{ + u32 rate_ratio = HDMI_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO; + + if (out_format & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420) + rate_ratio = 
HDMI_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO; + + pixel_freq /= rate_ratio; + + if (dc_enable) + pixel_freq += pixel_freq >> 2; + + return pixel_freq; + +} + +bool sde_hdmi_validate_pixclk(struct drm_connector *connector, + unsigned long pclk) +{ + struct sde_connector *c_conn = to_sde_connector(connector); + struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display; + unsigned long max_pclk = display->max_pclk_khz * HDMI_KHZ_TO_HZ; + + if (connector->max_tmds_char) + max_pclk = MIN(max_pclk, + connector->max_tmds_char * HDMI_MHZ_TO_HZ); + else if (connector->max_tmds_clock) + max_pclk = MIN(max_pclk, + connector->max_tmds_clock * HDMI_MHZ_TO_HZ); + + SDE_DEBUG("MAX PCLK = %ld, PCLK = %ld\n", max_pclk, pclk); + + return pclk < max_pclk; +} + +static bool sde_hdmi_check_dc_clock(struct drm_connector *connector, + struct drm_display_mode *mode, u32 format) +{ + struct sde_connector *c_conn = to_sde_connector(connector); + struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display; + + u32 tmds_clk_with_dc = sde_hdmi_calc_pixclk( + mode->clock * HDMI_KHZ_TO_HZ, + format, + true); + + return (display->dc_feature_supported && + sde_hdmi_validate_pixclk(connector, tmds_clk_with_dc)); +} + +int sde_hdmi_sink_dc_support(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + int dc_format = 0; + + if ((mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV) && + (connector->display_info.edid_hdmi_dc_modes + & DRM_EDID_YCBCR420_DC_30)) + if (sde_hdmi_check_dc_clock(connector, mode, + MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)) + dc_format |= MSM_MODE_FLAG_YUV420_DC_ENABLE; + + if ((mode->flags & DRM_MODE_FLAG_SUPPORTS_RGB) && + (connector->display_info.edid_hdmi_dc_modes + & DRM_EDID_HDMI_DC_30)) + if (sde_hdmi_check_dc_clock(connector, mode, + MSM_MODE_FLAG_COLOR_FORMAT_RGB444)) + dc_format |= MSM_MODE_FLAG_RGB444_DC_ENABLE; + + return dc_format; +} diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h index 
8b69dd31f637..10effed54a14 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h @@ -164,4 +164,10 @@ int sde_hdmi_hdcp2p2_read_rxstatus(void *hdmi_display); void sde_hdmi_ddc_config(void *hdmi_display); int sde_hdmi_ddc_hdcp2p2_isr(void *hdmi_display); void sde_hdmi_dump_regs(void *hdmi_display); +unsigned long sde_hdmi_calc_pixclk(unsigned long pixel_freq, + u32 out_format, bool dc_enable); +bool sde_hdmi_validate_pixclk(struct drm_connector *connector, + unsigned long pclk); +int sde_hdmi_sink_dc_support(struct drm_connector *connector, + struct drm_display_mode *mode); #endif /* _SDE_HDMI_UTIL_H_ */ diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index 2ab50919f514..ed0ba928f170 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -34,6 +34,24 @@ #define MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS (1<<0) /* Transition to new mode requires a wait-for-vblank before the modeset */ #define MSM_MODE_FLAG_VBLANK_PRE_MODESET (1<<1) +/* + * We need setting some flags in bridge, and using them in encoder. Add them in + * private_flags would be better for use. DRM_MODE_FLAG_SUPPORTS_RGB/YUV are + * flags that indicating the SINK supported color formats read from EDID. While, + * these flags defined here indicate the best color/bit depth foramt we choosed + * that would be better for display. For example the best mode display like: + * RGB+RGB_DC,YUV+YUV_DC, RGB,YUV. And we could not set RGB and YUV format at + * the same time. And also RGB_DC only set when RGB format is set,the same for + * YUV_DC. 
+ */ +/* Enable RGB444 30 bit deep color */ +#define MSM_MODE_FLAG_RGB444_DC_ENABLE (1<<2) +/* Enable YUV420 30 bit deep color */ +#define MSM_MODE_FLAG_YUV420_DC_ENABLE (1<<3) +/* Choose RGB444 format to display */ +#define MSM_MODE_FLAG_COLOR_FORMAT_RGB444 (1<<4) +/* Choose YUV420 format to display */ +#define MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420 (1<<5) /* As there are different display controller blocks depending on the * snapdragon version, the kms support is split out and the appropriate diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 5a49a1beee49..5323c0194594 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -1385,6 +1385,7 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3) sde_kms_info_add_keystr(info, "qseed_type", "qseed3"); sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split); + sde_kms_info_add_keyint(info, "has_hdr", catalog->has_hdr); if (catalog->perf.max_bw_low) sde_kms_info_add_keyint(info, "max_bandwidth_low", catalog->perf.max_bw_low); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 29444a83cf02..97c9f8baea6d 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -55,6 +55,33 @@ #define MAX_CHANNELS_PER_ENC 2 +/* rgb to yuv color space conversion matrix */ +static struct sde_csc_cfg sde_csc_10bit_convert[SDE_MAX_CSC] = { + [SDE_CSC_RGB2YUV_601L] = { + { + TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032), + TO_S15D16(0xffb4), TO_S15D16(0xff6b), TO_S15D16(0x00e1), + TO_S15D16(0x00e1), TO_S15D16(0xff44), TO_S15D16(0xffdb), + }, + { 0x0, 0x0, 0x0,}, + { 0x0040, 0x0200, 0x0200,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,}, + }, + + [SDE_CSC_RGB2YUV_601FR] = { + { + TO_S15D16(0x0099), TO_S15D16(0x012d), TO_S15D16(0x003a), + 
TO_S15D16(0xffaa), TO_S15D16(0xff56), TO_S15D16(0x0100), + TO_S15D16(0x0100), TO_S15D16(0xff2a), TO_S15D16(0xffd6), + }, + { 0x0, 0x0, 0x0,}, + { 0x0000, 0x0200, 0x0200,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,}, + }, +}; + /** * struct sde_encoder_virt - virtual encoder. Container of one or more physical * encoders. Virtual encoder manages one "logical" display. Physical @@ -1374,3 +1401,108 @@ enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder) return INTF_MODE_NONE; } + +/** + * sde_encoder_phys_setup_cdm - setup chroma down block + * @phys_enc: Pointer to physical encoder + * @output_type: HDMI/WB + * @format: Output format + * @roi: Output size + */ +void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc, + const struct sde_format *format, u32 output_type, + struct sde_rect *roi) +{ + struct drm_encoder *encoder = phys_enc->parent; + struct sde_encoder_virt *sde_enc = NULL; + struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm; + struct sde_hw_cdm_cfg *cdm_cfg = &phys_enc->cdm_cfg; + int ret; + u32 csc_type = 0; + + if (!encoder) { + SDE_ERROR("invalid encoder\n"); + return; + } + sde_enc = to_sde_encoder_virt(encoder); + + if (!SDE_FORMAT_IS_YUV(format)) { + SDE_DEBUG_ENC(sde_enc, "[cdm_disable fmt:%x]\n", + format->base.pixel_format); + + if (hw_cdm && hw_cdm->ops.disable) + hw_cdm->ops.disable(hw_cdm); + + return; + } + + memset(cdm_cfg, 0, sizeof(struct sde_hw_cdm_cfg)); + + cdm_cfg->output_width = roi->w; + cdm_cfg->output_height = roi->h; + cdm_cfg->output_fmt = format; + cdm_cfg->output_type = output_type; + cdm_cfg->output_bit_depth = SDE_FORMAT_IS_DX(format) ? 
+ CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT; + + /* enable 10 bit logic */ + switch (cdm_cfg->output_fmt->chroma_sample) { + case SDE_CHROMA_RGB: + cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; + cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; + break; + case SDE_CHROMA_H2V1: + cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; + cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; + break; + case SDE_CHROMA_420: + cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; + cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE; + break; + case SDE_CHROMA_H1V2: + default: + SDE_ERROR("unsupported chroma sampling type\n"); + cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; + cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; + break; + } + + SDE_DEBUG_ENC(sde_enc, "[cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n", + cdm_cfg->output_width, + cdm_cfg->output_height, + cdm_cfg->output_fmt->base.pixel_format, + cdm_cfg->output_type, + cdm_cfg->output_bit_depth, + cdm_cfg->h_cdwn_type, + cdm_cfg->v_cdwn_type); + + if (output_type == CDM_CDWN_OUTPUT_HDMI) + csc_type = SDE_CSC_RGB2YUV_601FR; + else if (output_type == CDM_CDWN_OUTPUT_WB) + csc_type = SDE_CSC_RGB2YUV_601L; + + if (hw_cdm && hw_cdm->ops.setup_csc_data) { + ret = hw_cdm->ops.setup_csc_data(hw_cdm, + &sde_csc_10bit_convert[csc_type]); + if (ret < 0) { + SDE_ERROR("failed to setup CSC %d\n", ret); + return; + } + } + + if (hw_cdm && hw_cdm->ops.setup_cdwn) { + ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg); + if (ret < 0) { + SDE_ERROR("failed to setup CDM %d\n", ret); + return; + } + } + + if (hw_cdm && hw_cdm->ops.enable) { + ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg); + if (ret < 0) { + SDE_ERROR("failed to enable CDM %d\n", ret); + return; + } + } +} diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h index 2205dd98a927..20f125155de3 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -349,8 +349,8 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init( #endif void sde_encoder_phys_setup_cdm(struct 
sde_encoder_phys *phys_enc, - struct drm_framebuffer *fb, const struct sde_format *format, - struct sde_rect *wb_roi); + const struct sde_format *format, u32 output_type, + struct sde_rect *roi); /** * sde_encoder_helper_trigger_start - control start helper function diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index 0b6ee302e231..78a8b732b0de 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -242,17 +242,20 @@ static void sde_encoder_phys_vid_setup_timing_engine( SDE_DEBUG_VIDENC(vid_enc, "enabling mode:\n"); drm_mode_debug_printmodeline(&mode); - if (phys_enc->split_role != ENC_ROLE_SOLO) { + if (phys_enc->split_role != ENC_ROLE_SOLO || + (mode.private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)) { mode.hdisplay >>= 1; mode.htotal >>= 1; mode.hsync_start >>= 1; mode.hsync_end >>= 1; + mode.hskew >>= 1; SDE_DEBUG_VIDENC(vid_enc, - "split_role %d, halve horizontal %d %d %d %d\n", + "split_role %d, halve horizontal %d %d %d %d %d\n", phys_enc->split_role, mode.hdisplay, mode.htotal, - mode.hsync_start, mode.hsync_end); + mode.hsync_start, mode.hsync_end, + mode.hskew); } drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params); @@ -407,6 +410,9 @@ static void sde_encoder_phys_vid_mode_set( return; } + phys_enc->hw_ctl = NULL; + phys_enc->hw_cdm = NULL; + rm = &phys_enc->sde_kms->rm; vid_enc = to_sde_encoder_phys_vid(phys_enc); phys_enc->cached_mode = *adj_mode; @@ -427,6 +433,20 @@ static void sde_encoder_phys_vid_mode_set( phys_enc->hw_ctl = NULL; return; } + + /* CDM is optional */ + sde_rm_init_hw_iter(&iter, 
phys_enc->parent->base.id, SDE_HW_BLK_CDM); + for (i = 0; i <= instance; i++) { + sde_rm_get_hw(rm, &iter); + if (i == instance) + phys_enc->hw_cdm = (struct sde_hw_cdm *) iter.hw; + } + + if (IS_ERR(phys_enc->hw_cdm)) { + SDE_ERROR("CDM required but not allocated: %ld\n", + PTR_ERR(phys_enc->hw_cdm)); + phys_enc->hw_cdm = NULL; + } } static int sde_encoder_phys_vid_control_vblank_irq( @@ -477,6 +497,9 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) struct sde_encoder_phys_vid *vid_enc; struct sde_hw_intf *intf; struct sde_hw_ctl *ctl; + struct sde_hw_cdm *hw_cdm = NULL; + struct drm_display_mode mode; + const struct sde_format *fmt = NULL; u32 flush_mask = 0; int ret; @@ -485,7 +508,9 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) SDE_ERROR("invalid encoder/device\n"); return; } + hw_cdm = phys_enc->hw_cdm; priv = phys_enc->parent->dev->dev_private; + mode = phys_enc->cached_mode; vid_enc = to_sde_encoder_phys_vid(phys_enc); intf = vid_enc->hw_intf; @@ -520,7 +545,21 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc) goto end; } + if (mode.private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420) + fmt = sde_get_sde_format(DRM_FORMAT_YUV420); + + if (fmt) { + struct sde_rect hdmi_roi; + + hdmi_roi.w = mode.hdisplay; + hdmi_roi.h = mode.vdisplay; + sde_encoder_phys_setup_cdm(phys_enc, fmt, + CDM_CDWN_OUTPUT_HDMI, &hdmi_roi); + } + ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx); + if (ctl->ops.get_bitmask_cdm && hw_cdm) + ctl->ops.get_bitmask_cdm(ctl, &flush_mask, hw_cdm->idx); ctl->ops.update_pending_flush(ctl, flush_mask); SDE_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n", @@ -569,6 +608,8 @@ static void sde_encoder_phys_vid_get_hw_resources( SDE_DEBUG_VIDENC(vid_enc, "\n"); hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO; + hw_res->needs_cdm = true; + SDE_DEBUG_DRIVER("[vid] needs_cdm=%d\n", hw_res->needs_cdm); } static int 
sde_encoder_phys_vid_wait_for_vblank( @@ -713,6 +754,11 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc) SDE_ERROR_VIDENC(vid_enc, "invalid vblank refcount %d\n", atomic_read(&phys_enc->vblank_refcount)); + if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.disable) { + SDE_DEBUG_DRIVER("[cdm_disable]\n"); + phys_enc->hw_cdm->ops.disable(phys_enc->hw_cdm); + } + phys_enc->enable_state = SDE_ENC_DISABLED; } diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c index a48962a2384b..65b16419fcec 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c @@ -28,24 +28,6 @@ #define WBID(wb_enc) ((wb_enc) ? wb_enc->wb_dev->wb_idx : -1) -#define TO_S15D16(_x_) ((_x_) << 7) - -/** - * sde_rgb2yuv_601l - rgb to yuv color space conversion matrix - * - */ -static struct sde_csc_cfg sde_encoder_phys_wb_rgb2yuv_601l = { - { - TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032), - TO_S15D16(0x1fb5), TO_S15D16(0x1f6c), TO_S15D16(0x00e1), - TO_S15D16(0x00e1), TO_S15D16(0x1f45), TO_S15D16(0x1fdc) - }, - { 0x00, 0x00, 0x00 }, - { 0x0040, 0x0200, 0x0200 }, - { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff }, - { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 }, -}; - /** * sde_encoder_phys_wb_is_master - report wb always as master encoder */ @@ -105,96 +87,6 @@ static void sde_encoder_phys_wb_set_traffic_shaper( } /** - * sde_encoder_phys_setup_cdm - setup chroma down block - * @phys_enc: Pointer to physical encoder - * @fb: Pointer to output framebuffer - * @format: Output format - */ -void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc, - struct drm_framebuffer *fb, const struct sde_format *format, - struct sde_rect *wb_roi) -{ - struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm; - struct sde_hw_cdm_cfg *cdm_cfg = &phys_enc->cdm_cfg; - int ret; - - if (!SDE_FORMAT_IS_YUV(format)) { - SDE_DEBUG("[cdm_disable fmt:%x]\n", - format->base.pixel_format); - - 
if (hw_cdm && hw_cdm->ops.disable) - hw_cdm->ops.disable(hw_cdm); - - return; - } - - memset(cdm_cfg, 0, sizeof(struct sde_hw_cdm_cfg)); - - cdm_cfg->output_width = wb_roi->w; - cdm_cfg->output_height = wb_roi->h; - cdm_cfg->output_fmt = format; - cdm_cfg->output_type = CDM_CDWN_OUTPUT_WB; - cdm_cfg->output_bit_depth = SDE_FORMAT_IS_DX(format) ? - CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT; - - /* enable 10 bit logic */ - switch (cdm_cfg->output_fmt->chroma_sample) { - case SDE_CHROMA_RGB: - cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; - cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; - break; - case SDE_CHROMA_H2V1: - cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; - cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; - break; - case SDE_CHROMA_420: - cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE; - cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE; - break; - case SDE_CHROMA_H1V2: - default: - SDE_ERROR("unsupported chroma sampling type\n"); - cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE; - cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE; - break; - } - - SDE_DEBUG("[cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n", - cdm_cfg->output_width, - cdm_cfg->output_height, - cdm_cfg->output_fmt->base.pixel_format, - cdm_cfg->output_type, - cdm_cfg->output_bit_depth, - cdm_cfg->h_cdwn_type, - cdm_cfg->v_cdwn_type); - - if (hw_cdm && hw_cdm->ops.setup_csc_data) { - ret = hw_cdm->ops.setup_csc_data(hw_cdm, - &sde_encoder_phys_wb_rgb2yuv_601l); - if (ret < 0) { - SDE_ERROR("failed to setup CSC %d\n", ret); - return; - } - } - - if (hw_cdm && hw_cdm->ops.setup_cdwn) { - ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg); - if (ret < 0) { - SDE_ERROR("failed to setup CDM %d\n", ret); - return; - } - } - - if (hw_cdm && hw_cdm->ops.enable) { - ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg); - if (ret < 0) { - SDE_ERROR("failed to enable CDM %d\n", ret); - return; - } - } -} - -/** * sde_encoder_phys_wb_setup_fb - setup output framebuffer * @phys_enc: Pointer to physical encoder * @fb: Pointer to output framebuffer @@ -520,7 +412,8 @@ static void 
sde_encoder_phys_wb_setup( sde_encoder_phys_wb_set_traffic_shaper(phys_enc); - sde_encoder_phys_setup_cdm(phys_enc, fb, wb_enc->wb_fmt, wb_roi); + sde_encoder_phys_setup_cdm(phys_enc, wb_enc->wb_fmt, + CDM_CDWN_OUTPUT_WB, wb_roi); sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c index eb398fbee816..635c2e4e954b 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c @@ -2075,6 +2075,11 @@ static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg, goto end; } + if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300) || + IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_301)) { + sde_cfg->has_hdr = true; + } + index = _sde_copy_formats(sde_cfg->dma_formats, dma_list_size, 0, plane_formats, ARRAY_SIZE(plane_formats)); index += _sde_copy_formats(sde_cfg->dma_formats, dma_list_size, diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h index 01204df48871..73bb77b7afa6 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -655,6 +655,7 @@ struct sde_vp_cfg { * @csc_type csc or csc_10bit support. 
* @has_src_split source split feature status * @has_cdp Client driver prefetch feature status + * @has_hdr HDR feature support * @dma_formats Supported formats for dma pipe * @cursor_formats Supported formats for cursor pipe * @vig_formats Supported formats for vig pipe @@ -672,7 +673,7 @@ struct sde_mdss_cfg { u32 csc_type; bool has_src_split; bool has_cdp; - + bool has_hdr; u32 mdss_count; struct sde_mdss_base_cfg mdss[MAX_BLOCKS]; diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c index 188649d946d6..9ec81c227e60 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c @@ -227,7 +227,7 @@ int sde_hw_cdm_enable(struct sde_hw_cdm *ctx, return -EINVAL; if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) { - if (fmt->chroma_sample != SDE_CHROMA_H1V2) + if (fmt->chroma_sample == SDE_CHROMA_H1V2) return -EINVAL; /*unsupported format */ opmode = BIT(0); opmode |= (fmt->chroma_sample << 1); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h index 2592fe26cb38..1edeff6a7aec 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h @@ -63,6 +63,8 @@ enum sde_format_flags { (((X)->fetch_mode == SDE_FETCH_UBWC) && \ test_bit(SDE_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag)) +#define TO_S15D16(_x_) ((_x_) << 7) + #define SDE_BLEND_FG_ALPHA_FG_CONST (0 << 0) #define SDE_BLEND_FG_ALPHA_BG_CONST (1 << 0) #define SDE_BLEND_FG_ALPHA_FG_PIXEL (2 << 0) @@ -339,6 +341,12 @@ enum sde_3d_blend_mode { BLEND_3D_MAX }; +enum sde_csc_type { + SDE_CSC_RGB2YUV_601L, + SDE_CSC_RGB2YUV_601FR, + SDE_MAX_CSC +}; + /** struct sde_format - defines the format configuration which * allows SDE HW to correctly fetch and decode the format * @base: base msm_format struture containing fourcc code diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index a9b01bcf7d0a..fcecaf5b5526 100644 --- 
a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -3394,6 +3394,13 @@ void radeon_combios_asic_init(struct drm_device *dev) rdev->pdev->subsystem_vendor == 0x103c && rdev->pdev->subsystem_device == 0x280a) return; + /* quirk for rs4xx Toshiba Sattellite L20-183 latop to make it resume + * - it hangs on resume inside the dynclk 1 table. + */ + if (rdev->family == CHIP_RS400 && + rdev->pdev->subsystem_vendor == 0x1179 && + rdev->pdev->subsystem_device == 0xff31) + return; /* DYN CLK 1 */ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 4aa2cbe4c85f..a77521695c9a 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -127,6 +127,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { * https://bugzilla.kernel.org/show_bug.cgi?id=51381 */ { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, + /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU + * https://bugs.freedesktop.org/show_bug.cgi?id=101491 + */ + { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, /* macbook pro 8.2 */ { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, { 0, 0, 0, 0, 0 }, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 13db8a2851ed..1f013d45c9e9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) list_for_each_entry_safe(entry, next, &man->list, head) vmw_cmdbuf_res_free(man, entry); + drm_ht_remove(&man->resources); kfree(man); } diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 990c9bca5127..afb489f10172 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -2210,21 
+2210,23 @@ static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device, if (fd != 0) dmabuf = dma_buf_get(fd - 1); } - up_read(¤t->mm->mmap_sem); - if (IS_ERR_OR_NULL(dmabuf)) + if (IS_ERR_OR_NULL(dmabuf)) { + up_read(¤t->mm->mmap_sem); return dmabuf ? PTR_ERR(dmabuf) : -ENODEV; + } ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf); if (ret) { dma_buf_put(dmabuf); + up_read(¤t->mm->mmap_sem); return ret; } /* Setup the user addr/cache mode for cache operations */ entry->memdesc.useraddr = hostptr; _setup_cache_mode(entry, vma); - + up_read(¤t->mm->mmap_sem); return 0; } #else diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index 63315e86d172..ad8b0131bb46 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -1022,6 +1022,8 @@ static void __force_on(struct kgsl_device *device, int flag, int on) if (on) { switch (flag) { case KGSL_PWRFLAGS_CLK_ON: + /* make sure pwrrail is ON before enabling clocks */ + kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON); kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON, KGSL_STATE_ACTIVE); break; @@ -1817,7 +1819,12 @@ static int kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state) struct kgsl_pwrctrl *pwr = &device->pwrctrl; int status = 0; - if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags)) + /* + * Disabling the regulator means also disabling dependent clocks. + * Hence don't disable it if force clock ON is set. 
+ */ + if (test_bit(KGSL_PWRFLAGS_POWER_ON, &pwr->ctrl_flags) || + test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->ctrl_flags)) return 0; if (state == KGSL_PWRFLAGS_OFF) { diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c index 7f93ab8fa8d4..3e4cc4490792 100644 --- a/drivers/gpu/msm/kgsl_pwrscale.c +++ b/drivers/gpu/msm/kgsl_pwrscale.c @@ -522,7 +522,8 @@ int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) struct kgsl_device *device = dev_get_drvdata(dev); struct kgsl_pwrctrl *pwr; struct kgsl_pwrlevel *pwr_level; - int level, i; + int level; + unsigned int i; unsigned long cur_freq; if (device == NULL) @@ -550,7 +551,12 @@ int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) /* If the governor recommends a new frequency, update it here */ if (*freq != cur_freq) { level = pwr->max_pwrlevel; - for (i = pwr->min_pwrlevel; i >= pwr->max_pwrlevel; i--) + /* + * Array index of pwrlevels[] should be within the permitted + * power levels, i.e., from max_pwrlevel to min_pwrlevel. 
+ */ + for (i = pwr->min_pwrlevel; (i >= pwr->max_pwrlevel + && i <= pwr->min_pwrlevel); i--) if (*freq <= pwr->pwrlevels[i].gpu_freq) { if (pwr->thermal_cycle == CYCLE_ACTIVE) level = _thermal_adjust(pwr, i); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index e37030624165..c7f8b70d15ee 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -285,6 +285,9 @@ #define USB_VENDOR_ID_DEALEXTREAME 0x10c5 #define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a +#define USB_VENDOR_ID_DELL 0x413c +#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a + #define USB_VENDOR_ID_DELORME 0x1163 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 0b80633bae91..d4d655a10df1 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -364,6 +364,15 @@ static int i2c_hid_hwreset(struct i2c_client *client) if (ret) return ret; + /* + * The HID over I2C specification states that if a DEVICE needs time + * after the PWR_ON request, it should utilise CLOCK stretching. + * However, it has been observed that the Windows driver provides a + * 1ms sleep between the PWR_ON and RESET requests and that some devices + * rely on this. 
+ */ + usleep_range(1000, 5000); + i2c_hid_dbg(ihid, "resetting...\n"); ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 6ca6ab00fa93..ce1543d69acb 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -72,6 +72,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL }, diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 316d8b783d94..691c7bb3afac 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012, 2016-2017 The Linux Foundation. All rights reserved. +/* Copyright (c) 2012, 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -764,6 +764,16 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) pm_runtime_get_sync(drvdata->dev); mutex_lock(&drvdata->mem_lock); + + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + spin_unlock_irqrestore(&drvdata->spinlock, flags); + mutex_unlock(&drvdata->mem_lock); + pm_runtime_put(drvdata->dev); + return -EBUSY; + } + spin_unlock_irqrestore(&drvdata->spinlock, flags); + if (drvdata->config_type == TMC_CONFIG_TYPE_ETR && drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) { /* @@ -807,19 +817,8 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) coresight_cti_map_trigout(drvdata->cti_flush, 1, 0); coresight_cti_map_trigin(drvdata->cti_reset, 2, 0); } - mutex_unlock(&drvdata->mem_lock); spin_lock_irqsave(&drvdata->spinlock, flags); - if (drvdata->reading) { - spin_unlock_irqrestore(&drvdata->spinlock, flags); - - if (drvdata->config_type == TMC_CONFIG_TYPE_ETR - && drvdata->out_mode == TMC_ETR_OUT_MODE_USB) - usb_qdss_close(drvdata->usbch); - pm_runtime_put(drvdata->dev); - - return -EBUSY; - } if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { tmc_etb_enable_hw(drvdata); @@ -845,6 +844,7 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode) */ drvdata->sticky_enable = true; spin_unlock_irqrestore(&drvdata->spinlock, flags); + mutex_unlock(&drvdata->mem_lock); dev_info(drvdata->dev, "TMC enabled\n"); return 0; @@ -1140,6 +1140,7 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata) unsigned long flags; enum tmc_mode mode; + mutex_lock(&drvdata->mem_lock); spin_lock_irqsave(&drvdata->spinlock, flags); if (!drvdata->sticky_enable) { dev_err(drvdata->dev, "enable tmc once before reading\n"); @@ -1172,11 +1173,13 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata) out: drvdata->reading = true; 
spin_unlock_irqrestore(&drvdata->spinlock, flags); + mutex_unlock(&drvdata->mem_lock); dev_info(drvdata->dev, "TMC read start\n"); return 0; err: spin_unlock_irqrestore(&drvdata->spinlock, flags); + mutex_unlock(&drvdata->mem_lock); return ret; } @@ -1353,7 +1356,11 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len, { struct tmc_drvdata *drvdata = container_of(file->private_data, struct tmc_drvdata, miscdev); - char *bufp = drvdata->buf + *ppos; + char *bufp; + + mutex_lock(&drvdata->mem_lock); + + bufp = drvdata->buf + *ppos; if (*ppos + len > drvdata->size) len = drvdata->size - *ppos; @@ -1375,6 +1382,7 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len, if (copy_to_user(data, bufp, len)) { dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__); + mutex_unlock(&drvdata->mem_lock); return -EFAULT; } @@ -1382,6 +1390,8 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len, dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n", __func__, len, (int)(drvdata->size - *ppos)); + + mutex_unlock(&drvdata->mem_lock); return len; } diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c index 1f042fa56eea..f4ed71f9c1a7 100644 --- a/drivers/i2c/busses/i2c-msm-v2.c +++ b/drivers/i2c/busses/i2c-msm-v2.c @@ -2232,19 +2232,8 @@ static int i2c_msm_pm_clk_enable(struct i2c_msm_ctrl *ctrl) static int i2c_msm_pm_xfer_start(struct i2c_msm_ctrl *ctrl) { int ret; - struct i2c_msm_xfer *xfer = &ctrl->xfer; mutex_lock(&ctrl->xfer.mtx); - /* if system is suspended just bail out */ - if (ctrl->pwr_state == I2C_MSM_PM_SYS_SUSPENDED) { - struct i2c_msg *msgs = xfer->msgs + xfer->cur_buf.msg_idx; - dev_err(ctrl->dev, - "slave:0x%x is calling xfer when system is suspended\n", - msgs->addr); - mutex_unlock(&ctrl->xfer.mtx); - return -EIO; - } - i2c_msm_pm_pinctrl_state(ctrl, true); pm_runtime_get_sync(ctrl->dev); /* @@ -2330,6 +2319,14 @@ i2c_msm_frmwrk_xfer(struct i2c_adapter *adap, 
struct i2c_msg msgs[], int num) return PTR_ERR(msgs); } + /* if system is suspended just bail out */ + if (ctrl->pwr_state == I2C_MSM_PM_SYS_SUSPENDED) { + dev_err(ctrl->dev, + "slave:0x%x is calling xfer when system is suspended\n", + msgs->addr); + return -EIO; + } + ret = i2c_msm_pm_xfer_start(ctrl); if (ret) return ret; diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c index 537cca877f66..5c20970ccbbb 100644 --- a/drivers/iio/adc/qcom-rradc.c +++ b/drivers/iio/adc/qcom-rradc.c @@ -331,8 +331,8 @@ static int rradc_post_process_therm(struct rradc_chip *chip, int64_t temp; /* K = code/4 */ - temp = div64_s64(adc_code, FG_ADC_RR_BATT_THERM_LSB_K); - temp *= FG_ADC_SCALE_MILLI_FACTOR; + temp = ((int64_t)adc_code * FG_ADC_SCALE_MILLI_FACTOR); + temp = div64_s64(temp, FG_ADC_RR_BATT_THERM_LSB_K); *result_millidegc = temp - FG_ADC_KELVINMIL_CELSIUSMIL; return 0; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index e7b96f1ac2c5..5be14ad29d46 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -788,6 +788,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), }, }, + { + /* Fujitsu UH554 laptop */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"), + }, + }, { } }; diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 4831eb910fc7..22160e481794 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -699,9 +699,9 @@ out_clear_state: out_unregister: mmu_notifier_unregister(&pasid_state->mn, mm); + mmput(mm); out_free: - mmput(mm); free_pasid_state(pasid_state); out: diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index b92b8a724efb..f9711aceef54 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1137,7 +1137,7 @@ static void 
dma_pte_free_level(struct dmar_domain *domain, int level, if (!dma_pte_present(pte) || dma_pte_superpage(pte)) goto next; - level_pfn = pfn & level_mask(level - 1); + level_pfn = pfn & level_mask(level); level_pte = phys_to_virt(dma_pte_addr(pte)); if (level > 2) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 33176a4aa6ef..92e6ae48caf8 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -394,36 +394,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) device->dev = dev; ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); - if (ret) { - kfree(device); - return ret; - } + if (ret) + goto err_free_device; device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); rename: if (!device->name) { - sysfs_remove_link(&dev->kobj, "iommu_group"); - kfree(device); - return -ENOMEM; + ret = -ENOMEM; + goto err_remove_link; } ret = sysfs_create_link_nowarn(group->devices_kobj, &dev->kobj, device->name); if (ret) { - kfree(device->name); if (ret == -EEXIST && i >= 0) { /* * Account for the slim chance of collision * and append an instance to the name. */ + kfree(device->name); device->name = kasprintf(GFP_KERNEL, "%s.%d", kobject_name(&dev->kobj), i++); goto rename; } - - sysfs_remove_link(&dev->kobj, "iommu_group"); - kfree(device); - return ret; + goto err_free_name; } kobject_get(group->devices_kobj); @@ -435,8 +429,10 @@ rename: mutex_lock(&group->mutex); list_add_tail(&device->list, &group->devices); if (group->domain) - __iommu_attach_device(group->domain, dev); + ret = __iommu_attach_device(group->domain, dev); mutex_unlock(&group->mutex); + if (ret) + goto err_put_group; /* Notify any listeners about change to group. 
*/ blocking_notifier_call_chain(&group->notifier, @@ -447,6 +443,21 @@ rename: pr_info("Adding device %s to group %d\n", dev_name(dev), group->id); return 0; + +err_put_group: + mutex_lock(&group->mutex); + list_del(&device->list); + mutex_unlock(&group->mutex); + dev->iommu_group = NULL; + kobject_put(group->devices_kobj); +err_free_name: + kfree(device->name); +err_remove_link: + sysfs_remove_link(&dev->kobj, "iommu_group"); +err_free_device: + kfree(device); + pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret); + return ret; } EXPORT_SYMBOL_GPL(iommu_group_add_device); diff --git a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c index 6af589e5c230..a2a89b92c9f1 100644 --- a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c +++ b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c @@ -1022,13 +1022,13 @@ static long msm_flash_subdev_do_ioctl( sd = vdev_to_v4l2_subdev(vdev); u32 = (struct msm_flash_cfg_data_t32 *)arg; - flash_data.cfg_type = u32->cfg_type; - for (i = 0; i < MAX_LED_TRIGGERS; i++) { - flash_data.flash_current[i] = u32->flash_current[i]; - flash_data.flash_duration[i] = u32->flash_duration[i]; - } switch (cmd) { case VIDIOC_MSM_FLASH_CFG32: + flash_data.cfg_type = u32->cfg_type; + for (i = 0; i < MAX_LED_TRIGGERS; i++) { + flash_data.flash_current[i] = u32->flash_current[i]; + flash_data.flash_duration[i] = u32->flash_duration[i]; + } cmd = VIDIOC_MSM_FLASH_CFG; switch (flash_data.cfg_type) { case CFG_FLASH_OFF: diff --git a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c index 28a5402a4359..236660dca3fb 100644 --- a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c +++ b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c @@ -781,11 +781,10 @@ static long msm_ois_subdev_do_ioctl( u32 = (struct msm_ois_cfg_data32 *)arg; parg = arg; - ois_data.cfgtype = u32->cfgtype; - switch (cmd) { 
case VIDIOC_MSM_OIS_CFG32: cmd = VIDIOC_MSM_OIS_CFG; + ois_data.cfgtype = u32->cfgtype; switch (u32->cfgtype) { case CFG_OIS_CONTROL: diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c index 0e0f10e7fbb5..63f5497e63b8 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c @@ -776,40 +776,6 @@ void msm_isp_check_for_output_error(struct vfe_device *vfe_dev, } } -static int msm_isp_check_sync_time(struct msm_vfe_src_info *src_info, - struct msm_isp_timestamp *ts, - struct master_slave_resource_info *ms_res) -{ - int i; - struct msm_vfe_src_info *master_src_info = NULL; - uint32_t master_time = 0, current_time; - - if (!ms_res->src_sof_mask) - return 0; - - for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) { - if (ms_res->src_info[i] == NULL) - continue; - if (src_info == ms_res->src_info[i] || - ms_res->src_info[i]->active == 0) - continue; - if (ms_res->src_sof_mask & - (1 << ms_res->src_info[i]->dual_hw_ms_info.index)) { - master_src_info = ms_res->src_info[i]; - break; - } - } - if (!master_src_info) - return 0; - master_time = master_src_info-> - dual_hw_ms_info.sof_info.mono_timestamp_ms; - current_time = ts->buf_time.tv_sec * 1000 + - ts->buf_time.tv_usec / 1000; - if ((current_time - master_time) > ms_res->sof_delta_threshold) - return 1; - return 0; -} - static void msm_isp_sync_dual_cam_frame_id( struct vfe_device *vfe_dev, struct master_slave_resource_info *ms_res, @@ -824,24 +790,11 @@ static void msm_isp_sync_dual_cam_frame_id( if (src_info->dual_hw_ms_info.sync_state == ms_res->dual_sync_mode) { - if (msm_isp_check_sync_time(src_info, ts, ms_res) == 0) { - (frame_src == VFE_PIX_0) ? src_info->frame_id += + (frame_src == VFE_PIX_0) ? src_info->frame_id += vfe_dev->axi_data.src_info[frame_src]. 
sof_counter_step : src_info->frame_id++; - return; - } - ms_res->src_sof_mask = 0; - ms_res->active_src_mask = 0; - for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) { - if (ms_res->src_info[i] == NULL) - continue; - if (ms_res->src_info[i]->active == 0) - continue; - ms_res->src_info[i]->dual_hw_ms_info. - sync_state = - MSM_ISP_DUAL_CAM_ASYNC; - } + return; } /* find highest frame id */ @@ -2616,6 +2569,7 @@ int msm_isp_axi_reset(struct vfe_device *vfe_dev, struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data; uint32_t bufq_handle = 0, bufq_id = 0; struct msm_isp_timestamp timestamp; + struct msm_vfe_frame_request_queue *queue_req; unsigned long flags; int vfe_idx; @@ -2652,8 +2606,18 @@ int msm_isp_axi_reset(struct vfe_device *vfe_dev, VFE_PING_FLAG); msm_isp_cfg_stream_scratch(stream_info, VFE_PONG_FLAG); + stream_info->undelivered_request_cnt = 0; spin_unlock_irqrestore(&stream_info->lock, flags); + while (!list_empty(&stream_info->request_q)) { + queue_req = list_first_entry_or_null( + &stream_info->request_q, + struct msm_vfe_frame_request_queue, list); + if (queue_req) { + queue_req->cmd_used = 0; + list_del(&queue_req->list); + } + } for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX; bufq_id++) { bufq_handle = stream_info->bufq_handle[bufq_id]; @@ -3446,7 +3410,7 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev, } if ((vfe_dev->axi_data.src_info[frame_src].active && (frame_id != vfe_dev->axi_data.src_info[frame_src].frame_id + vfe_dev-> - axi_data.src_info[VFE_PIX_0].sof_counter_step)) || + axi_data.src_info[frame_src].sof_counter_step)) || ((!vfe_dev->axi_data.src_info[frame_src].active))) { pr_debug("%s:%d invalid frame id %d cur frame id %d pix %d\n", __func__, __LINE__, frame_id, diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c index d30d8022b7ab..e87f2414a879 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c +++ 
b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c @@ -511,6 +511,9 @@ static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev, return -EINVAL; } + vfe_dev->axi_data. + src_info[input_cfg->input_src].sof_counter_step = 1; + vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock = input_cfg->input_pix_clk; vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg( diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c index ba9b4df6bf22..719b14226067 100644 --- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c +++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c @@ -374,6 +374,7 @@ static int msm_vb2_put_buf(struct vb2_v4l2_buffer *vb, int session_id, pr_err("VB buffer is INVALID vb=%pK, ses_id=%d, str_id=%d\n", vb, session_id, stream_id); spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock(&session->stream_rwlock); return -EINVAL; } msm_vb2 = @@ -428,6 +429,7 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id, pr_err("VB buffer is INVALID ses_id=%d, str_id=%d, vb=%pK\n", session_id, stream_id, vb); spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock(&session->stream_rwlock); return -EINVAL; } msm_vb2 = diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c index 491b8d31935a..cf7d1a8aa1f4 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c +++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c @@ -152,6 +152,12 @@ static int32_t msm_flash_i2c_write_table( conf_array.reg_setting = settings->reg_setting_a; conf_array.size = settings->size; + /* Validate the settings size */ + if ((!conf_array.size) || (conf_array.size > MAX_I2C_REG_SET)) { + pr_err("failed: invalid size %d", conf_array.size); + return -EINVAL; + } + return flash_ctrl->flash_i2c_client.i2c_func_tbl->i2c_write_table( &flash_ctrl->flash_i2c_client, 
&conf_array); } diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c index acb697946e18..10f72a2155db 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c @@ -814,6 +814,9 @@ static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx, bw /= TRAFFIC_SHAPE_CLKTICK_12MS; if (bw > 0xFF) bw = 0xFF; + else if (bw == 0) + bw = 1; + SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, BIT(31) | bw); SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw); diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 79b5b3504ccd..0da9c5caea13 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -487,12 +487,17 @@ static int mmc_devfreq_set_target(struct device *dev, struct mmc_devfeq_clk_scaling *clk_scaling; int err = 0; int abort; + unsigned long pflags = current->flags; + + /* Ensure scaling would happen even in memory pressure conditions */ + current->flags |= PF_MEMALLOC; if (!(host && freq)) { pr_err("%s: unexpected host/freq parameter\n", __func__); err = -EINVAL; goto out; } + clk_scaling = &host->clk_scaling; if (!clk_scaling->enable) @@ -551,6 +556,7 @@ static int mmc_devfreq_set_target(struct device *dev, rel_host: mmc_release_host(host); out: + tsk_restore_flags(current, pflags, PF_MEMALLOC); return err; } diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 7173bf56c2c5..a28d6b98a042 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1323,10 +1323,6 @@ int mmc_hs400_to_hs200(struct mmc_card *card) if (host->caps & MMC_CAP_WAIT_WHILE_BUSY) send_status = false; - /* Reduce frequency to HS */ - max_dtr = card->ext_csd.hs_max_dtr; - mmc_set_clock(host, max_dtr); - /* Switch HS400 to HS DDR */ val = EXT_CSD_TIMING_HS; err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, @@ -1337,6 +1333,10 @@ int mmc_hs400_to_hs200(struct 
mmc_card *card) mmc_set_timing(host, MMC_TIMING_MMC_DDR52); + /* Reduce frequency to HS */ + max_dtr = card->ext_csd.hs_max_dtr; + mmc_set_clock(host, max_dtr); + if (!send_status) { err = mmc_switch_status(card, false); if (err) diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c index 5abab8800891..9190057535e6 100644 --- a/drivers/mtd/bcm47xxpart.c +++ b/drivers/mtd/bcm47xxpart.c @@ -66,11 +66,13 @@ static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master, { uint32_t buf; size_t bytes_read; + int err; - if (mtd_read(master, offset, sizeof(buf), &bytes_read, - (uint8_t *)&buf) < 0) { - pr_err("mtd_read error while parsing (offset: 0x%X)!\n", - offset); + err = mtd_read(master, offset, sizeof(buf), &bytes_read, + (uint8_t *)&buf); + if (err && !mtd_is_bitflip(err)) { + pr_err("mtd_read error while parsing (offset: 0x%X): %d\n", + offset, err); goto out_default; } @@ -95,6 +97,7 @@ static int bcm47xxpart_parse(struct mtd_info *master, int trx_part = -1; int last_trx_part = -1; int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; + int err; /* * Some really old flashes (like AT45DB*) had smaller erasesize-s, but @@ -118,8 +121,8 @@ static int bcm47xxpart_parse(struct mtd_info *master, /* Parse block by block looking for magics */ for (offset = 0; offset <= master->size - blocksize; offset += blocksize) { - /* Nothing more in higher memory */ - if (offset >= 0x2000000) + /* Nothing more in higher memory on BCM47XX (MIPS) */ + if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000) break; if (curr_part >= BCM47XXPART_MAX_PARTS) { @@ -128,10 +131,11 @@ static int bcm47xxpart_parse(struct mtd_info *master, } /* Read beginning of the block */ - if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, - &bytes_read, (uint8_t *)buf) < 0) { - pr_err("mtd_read error while parsing (offset: 0x%X)!\n", - offset); + err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, + &bytes_read, (uint8_t *)buf); + if (err && !mtd_is_bitflip(err)) 
{ + pr_err("mtd_read error while parsing (offset: 0x%X): %d\n", + offset, err); continue; } @@ -252,10 +256,11 @@ static int bcm47xxpart_parse(struct mtd_info *master, } /* Read middle of the block */ - if (mtd_read(master, offset + 0x8000, 0x4, - &bytes_read, (uint8_t *)buf) < 0) { - pr_err("mtd_read error while parsing (offset: 0x%X)!\n", - offset); + err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read, + (uint8_t *)buf); + if (err && !mtd_is_bitflip(err)) { + pr_err("mtd_read error while parsing (offset: 0x%X): %d\n", + offset, err); continue; } @@ -275,10 +280,11 @@ static int bcm47xxpart_parse(struct mtd_info *master, } offset = master->size - possible_nvram_sizes[i]; - if (mtd_read(master, offset, 0x4, &bytes_read, - (uint8_t *)buf) < 0) { - pr_err("mtd_read error while reading at offset 0x%X!\n", - offset); + err = mtd_read(master, offset, 0x4, &bytes_read, + (uint8_t *)buf); + if (err && !mtd_is_bitflip(err)) { + pr_err("mtd_read error while reading (offset 0x%X): %d\n", + offset, err); continue; } diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 37e4135ab213..64d6f053c2a5 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1057,6 +1057,13 @@ static int spansion_quad_enable(struct spi_nor *nor) return -EINVAL; } + ret = spi_nor_wait_till_ready(nor); + if (ret) { + dev_err(nor->dev, + "timeout while writing configuration register\n"); + return ret; + } + /* read back and check it */ ret = read_cr(nor); if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 5e6238e0b2bd..75e6e7e6baed 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -2732,8 +2732,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata) /* Flush Tx queues */ ret = xgbe_flush_tx_queues(pdata); - if (ret) + if (ret) { + netdev_err(pdata->netdev, "error flushing TX queues\n"); return 
ret; + } /* * Initialize DMA related features diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 865b7e0b133b..64034ff081a0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -877,7 +877,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata) DBGPR("-->xgbe_start\n"); - hw_if->init(pdata); + ret = hw_if->init(pdata); + if (ret) + return ret; ret = phy_if->phy_start(pdata); if (ret) diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index b56c9c581359..70da30095b89 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -255,15 +255,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) while (ring->start != ring->end) { int slot_idx = ring->start % BGMAC_TX_RING_SLOTS; struct bgmac_slot_info *slot = &ring->slots[slot_idx]; - u32 ctl1; + u32 ctl0, ctl1; int len; if (slot_idx == empty_slot) break; + ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0); ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1); len = ctl1 & BGMAC_DESC_CTL1_LEN; - if (ctl1 & BGMAC_DESC_CTL0_SOF) + if (ctl0 & BGMAC_DESC_CTL0_SOF) /* Unmap no longer used buffer */ dma_unmap_single(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE); @@ -469,6 +470,11 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, len -= ETH_FCS_LEN; skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE); + if (unlikely(!skb)) { + bgmac_err(bgmac, "build_skb failed\n"); + put_page(virt_to_head_page(buf)); + break; + } skb_put(skb, BGMAC_RX_FRAME_OFFSET + BGMAC_RX_BUF_OFFSET + len); skb_pull(skb, BGMAC_RX_FRAME_OFFSET + @@ -1302,7 +1308,8 @@ static int bgmac_open(struct net_device *net_dev) phy_start(bgmac->phy_dev); - netif_carrier_on(net_dev); + netif_start_queue(net_dev); + return 0; } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 
1795c935ff02..7b8638ddb673 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1052,7 +1052,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, err: spin_unlock_bh(&adapter->mcc_lock); - if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) + if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST) status = -EPERM; return status; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 6a061f17a44f..4cd2a7d0124f 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2939,7 +2939,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, size, GFAR_RXB_TRUESIZE); /* try reuse page */ - if (unlikely(page_count(page) != 1)) + if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page))) return false; /* change offset to the other half */ diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index f9e4988ea30e..2f9b12cf9ee5 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1602,8 +1602,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) netdev->netdev_ops = &ibmveth_netdev_ops; netdev->ethtool_ops = &netdev_ethtool_ops; SET_NETDEV_DEV(netdev, &dev->dev); - netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->hw_features = NETIF_F_SG; + if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) { + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM; + } netdev->features |= netdev->hw_features; diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index d74f5f4e5782..07eabf72c480 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work) DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR, 
&lp->rx_dma_regs->dmasm); - korina_free_ring(dev); - napi_disable(&lp->napi); + korina_free_ring(dev); + if (korina_init(dev) < 0) { printk(KERN_ERR "%s: cannot restart device\n", dev->name); return; @@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev) tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR; writel(tmp, &lp->rx_dma_regs->dmasm); - korina_free_ring(dev); - napi_disable(&lp->napi); cancel_work_sync(&lp->restart_task); + korina_free_ring(dev); + free_irq(lp->rx_irq, dev); free_irq(lp->tx_irq, dev); free_irq(lp->ovr_irq, dev); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 71ec9cb08e06..15056f06754a 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2446,7 +2446,7 @@ static void mvneta_start_dev(struct mvneta_port *pp) mvneta_port_enable(pp); /* Enable polling on the port */ - for_each_present_cpu(cpu) { + for_each_online_cpu(cpu) { struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); napi_enable(&port->napi); @@ -2472,7 +2472,7 @@ static void mvneta_stop_dev(struct mvneta_port *pp) phy_stop(pp->phy_dev); - for_each_present_cpu(cpu) { + for_each_online_cpu(cpu) { struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); napi_disable(&port->napi); @@ -2902,13 +2902,11 @@ err_cleanup_rxqs: static int mvneta_stop(struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); - int cpu; mvneta_stop_dev(pp); mvneta_mdio_remove(pp); unregister_cpu_notifier(&pp->cpu_notifier); - for_each_present_cpu(cpu) - smp_call_function_single(cpu, mvneta_percpu_disable, pp, true); + on_each_cpu(mvneta_percpu_disable, pp, true); free_percpu_irq(dev->irq, pp->ports); mvneta_cleanup_rxqs(pp); mvneta_cleanup_txqs(pp); diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 603d1c3d3b2e..ff77b8b608bd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c 
@@ -542,8 +542,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) break; case MLX4_EVENT_TYPE_SRQ_LIMIT: - mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", - __func__); + mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n", + __func__, be32_to_cpu(eqe->event.srq.srqn), + eq->eqn); case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: if (mlx4_is_master(dev)) { /* forward only to slave owning the SRQ */ @@ -558,15 +559,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eq->eqn, eq->cons_index, ret); break; } - mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", - __func__, slave, - be32_to_cpu(eqe->event.srq.srqn), - eqe->type, eqe->subtype); + if (eqe->type == + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) + mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", + __func__, slave, + be32_to_cpu(eqe->event.srq.srqn), + eqe->type, eqe->subtype); if (!ret && slave != dev->caps.function) { - mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", - __func__, eqe->type, - eqe->subtype, slave); + if (eqe->type == + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) + mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", + __func__, eqe->type, + eqe->subtype, slave); mlx4_slave_event(dev, slave, eqe); break; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 1e611980cf99..f5c1f4acc57b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -153,8 +153,9 @@ static struct mlx5_profile profile[] = { }, }; -#define FW_INIT_TIMEOUT_MILI 2000 -#define FW_INIT_WAIT_MS 2 +#define FW_INIT_TIMEOUT_MILI 2000 +#define FW_INIT_WAIT_MS 2 +#define FW_PRE_INIT_TIMEOUT_MILI 10000 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) { @@ -934,6 +935,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) */ dev->state = MLX5_DEVICE_STATE_UP; + /* wait for firmware to accept 
initialization segments configurations + */ + err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI); + if (err) { + dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", + FW_PRE_INIT_TIMEOUT_MILI); + goto out; + } + err = mlx5_cmd_init(dev); if (err) { dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 1e61d4da72db..585e90f8341d 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -221,18 +221,6 @@ static void ravb_ring_free(struct net_device *ndev, int q) int ring_size; int i; - /* Free RX skb ringbuffer */ - if (priv->rx_skb[q]) { - for (i = 0; i < priv->num_rx_ring[q]; i++) - dev_kfree_skb(priv->rx_skb[q][i]); - } - kfree(priv->rx_skb[q]); - priv->rx_skb[q] = NULL; - - /* Free aligned TX buffers */ - kfree(priv->tx_align[q]); - priv->tx_align[q] = NULL; - if (priv->rx_ring[q]) { for (i = 0; i < priv->num_rx_ring[q]; i++) { struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; @@ -261,6 +249,18 @@ static void ravb_ring_free(struct net_device *ndev, int q) priv->tx_ring[q] = NULL; } + /* Free RX skb ringbuffer */ + if (priv->rx_skb[q]) { + for (i = 0; i < priv->num_rx_ring[q]; i++) + dev_kfree_skb(priv->rx_skb[q][i]); + } + kfree(priv->rx_skb[q]); + priv->rx_skb[q] = NULL; + + /* Free aligned TX buffers */ + kfree(priv->tx_align[q]); + priv->tx_align[q] = NULL; + /* Free TX skb ringbuffer. * SKBs are freed by ravb_tx_free() call above. 
*/ diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index d790cb8d9db3..8e832ba8ab24 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -2796,6 +2796,11 @@ const struct efx_nic_type falcon_a1_nic_type = { .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, .offload_features = NETIF_F_IP_CSUM, .mcdi_max_ver = -1, +#ifdef CONFIG_SFC_SRIOV + .vswitching_probe = efx_port_dummy_op_int, + .vswitching_restore = efx_port_dummy_op_int, + .vswitching_remove = efx_port_dummy_op_void, +#endif }; const struct efx_nic_type falcon_b0_nic_type = { @@ -2897,4 +2902,9 @@ const struct efx_nic_type falcon_b0_nic_type = { .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, .mcdi_max_ver = -1, .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, +#ifdef CONFIG_SFC_SRIOV + .vswitching_probe = efx_port_dummy_op_int, + .vswitching_restore = efx_port_dummy_op_int, + .vswitching_remove = efx_port_dummy_op_void, +#endif }; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index d2701c53ed68..ebec2dceff45 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -822,8 +822,6 @@ static int marvell_read_status(struct phy_device *phydev) phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) | mii_lpa_to_ethtool_lpa_t(lpa); - lpa &= adv; - if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) phydev->duplex = DUPLEX_FULL; else diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c index c0b4e65267af..46fe1ae919a3 100644 --- a/drivers/net/phy/mdio-bcm-iproc.c +++ b/drivers/net/phy/mdio-bcm-iproc.c @@ -81,8 +81,6 @@ static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg) if (rc) return rc; - iproc_mdio_config_clk(priv->base); - /* Prepare the read operation */ cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | (reg << MII_DATA_RA_SHIFT) | @@ -112,8 +110,6 @@ static int iproc_mdio_write(struct mii_bus *bus, int phy_id, if (rc) return rc; - 
iproc_mdio_config_clk(priv->base); - /* Prepare the write operation */ cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | (reg << MII_DATA_RA_SHIFT) | @@ -163,6 +159,8 @@ static int iproc_mdio_probe(struct platform_device *pdev) bus->read = iproc_mdio_read; bus->write = iproc_mdio_write; + iproc_mdio_config_clk(priv->base); + rc = of_mdiobus_register(bus, pdev->dev.of_node); if (rc) { dev_err(&pdev->dev, "MDIO bus registration failed\n"); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7f7c87762bc6..8dfc75250583 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -47,8 +47,16 @@ module_param(gso, bool, 0444); */ DECLARE_EWMA(pkt_len, 1, 64) +/* With mergeable buffers we align buffer address and use the low bits to + * encode its true size. Buffer size is up to 1 page so we need to align to + * square root of page size to ensure we reserve enough bits to encode the true + * size. + */ +#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2) + /* Minimum alignment for mergeable packet buffers. */ -#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) +#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \ + 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT) #define VIRTNET_DRIVER_VERSION "1.0.0" diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 9a986ccd42e5..dab3bf6649e6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2240,7 +2240,7 @@ static void vxlan_cleanup(unsigned long arg) = container_of(p, struct vxlan_fdb, hlist); unsigned long timeout; - if (f->state & NUD_PERMANENT) + if (f->state & (NUD_PERMANENT | NUD_NOARP)) continue; timeout = f->used + vxlan->cfg.age_interval * HZ; diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 2217e0cae4da..7945b1cd6244 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -311,6 +311,14 @@ config CNSS_CRYPTO from cnss platform driver. 
This crypto APIs used to generate cipher key and add support for the WLAN driver module security protocol. +config CNSS_QCA6290 + bool "Enable CNSS QCA6290 chipset specific changes" + ---help--- + This enables the changes from WLAN host driver that are specific to + CNSS QCA6290 chipset. + These changes are needed to support the new hardware architecture + for CNSS QCA6290 chipset. + source "drivers/net/wireless/ath/Kconfig" source "drivers/net/wireless/b43/Kconfig" source "drivers/net/wireless/b43legacy/Kconfig" @@ -332,6 +340,7 @@ source "drivers/net/wireless/mwifiex/Kconfig" source "drivers/net/wireless/cw1200/Kconfig" source "drivers/net/wireless/rsi/Kconfig" source "drivers/net/wireless/cnss/Kconfig" +source "drivers/net/wireless/cnss2/Kconfig" source "drivers/net/wireless/cnss_genl/Kconfig" source "drivers/net/wireless/cnss_utils/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 704a976bdfb5..f23a2fbc3afa 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -64,6 +64,7 @@ obj-$(CONFIG_RSI_91X) += rsi/ obj-$(CONFIG_WCNSS_CORE) += wcnss/ obj-$(CONFIG_CNSS) += cnss/ +obj-$(CONFIG_CNSS2) += cnss2/ obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/ obj-$(CONFIG_CNSS_CRYPTO) += cnss_crypto/ obj-$(CONFIG_CNSS_GENL) += cnss_genl/ diff --git a/drivers/net/wireless/cnss/Kconfig b/drivers/net/wireless/cnss/Kconfig index 8946f65df716..6faf9f1ef5d0 100644 --- a/drivers/net/wireless/cnss/Kconfig +++ b/drivers/net/wireless/cnss/Kconfig @@ -56,6 +56,9 @@ config CLD_LL_CORE select WEXT_PRIV select WEXT_SPY select WIRELESS_EXT + select CRYPTO + select CRYPTO_HASH + select CRYPTO_BLKCIPHER ---help--- This section contains the necessary modules needed to enable the core WLAN driver for Qualcomm QCA6174 chipset. 
@@ -73,7 +76,7 @@ config CNSS_SECURE_FW config BUS_AUTO_SUSPEND bool "Enable/Disable Runtime PM support for PCIe based WLAN Drivers" - depends on CNSS + depends on CNSS || CNSS2 depends on PCI ---help--- Runtime Power Management is supported for PCIe based WLAN Drivers. diff --git a/drivers/net/wireless/cnss2/Kconfig b/drivers/net/wireless/cnss2/Kconfig new file mode 100644 index 000000000000..85d2a7b30a84 --- /dev/null +++ b/drivers/net/wireless/cnss2/Kconfig @@ -0,0 +1,17 @@ +config CNSS2 + tristate "CNSS2 Platform Driver for Wi-Fi Module" + depends on !CNSS && PCI_MSM + ---help--- + This module adds the support for Connectivity Subsystem (CNSS) used + for PCIe based Wi-Fi devices with QCA6174/QCA6290 chipsets. + This driver also adds support to integrate WLAN module to subsystem + restart framework. + +config CNSS2_DEBUG + bool "CNSS2 Platform Driver Debug Support" + depends on CNSS2 + ---help--- + This option is to enable CNSS2 platform driver debug support which + primarily includes providing additional verbose logs for certain + features, enabling kernel panic for certain cases to aid the + debugging, and enabling any other debug mechanisms. diff --git a/drivers/net/wireless/cnss2/Makefile b/drivers/net/wireless/cnss2/Makefile new file mode 100644 index 000000000000..9d383c8daa43 --- /dev/null +++ b/drivers/net/wireless/cnss2/Makefile @@ -0,0 +1,9 @@ +obj-$(CONFIG_CNSS2) += cnss2.o + +cnss2-y := main.o +cnss2-y += debug.o +cnss2-y += pci.o +cnss2-y += power.o +cnss2-y += qmi.o +cnss2-y += utils.o +cnss2-y += wlan_firmware_service_v01.o diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c new file mode 100644 index 000000000000..360ab31c61dd --- /dev/null +++ b/drivers/net/wireless/cnss2/debug.c @@ -0,0 +1,174 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/err.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> +#include "main.h" +#include "debug.h" + +#define CNSS_IPC_LOG_PAGES 32 + +void *cnss_ipc_log_context; + +static int cnss_pin_connect_show(struct seq_file *s, void *data) +{ + struct cnss_plat_data *cnss_priv = s->private; + + seq_puts(s, "Pin connect results\n"); + seq_printf(s, "FW power pin result: %04x\n", + cnss_priv->pin_result.fw_pwr_pin_result); + seq_printf(s, "FW PHY IO pin result: %04x\n", + cnss_priv->pin_result.fw_phy_io_pin_result); + seq_printf(s, "FW RF pin result: %04x\n", + cnss_priv->pin_result.fw_rf_pin_result); + seq_printf(s, "Host pin result: %04x\n", + cnss_priv->pin_result.host_pin_result); + seq_puts(s, "\n"); + + return 0; +} + +static int cnss_pin_connect_open(struct inode *inode, struct file *file) +{ + return single_open(file, cnss_pin_connect_show, inode->i_private); +} + +static const struct file_operations cnss_pin_connect_fops = { + .read = seq_read, + .release = single_release, + .open = cnss_pin_connect_open, + .owner = THIS_MODULE, + .llseek = seq_lseek, +}; + +static int cnss_stats_show_state(struct seq_file *s, + struct cnss_plat_data *plat_priv) +{ + enum cnss_driver_state i; + int skip = 0; + unsigned long state; + + seq_printf(s, "\nState: 0x%lx(", plat_priv->driver_state); + for (i = 0, state = plat_priv->driver_state; state != 0; + state >>= 1, i++) { + if (!(state & 0x1)) + continue; + + if (skip++) + seq_puts(s, " | "); + + switch (i) { + case CNSS_QMI_WLFW_CONNECTED: + 
seq_puts(s, "QMI_WLFW_CONNECTED"); + continue; + case CNSS_FW_MEM_READY: + seq_puts(s, "FW_MEM_READY"); + continue; + case CNSS_FW_READY: + seq_puts(s, "FW_READY"); + continue; + case CNSS_COLD_BOOT_CAL: + seq_puts(s, "COLD_BOOT_CAL"); + continue; + case CNSS_DRIVER_LOADING: + seq_puts(s, "DRIVER_LOADING"); + continue; + case CNSS_DRIVER_UNLOADING: + seq_puts(s, "DRIVER_UNLOADING"); + continue; + case CNSS_DRIVER_PROBED: + seq_puts(s, "DRIVER_PROBED"); + continue; + case CNSS_DRIVER_RECOVERY: + seq_puts(s, "DRIVER_RECOVERY"); + continue; + case CNSS_FW_BOOT_RECOVERY: + seq_puts(s, "FW_BOOT_RECOVERY"); + continue; + case CNSS_DEV_ERR_NOTIFY: + seq_puts(s, "DEV_ERR"); + } + + seq_printf(s, "UNKNOWN-%d", i); + } + seq_puts(s, ")\n"); + + return 0; +} + +static int cnss_stats_show(struct seq_file *s, void *data) +{ + struct cnss_plat_data *plat_priv = s->private; + + cnss_stats_show_state(s, plat_priv); + + return 0; +} + +static int cnss_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, cnss_stats_show, inode->i_private); +} + +static const struct file_operations cnss_stats_fops = { + .read = seq_read, + .release = single_release, + .open = cnss_stats_open, + .owner = THIS_MODULE, + .llseek = seq_lseek, +}; + +int cnss_debugfs_create(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct dentry *root_dentry; + + root_dentry = debugfs_create_dir("cnss", 0); + if (IS_ERR(root_dentry)) { + ret = PTR_ERR(root_dentry); + cnss_pr_err("Unable to create debugfs %d\n", ret); + goto out; + } + plat_priv->root_dentry = root_dentry; + debugfs_create_file("pin_connect_result", 0644, root_dentry, plat_priv, + &cnss_pin_connect_fops); + debugfs_create_file("stats", 0644, root_dentry, plat_priv, + &cnss_stats_fops); +out: + return ret; +} + +void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv) +{ + debugfs_remove_recursive(plat_priv->root_dentry); +} + +int cnss_debug_init(void) +{ + cnss_ipc_log_context = 
ipc_log_context_create(CNSS_IPC_LOG_PAGES, + "cnss", 0); + if (!cnss_ipc_log_context) { + cnss_pr_err("Unable to create IPC log context!\n"); + return -EINVAL; + } + + return 0; +} + +void cnss_debug_deinit(void) +{ + if (cnss_ipc_log_context) { + ipc_log_context_destroy(cnss_ipc_log_context); + cnss_ipc_log_context = NULL; + } +} diff --git a/drivers/net/wireless/cnss2/debug.h b/drivers/net/wireless/cnss2/debug.h new file mode 100644 index 000000000000..1621514eb5b2 --- /dev/null +++ b/drivers/net/wireless/cnss2/debug.h @@ -0,0 +1,73 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CNSS_DEBUG_H +#define _CNSS_DEBUG_H + +#include <linux/ipc_logging.h> +#include <linux/printk.h> + +extern void *cnss_ipc_log_context; + +#define cnss_ipc_log_string(_x...) do { \ + if (cnss_ipc_log_context) \ + ipc_log_string(cnss_ipc_log_context, _x); \ + } while (0) + +#define cnss_pr_err(_fmt, ...) do { \ + pr_err("cnss: " _fmt, ##__VA_ARGS__); \ + cnss_ipc_log_string("ERR: " pr_fmt(_fmt), \ + ##__VA_ARGS__); \ + } while (0) + +#define cnss_pr_warn(_fmt, ...) do { \ + pr_warn("cnss: " _fmt, ##__VA_ARGS__); \ + cnss_ipc_log_string("WRN: " pr_fmt(_fmt), \ + ##__VA_ARGS__); \ + } while (0) + +#define cnss_pr_info(_fmt, ...) do { \ + pr_info("cnss: " _fmt, ##__VA_ARGS__); \ + cnss_ipc_log_string("INF: " pr_fmt(_fmt), \ + ##__VA_ARGS__); \ + } while (0) + +#define cnss_pr_dbg(_fmt, ...) 
do { \ + pr_debug("cnss: " _fmt, ##__VA_ARGS__); \ + cnss_ipc_log_string("DBG: " pr_fmt(_fmt), \ + ##__VA_ARGS__); \ + } while (0) + +#ifdef CONFIG_CNSS2_DEBUG +#define CNSS_ASSERT(_condition) do { \ + if (!(_condition)) { \ + cnss_pr_err("ASSERT at line %d\n", \ + __LINE__); \ + WARN_ON(1); \ + } \ + } while (0) +#else +#define CNSS_ASSERT(_condition) do { \ + if (!(_condition)) { \ + cnss_pr_err("ASSERT at line %d\n", \ + __LINE__); \ + WARN_ON(1); \ + } \ + } while (0) +#endif + +int cnss_debug_init(void); +void cnss_debug_deinit(void); +int cnss_debugfs_create(struct cnss_plat_data *plat_priv); +void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv); + +#endif /* _CNSS_DEBUG_H */ diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c new file mode 100644 index 000000000000..29bfe1f4d6ed --- /dev/null +++ b/drivers/net/wireless/cnss2/main.c @@ -0,0 +1,2309 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/delay.h> +#include <linux/jiffies.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pm_wakeup.h> +#include <linux/rwsem.h> +#include <linux/suspend.h> +#include <linux/timer.h> +#include <soc/qcom/ramdump.h> +#include <soc/qcom/subsystem_notif.h> + +#include "main.h" +#include "debug.h" +#include "pci.h" + +#define CNSS_DUMP_FORMAT_VER 0x11 +#define CNSS_DUMP_FORMAT_VER_V2 0x22 +#define CNSS_DUMP_MAGIC_VER_V2 0x42445953 +#define CNSS_DUMP_NAME "CNSS_WLAN" +#define CNSS_DUMP_DESC_SIZE 0x1000 +#define CNSS_DUMP_SEG_VER 0x1 +#define WLAN_RECOVERY_DELAY 1000 +#define FILE_SYSTEM_READY 1 +#define FW_READY_TIMEOUT 20000 +#define FW_ASSERT_TIMEOUT 5000 +#define CNSS_EVENT_PENDING 2989 + +static struct cnss_plat_data *plat_env; + +static DECLARE_RWSEM(cnss_pm_sem); + +static bool qmi_bypass; +#ifdef CONFIG_CNSS2_DEBUG +module_param(qmi_bypass, bool, 0600); +MODULE_PARM_DESC(qmi_bypass, "Bypass QMI from platform driver"); +#endif + +static bool enable_waltest; +#ifdef CONFIG_CNSS2_DEBUG +module_param(enable_waltest, bool, 0600); +MODULE_PARM_DESC(enable_waltest, "Enable to handle firmware waltest"); +#endif + +enum cnss_debug_quirks { + LINK_DOWN_SELF_RECOVERY, +}; + +unsigned long quirks; +#ifdef CONFIG_CNSS2_DEBUG +module_param(quirks, ulong, 0600); +MODULE_PARM_DESC(quirks, "Debug quirks for the driver"); +#endif + +static struct cnss_fw_files FW_FILES_QCA6174_FW_3_0 = { + "qwlan30.bin", "bdwlan30.bin", "otp30.bin", "utf30.bin", + "utfbd30.bin", "epping30.bin", "evicted30.bin" +}; + +static struct cnss_fw_files FW_FILES_DEFAULT = { + "qwlan.bin", "bdwlan.bin", "otp.bin", "utf.bin", + "utfbd.bin", "epping.bin", "evicted.bin" +}; + +struct cnss_driver_event { + struct list_head list; + enum cnss_driver_event_type type; + bool sync; + struct completion complete; + int ret; + void *data; +}; + +static enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev) +{ + if (!dev) + return 
CNSS_BUS_NONE; + + if (!dev->bus) + return CNSS_BUS_NONE; + + if (memcmp(dev->bus->name, "pci", 3) == 0) + return CNSS_BUS_PCI; + else + return CNSS_BUS_NONE; +} + +static void cnss_set_plat_priv(struct platform_device *plat_dev, + struct cnss_plat_data *plat_priv) +{ + plat_env = plat_priv; +} + +static struct cnss_plat_data *cnss_get_plat_priv(struct platform_device + *plat_dev) +{ + return plat_env; +} + +void *cnss_bus_dev_to_bus_priv(struct device *dev) +{ + if (!dev) + return NULL; + + switch (cnss_get_dev_bus_type(dev)) { + case CNSS_BUS_PCI: + return cnss_get_pci_priv(to_pci_dev(dev)); + default: + return NULL; + } +} + +struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev) +{ + void *bus_priv; + + if (!dev) + return cnss_get_plat_priv(NULL); + + bus_priv = cnss_bus_dev_to_bus_priv(dev); + if (!bus_priv) + return NULL; + + switch (cnss_get_dev_bus_type(dev)) { + case CNSS_BUS_PCI: + return cnss_pci_priv_to_plat_priv(bus_priv); + default: + return NULL; + } +} + +static int cnss_pm_notify(struct notifier_block *b, + unsigned long event, void *p) +{ + switch (event) { + case PM_SUSPEND_PREPARE: + down_write(&cnss_pm_sem); + break; + case PM_POST_SUSPEND: + up_write(&cnss_pm_sem); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block cnss_pm_notifier = { + .notifier_call = cnss_pm_notify, +}; + +static void cnss_pm_stay_awake(struct cnss_plat_data *plat_priv) +{ + if (atomic_inc_return(&plat_priv->pm_count) != 1) + return; + + cnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n", + plat_priv->driver_state, + atomic_read(&plat_priv->pm_count)); + pm_stay_awake(&plat_priv->plat_dev->dev); +} + +static void cnss_pm_relax(struct cnss_plat_data *plat_priv) +{ + int r = atomic_dec_return(&plat_priv->pm_count); + + WARN_ON(r < 0); + + if (r != 0) + return; + + cnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n", + plat_priv->driver_state, + atomic_read(&plat_priv->pm_count)); + pm_relax(&plat_priv->plat_dev->dev); +} + +void 
cnss_lock_pm_sem(void) +{ + down_read(&cnss_pm_sem); +} +EXPORT_SYMBOL(cnss_lock_pm_sem); + +void cnss_release_pm_sem(void) +{ + up_read(&cnss_pm_sem); +} +EXPORT_SYMBOL(cnss_release_pm_sem); + +int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files, + u32 target_type, u32 target_version) +{ + if (!pfw_files) + return -ENODEV; + + switch (target_version) { + case QCA6174_REV3_VERSION: + case QCA6174_REV3_2_VERSION: + memcpy(pfw_files, &FW_FILES_QCA6174_FW_3_0, sizeof(*pfw_files)); + break; + default: + memcpy(pfw_files, &FW_FILES_DEFAULT, sizeof(*pfw_files)); + cnss_pr_err("Unknown target version, type: 0x%X, version: 0x%X", + target_type, target_version); + break; + } + + return 0; +} +EXPORT_SYMBOL(cnss_get_fw_files_for_target); + +int cnss_request_bus_bandwidth(int bandwidth) +{ + int ret = 0; + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + struct cnss_bus_bw_info *bus_bw_info; + + if (!plat_priv) + return -ENODEV; + + bus_bw_info = &plat_priv->bus_bw_info; + if (!bus_bw_info->bus_client) + return -EINVAL; + + switch (bandwidth) { + case CNSS_BUS_WIDTH_NONE: + case CNSS_BUS_WIDTH_LOW: + case CNSS_BUS_WIDTH_MEDIUM: + case CNSS_BUS_WIDTH_HIGH: + ret = msm_bus_scale_client_update_request( + bus_bw_info->bus_client, bandwidth); + if (!ret) + bus_bw_info->current_bw_vote = bandwidth; + else + cnss_pr_err("Could not set bus bandwidth: %d, err = %d\n", + bandwidth, ret); + break; + default: + cnss_pr_err("Invalid bus bandwidth: %d", bandwidth); + ret = -EINVAL; + } + + return ret; +} +EXPORT_SYMBOL(cnss_request_bus_bandwidth); + +int cnss_get_platform_cap(struct cnss_platform_cap *cap) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + + if (!plat_priv) + return -ENODEV; + + if (cap) + *cap = plat_priv->cap; + + return 0; +} +EXPORT_SYMBOL(cnss_get_platform_cap); + +int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info) +{ + int ret = 0; + struct cnss_plat_data *plat_priv = 
cnss_bus_dev_to_plat_priv(dev); + void *bus_priv = cnss_bus_dev_to_bus_priv(dev); + + if (!plat_priv) + return -ENODEV; + + ret = cnss_pci_get_bar_info(bus_priv, &info->va, &info->pa); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL(cnss_get_soc_info); + +void cnss_set_driver_status(enum cnss_driver_status driver_status) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + + if (!plat_priv) + return; + + plat_priv->driver_status = driver_status; +} +EXPORT_SYMBOL(cnss_set_driver_status); + +void cnss_request_pm_qos(u32 qos_val) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + + if (!plat_priv) + return; + + pm_qos_add_request(&plat_priv->qos_request, PM_QOS_CPU_DMA_LATENCY, + qos_val); +} +EXPORT_SYMBOL(cnss_request_pm_qos); + +void cnss_remove_pm_qos(void) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + + if (!plat_priv) + return; + + pm_qos_remove_request(&plat_priv->qos_request); +} +EXPORT_SYMBOL(cnss_remove_pm_qos); + +u8 *cnss_common_get_wlan_mac_address(struct device *dev, u32 *num) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + struct cnss_wlan_mac_info *wlan_mac_info; + struct cnss_wlan_mac_addr *addr; + + if (!plat_priv) + goto out; + + wlan_mac_info = &plat_priv->wlan_mac_info; + if (!wlan_mac_info->is_wlan_mac_set) { + cnss_pr_info("Platform driver doesn't have any MAC address!\n"); + goto out; + } + + addr = &wlan_mac_info->wlan_mac_addr; + *num = addr->no_of_mac_addr_set; + + return &addr->mac_addr[0][0]; +out: + *num = 0; + return NULL; +} +EXPORT_SYMBOL(cnss_common_get_wlan_mac_address); + +int cnss_wlan_enable(struct device *dev, + struct cnss_wlan_enable_cfg *config, + enum cnss_driver_mode mode, + const char *host_version) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + struct wlfw_wlan_cfg_req_msg_v01 req; + u32 i; + int ret = 0; + + if (plat_priv->device_id == QCA6174_DEVICE_ID) + return 0; + + if (qmi_bypass) 
+ return 0; + + if (!config || !host_version) { + cnss_pr_err("Invalid config or host_version pointer\n"); + return -EINVAL; + } + + cnss_pr_dbg("Mode: %d, config: %pK, host_version: %s\n", + mode, config, host_version); + + if (mode == CNSS_WALTEST || mode == CNSS_CCPM) + goto skip_cfg; + + memset(&req, 0, sizeof(req)); + + req.host_version_valid = 1; + strlcpy(req.host_version, host_version, + QMI_WLFW_MAX_STR_LEN_V01 + 1); + + req.tgt_cfg_valid = 1; + if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01) + req.tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01; + else + req.tgt_cfg_len = config->num_ce_tgt_cfg; + for (i = 0; i < req.tgt_cfg_len; i++) { + req.tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num; + req.tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir; + req.tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries; + req.tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max; + req.tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags; + } + + req.svc_cfg_valid = 1; + if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01) + req.svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01; + else + req.svc_cfg_len = config->num_ce_svc_pipe_cfg; + for (i = 0; i < req.svc_cfg_len; i++) { + req.svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id; + req.svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir; + req.svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num; + } + + req.shadow_reg_v2_valid = 1; + if (config->num_shadow_reg_v2_cfg > + QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01) + req.shadow_reg_v2_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01; + else + req.shadow_reg_v2_len = config->num_shadow_reg_v2_cfg; + + memcpy(req.shadow_reg_v2, config->shadow_reg_v2_cfg, + sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01) + * req.shadow_reg_v2_len); + + ret = cnss_wlfw_wlan_cfg_send_sync(plat_priv, &req); + if (ret) + goto out; + +skip_cfg: + ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, mode); +out: + return ret; +} +EXPORT_SYMBOL(cnss_wlan_enable); + +int cnss_wlan_disable(struct device *dev, enum 
cnss_driver_mode mode) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + + if (plat_priv->device_id == QCA6174_DEVICE_ID) + return 0; + + if (qmi_bypass) + return 0; + + return cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01); +} +EXPORT_SYMBOL(cnss_wlan_disable); + +#ifdef CONFIG_CNSS2_DEBUG +int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type, + u32 data_len, u8 *output) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + int ret = 0; + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return -EINVAL; + } + + if (plat_priv->device_id == QCA6174_DEVICE_ID) + return 0; + + if (!output || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) { + cnss_pr_err("Invalid parameters for athdiag read: output %p, data_len %u\n", + output, data_len); + ret = -EINVAL; + goto out; + } + + if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) { + cnss_pr_err("Invalid state for athdiag read: 0x%lx\n", + plat_priv->driver_state); + ret = -EINVAL; + goto out; + } + + ret = cnss_wlfw_athdiag_read_send_sync(plat_priv, offset, mem_type, + data_len, output); + +out: + return ret; +} +EXPORT_SYMBOL(cnss_athdiag_read); + +int cnss_athdiag_write(struct device *dev, u32 offset, u32 mem_type, + u32 data_len, u8 *input) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + int ret = 0; + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return -EINVAL; + } + + if (plat_priv->device_id == QCA6174_DEVICE_ID) + return 0; + + if (!input || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) { + cnss_pr_err("Invalid parameters for athdiag write: input %p, data_len %u\n", + input, data_len); + ret = -EINVAL; + goto out; + } + + if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) { + cnss_pr_err("Invalid state for athdiag write: 0x%lx\n", + plat_priv->driver_state); + ret = -EINVAL; + goto out; + } + + ret = cnss_wlfw_athdiag_write_send_sync(plat_priv, offset, mem_type, + 
data_len, input); + +out: + return ret; +} +EXPORT_SYMBOL(cnss_athdiag_write); +#else +int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type, + u32 data_len, u8 *output) +{ + return -EPERM; +} +EXPORT_SYMBOL(cnss_athdiag_read); + +int cnss_athdiag_write(struct device *dev, u32 offset, u32 mem_type, + u32 data_len, u8 *input) +{ + return -EPERM; +} +EXPORT_SYMBOL(cnss_athdiag_write); +#endif + +int cnss_set_fw_log_mode(struct device *dev, u8 fw_log_mode) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + + if (plat_priv->device_id == QCA6174_DEVICE_ID) + return 0; + + return cnss_wlfw_ini_send_sync(plat_priv, fw_log_mode); +} +EXPORT_SYMBOL(cnss_set_fw_log_mode); + +static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + + if (!plat_priv) + return -ENODEV; + + set_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state); + + ret = cnss_wlfw_tgt_cap_send_sync(plat_priv); + if (ret) + goto out; + + ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv); + if (ret) + goto out; + + ret = cnss_pci_load_m3(plat_priv->bus_priv); + if (ret) + goto out; + + ret = cnss_wlfw_m3_dnld_send_sync(plat_priv); + if (ret) + goto out; + + return 0; +out: + return ret; +} + +static int cnss_driver_call_probe(struct cnss_plat_data *plat_priv) +{ + int ret; + struct cnss_pci_data *pci_priv = plat_priv->bus_priv; + + if (!plat_priv->driver_ops) { + cnss_pr_err("driver_ops is NULL!"); + ret = -EINVAL; + goto out; + } + + if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) { + ret = plat_priv->driver_ops->reinit(pci_priv->pci_dev, + pci_priv->pci_device_id); + if (ret) { + cnss_pr_err("Failed to reinit host driver, err = %d\n", + ret); + goto out; + } + clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state); + } else { + ret = plat_priv->driver_ops->probe(pci_priv->pci_dev, + pci_priv->pci_device_id); + if (ret) { + cnss_pr_err("Failed to probe host driver, err = %d\n", + ret); + goto out; + } + clear_bit(CNSS_DRIVER_LOADING, 
&plat_priv->driver_state); + set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state); + } + + return 0; + +out: + return ret; +} + +static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + + if (!plat_priv) + return -ENODEV; + + del_timer(&plat_priv->fw_boot_timer); + set_bit(CNSS_FW_READY, &plat_priv->driver_state); + + if (test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state)) { + clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state); + clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state); + } + + if (enable_waltest) { + ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, + QMI_WLFW_WALTEST_V01); + } else if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state)) { + ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, + QMI_WLFW_CALIBRATION_V01); + } else { + ret = cnss_driver_call_probe(plat_priv); + } + + if (ret) + goto shutdown; + + return 0; + +shutdown: + cnss_pci_stop_mhi(plat_priv->bus_priv); + cnss_suspend_pci_link(plat_priv->bus_priv); + cnss_power_off_device(plat_priv); + + return ret; +} + +static char *cnss_driver_event_to_str(enum cnss_driver_event_type type) +{ + switch (type) { + case CNSS_DRIVER_EVENT_SERVER_ARRIVE: + return "SERVER_ARRIVE"; + case CNSS_DRIVER_EVENT_SERVER_EXIT: + return "SERVER_EXIT"; + case CNSS_DRIVER_EVENT_REQUEST_MEM: + return "REQUEST_MEM"; + case CNSS_DRIVER_EVENT_FW_MEM_READY: + return "FW_MEM_READY"; + case CNSS_DRIVER_EVENT_FW_READY: + return "FW_READY"; + case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START: + return "COLD_BOOT_CAL_START"; + case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE: + return "COLD_BOOT_CAL_DONE"; + case CNSS_DRIVER_EVENT_REGISTER_DRIVER: + return "REGISTER_DRIVER"; + case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER: + return "UNREGISTER_DRIVER"; + case CNSS_DRIVER_EVENT_RECOVERY: + return "RECOVERY"; + case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT: + return "FORCE_FW_ASSERT"; + case CNSS_DRIVER_EVENT_MAX: + return "EVENT_MAX"; + } + + return "UNKNOWN"; +}; + +int cnss_driver_event_post(struct 
cnss_plat_data *plat_priv, + enum cnss_driver_event_type type, + bool sync, void *data) +{ + struct cnss_driver_event *event; + unsigned long flags; + int gfp = GFP_KERNEL; + int ret = 0; + + if (!plat_priv) + return -ENODEV; + + cnss_pr_dbg("Posting event: %s(%d)%s, state: 0x%lx\n", + cnss_driver_event_to_str(type), type, + sync ? "-sync" : "", plat_priv->driver_state); + + if (type >= CNSS_DRIVER_EVENT_MAX) { + cnss_pr_err("Invalid Event type: %d, can't post", type); + return -EINVAL; + } + + if (in_interrupt() || irqs_disabled()) + gfp = GFP_ATOMIC; + + event = kzalloc(sizeof(*event), gfp); + if (!event) + return -ENOMEM; + + cnss_pm_stay_awake(plat_priv); + + event->type = type; + event->data = data; + init_completion(&event->complete); + event->ret = CNSS_EVENT_PENDING; + event->sync = sync; + + spin_lock_irqsave(&plat_priv->event_lock, flags); + list_add_tail(&event->list, &plat_priv->event_list); + spin_unlock_irqrestore(&plat_priv->event_lock, flags); + + queue_work(plat_priv->event_wq, &plat_priv->event_work); + + if (!sync) + goto out; + + ret = wait_for_completion_interruptible(&event->complete); + + cnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n", + cnss_driver_event_to_str(type), type, + plat_priv->driver_state, ret, event->ret); + + spin_lock_irqsave(&plat_priv->event_lock, flags); + if (ret == -ERESTARTSYS && event->ret == CNSS_EVENT_PENDING) { + event->sync = false; + spin_unlock_irqrestore(&plat_priv->event_lock, flags); + ret = -EINTR; + goto out; + } + spin_unlock_irqrestore(&plat_priv->event_lock, flags); + + ret = event->ret; + kfree(event); + +out: + cnss_pm_relax(plat_priv); + return ret; +} + +int cnss_power_up(struct device *dev) +{ + int ret = 0; + void *bus_priv = cnss_bus_dev_to_bus_priv(dev); + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + + if (!bus_priv || !plat_priv) + return -ENODEV; + + if (plat_priv->device_id != QCA6174_DEVICE_ID) { + cnss_pr_dbg("Power up is not supported for device ID 
0x%lx\n", + plat_priv->device_id); + return 0; + } + + ret = cnss_power_on_device(plat_priv); + if (ret) { + cnss_pr_err("Failed to power on device, err = %d\n", ret); + goto err_power_on; + } + + ret = cnss_resume_pci_link(bus_priv); + if (ret) { + cnss_pr_err("Failed to resume PCI link, err = %d\n", ret); + goto err_resume_link; + } + + return 0; +err_resume_link: + cnss_power_off_device(plat_priv); +err_power_on: + return ret; +} +EXPORT_SYMBOL(cnss_power_up); + +int cnss_power_down(struct device *dev) +{ + int ret = 0; + void *bus_priv = cnss_bus_dev_to_bus_priv(dev); + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + + if (!bus_priv || !plat_priv) + return -ENODEV; + + if (plat_priv->device_id != QCA6174_DEVICE_ID) { + cnss_pr_dbg("Power down is not supported for device ID 0x%lx\n", + plat_priv->device_id); + return 0; + } + + cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE); + cnss_pci_set_monitor_wake_intr(bus_priv, false); + cnss_pci_set_auto_suspended(bus_priv, 0); + + ret = cnss_suspend_pci_link(bus_priv); + if (ret) + cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret); + + cnss_power_off_device(plat_priv); + + return 0; +} +EXPORT_SYMBOL(cnss_power_down); + +int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops) +{ + int ret = 0; + struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL); + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return -ENODEV; + } + + if (plat_priv->driver_ops) { + cnss_pr_err("Driver has already registered!\n"); + return -EEXIST; + } + + ret = cnss_driver_event_post(plat_priv, + CNSS_DRIVER_EVENT_REGISTER_DRIVER, + true, driver_ops); + return ret; +} +EXPORT_SYMBOL(cnss_wlan_register_driver); + +void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops) +{ + struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL); + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return; + } + + cnss_driver_event_post(plat_priv, + 
CNSS_DRIVER_EVENT_UNREGISTER_DRIVER, + true, NULL); +} +EXPORT_SYMBOL(cnss_wlan_unregister_driver); + +static int cnss_get_resources(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + + ret = cnss_get_vreg(plat_priv); + if (ret) { + cnss_pr_err("Failed to get vreg, err = %d\n", ret); + goto out; + } + + ret = cnss_get_pinctrl(plat_priv); + if (ret) { + cnss_pr_err("Failed to get pinctrl, err = %d\n", ret); + goto out; + } + + return 0; +out: + return ret; +} + +static void cnss_put_resources(struct cnss_plat_data *plat_priv) +{ +} + +static int cnss_modem_notifier_nb(struct notifier_block *nb, + unsigned long code, + void *ss_handle) +{ + struct cnss_plat_data *plat_priv = + container_of(nb, struct cnss_plat_data, modem_nb); + struct cnss_pci_data *pci_priv = plat_priv->bus_priv; + struct cnss_esoc_info *esoc_info; + struct cnss_wlan_driver *driver_ops; + + cnss_pr_dbg("Modem notifier: event %lu\n", code); + + if (!pci_priv) + return NOTIFY_DONE; + + esoc_info = &plat_priv->esoc_info; + + if (code == SUBSYS_AFTER_POWERUP) + esoc_info->modem_current_status = 1; + else if (code == SUBSYS_BEFORE_SHUTDOWN) + esoc_info->modem_current_status = 0; + else + return NOTIFY_DONE; + + driver_ops = plat_priv->driver_ops; + if (!driver_ops || !driver_ops->modem_status) + return NOTIFY_DONE; + + driver_ops->modem_status(pci_priv->pci_dev, + esoc_info->modem_current_status); + + return NOTIFY_OK; +} + +static int cnss_register_esoc(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct device *dev; + struct cnss_esoc_info *esoc_info; + struct esoc_desc *esoc_desc; + const char *client_desc; + + dev = &plat_priv->plat_dev->dev; + esoc_info = &plat_priv->esoc_info; + + esoc_info->notify_modem_status = + of_property_read_bool(dev->of_node, + "qcom,notify-modem-status"); + + if (esoc_info->notify_modem_status) + goto out; + + ret = of_property_read_string_index(dev->of_node, "esoc-names", 0, + &client_desc); + if (ret) { + cnss_pr_dbg("esoc-names is not defined in DT, 
skip!\n"); + } else { + esoc_desc = devm_register_esoc_client(dev, client_desc); + if (IS_ERR_OR_NULL(esoc_desc)) { + ret = PTR_RET(esoc_desc); + cnss_pr_err("Failed to register esoc_desc, err = %d\n", + ret); + goto out; + } + esoc_info->esoc_desc = esoc_desc; + } + + plat_priv->modem_nb.notifier_call = cnss_modem_notifier_nb; + esoc_info->modem_current_status = 0; + esoc_info->modem_notify_handler = + subsys_notif_register_notifier(esoc_info->esoc_desc ? + esoc_info->esoc_desc->name : + "modem", &plat_priv->modem_nb); + if (IS_ERR(esoc_info->modem_notify_handler)) { + ret = PTR_ERR(esoc_info->modem_notify_handler); + cnss_pr_err("Failed to register esoc notifier, err = %d\n", + ret); + goto unreg_esoc; + } + + return 0; +unreg_esoc: + if (esoc_info->esoc_desc) + devm_unregister_esoc_client(dev, esoc_info->esoc_desc); +out: + return ret; +} + +static void cnss_unregister_esoc(struct cnss_plat_data *plat_priv) +{ + struct device *dev; + struct cnss_esoc_info *esoc_info; + + dev = &plat_priv->plat_dev->dev; + esoc_info = &plat_priv->esoc_info; + + if (esoc_info->notify_modem_status) + subsys_notif_unregister_notifier(esoc_info-> + modem_notify_handler, + &plat_priv->modem_nb); + if (esoc_info->esoc_desc) + devm_unregister_esoc_client(dev, esoc_info->esoc_desc); +} + +static int cnss_qca6174_powerup(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct cnss_pci_data *pci_priv = plat_priv->bus_priv; + + if (!pci_priv) { + cnss_pr_err("pci_priv is NULL!\n"); + return -ENODEV; + } + + if (!plat_priv->driver_ops) { + cnss_pr_err("driver_ops is NULL!\n"); + return -EINVAL; + } + + ret = cnss_power_on_device(plat_priv); + if (ret) { + cnss_pr_err("Failed to power on device, err = %d\n", ret); + goto out; + } + + ret = cnss_resume_pci_link(pci_priv); + if (ret) { + cnss_pr_err("Failed to resume PCI link, err = %d\n", ret); + goto power_off; + } + + ret = cnss_driver_call_probe(plat_priv); + if (ret) + goto suspend_link; + + return 0; +suspend_link: + 
cnss_suspend_pci_link(pci_priv); +power_off: + cnss_power_off_device(plat_priv); +out: + return ret; +} + +static int cnss_qca6174_shutdown(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct cnss_pci_data *pci_priv = plat_priv->bus_priv; + + if (!pci_priv) + return -ENODEV; + + if (!plat_priv->driver_ops) + return -EINVAL; + + if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) { + cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE); + plat_priv->driver_ops->remove(pci_priv->pci_dev); + cnss_pci_set_monitor_wake_intr(pci_priv, false); + cnss_pci_set_auto_suspended(pci_priv, 0); + } else { + plat_priv->driver_ops->shutdown(pci_priv->pci_dev); + } + + ret = cnss_suspend_pci_link(pci_priv); + if (ret) + cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret); + + cnss_power_off_device(plat_priv); + + if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) { + clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state); + clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state); + } + + return ret; +} + +static void cnss_qca6174_crash_shutdown(struct cnss_plat_data *plat_priv) +{ + struct cnss_pci_data *pci_priv = plat_priv->bus_priv; + + if (!plat_priv->driver_ops) + return; + + plat_priv->driver_ops->crash_shutdown(pci_priv->pci_dev); +} + +static int cnss_qca6290_powerup(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct cnss_pci_data *pci_priv = plat_priv->bus_priv; + unsigned int timeout; + + if (!pci_priv) { + cnss_pr_err("pci_priv is NULL!\n"); + return -ENODEV; + } + + if (plat_priv->ramdump_info_v2.dump_data_valid) { + cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT); + cnss_pci_clear_dump_info(pci_priv); + } + + ret = cnss_power_on_device(plat_priv); + if (ret) { + cnss_pr_err("Failed to power on device, err = %d\n", ret); + goto out; + } + + ret = cnss_resume_pci_link(pci_priv); + if (ret) { + cnss_pr_err("Failed to resume PCI link, err = %d\n", ret); + goto power_off; + } + + timeout = cnss_get_qmi_timeout(); + + ret = 
cnss_pci_start_mhi(pci_priv); + if (ret) { + cnss_pr_err("Failed to start MHI, err = %d\n", ret); + if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) && + !pci_priv->pci_link_down_ind && timeout) + mod_timer(&plat_priv->fw_boot_timer, + jiffies + msecs_to_jiffies(timeout >> 1)); + return 0; + } + + cnss_set_pin_connect_status(plat_priv); + + if (qmi_bypass) { + ret = cnss_driver_call_probe(plat_priv); + if (ret) + goto stop_mhi; + } else if (timeout) { + mod_timer(&plat_priv->fw_boot_timer, + jiffies + msecs_to_jiffies(timeout << 1)); + } + + return 0; + +stop_mhi: + cnss_pci_stop_mhi(pci_priv); + cnss_suspend_pci_link(pci_priv); +power_off: + cnss_power_off_device(plat_priv); +out: + return ret; +} + +static int cnss_qca6290_shutdown(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct cnss_pci_data *pci_priv = plat_priv->bus_priv; + + if (!pci_priv) + return -ENODEV; + + if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) || + test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state)) + goto skip_driver_remove; + + if (!plat_priv->driver_ops) + return -EINVAL; + + if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) { + cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE); + plat_priv->driver_ops->remove(pci_priv->pci_dev); + cnss_pci_set_monitor_wake_intr(pci_priv, false); + cnss_pci_set_auto_suspended(pci_priv, 0); + } else { + plat_priv->driver_ops->shutdown(pci_priv->pci_dev); + } + +skip_driver_remove: + cnss_pci_stop_mhi(pci_priv); + + ret = cnss_suspend_pci_link(pci_priv); + if (ret) + cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret); + + cnss_power_off_device(plat_priv); + + clear_bit(CNSS_FW_READY, &plat_priv->driver_state); + clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state); + + if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) { + clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state); + clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state); + } + + return ret; +} + +static void 
cnss_qca6290_crash_shutdown(struct cnss_plat_data *plat_priv) +{ + struct cnss_pci_data *pci_priv = plat_priv->bus_priv; + int ret = 0; + + cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n", + plat_priv->driver_state); + + if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) || + test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) || + test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) + return; + + ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_KERNEL_PANIC); + if (ret) { + cnss_pr_err("Fail to complete RDDM, err = %d\n", ret); + return; + } + + cnss_pci_collect_dump_info(pci_priv); +} + +static int cnss_powerup(const struct subsys_desc *subsys_desc) +{ + int ret = 0; + struct cnss_plat_data *plat_priv; + + if (!subsys_desc->dev) { + cnss_pr_err("dev from subsys_desc is NULL\n"); + return -ENODEV; + } + + plat_priv = dev_get_drvdata(subsys_desc->dev); + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return -ENODEV; + } + + if (!plat_priv->driver_state) { + cnss_pr_dbg("Powerup is ignored.\n"); + return 0; + } + + switch (plat_priv->device_id) { + case QCA6174_DEVICE_ID: + ret = cnss_qca6174_powerup(plat_priv); + break; + case QCA6290_EMULATION_DEVICE_ID: + case QCA6290_DEVICE_ID: + ret = cnss_qca6290_powerup(plat_priv); + break; + default: + cnss_pr_err("Unknown device_id found: 0x%lx\n", + plat_priv->device_id); + ret = -ENODEV; + } + + return ret; +} + +static int cnss_shutdown(const struct subsys_desc *subsys_desc, bool force_stop) +{ + int ret = 0; + struct cnss_plat_data *plat_priv; + + if (!subsys_desc->dev) { + cnss_pr_err("dev from subsys_desc is NULL\n"); + return -ENODEV; + } + + plat_priv = dev_get_drvdata(subsys_desc->dev); + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return -ENODEV; + } + + switch (plat_priv->device_id) { + case QCA6174_DEVICE_ID: + ret = cnss_qca6174_shutdown(plat_priv); + break; + case QCA6290_EMULATION_DEVICE_ID: + case QCA6290_DEVICE_ID: + ret = 
cnss_qca6290_shutdown(plat_priv); + break; + default: + cnss_pr_err("Unknown device_id found: 0x%lx\n", + plat_priv->device_id); + ret = -ENODEV; + } + + return ret; +} + +static int cnss_qca6290_ramdump(struct cnss_plat_data *plat_priv) +{ + struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2; + struct cnss_dump_data *dump_data = &info_v2->dump_data; + struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr; + struct ramdump_segment *ramdump_segs, *s; + int i, ret = 0; + + if (!info_v2->dump_data_valid || + dump_data->nentries == 0) + return 0; + + ramdump_segs = kcalloc(dump_data->nentries, + sizeof(*ramdump_segs), + GFP_KERNEL); + if (!ramdump_segs) + return -ENOMEM; + + s = ramdump_segs; + for (i = 0; i < dump_data->nentries; i++) { + s->address = dump_seg->address; + s->v_address = dump_seg->v_address; + s->size = dump_seg->size; + s++; + dump_seg++; + } + + ret = do_elf_ramdump(info_v2->ramdump_dev, ramdump_segs, + dump_data->nentries); + kfree(ramdump_segs); + + cnss_pci_set_mhi_state(plat_priv->bus_priv, CNSS_MHI_DEINIT); + cnss_pci_clear_dump_info(plat_priv->bus_priv); + + return ret; +} + +static int cnss_qca6174_ramdump(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct cnss_ramdump_info *ramdump_info; + struct ramdump_segment segment; + + ramdump_info = &plat_priv->ramdump_info; + if (!ramdump_info->ramdump_size) + return -EINVAL; + + memset(&segment, 0, sizeof(segment)); + segment.v_address = ramdump_info->ramdump_va; + segment.size = ramdump_info->ramdump_size; + ret = do_ramdump(ramdump_info->ramdump_dev, &segment, 1); + + return ret; +} + +static int cnss_ramdump(int enable, const struct subsys_desc *subsys_desc) +{ + int ret = 0; + struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev); + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return -ENODEV; + } + + if (!enable) + return 0; + + switch (plat_priv->device_id) { + case QCA6174_DEVICE_ID: + ret = cnss_qca6174_ramdump(plat_priv); + break; + 
case QCA6290_EMULATION_DEVICE_ID: + case QCA6290_DEVICE_ID: + ret = cnss_qca6290_ramdump(plat_priv); + break; + default: + cnss_pr_err("Unknown device_id found: 0x%lx\n", + plat_priv->device_id); + ret = -ENODEV; + } + + return ret; +} + +void *cnss_get_virt_ramdump_mem(unsigned long *size) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + struct cnss_ramdump_info *ramdump_info; + + if (!plat_priv) + return NULL; + + ramdump_info = &plat_priv->ramdump_info; + *size = ramdump_info->ramdump_size; + + return ramdump_info->ramdump_va; +} +EXPORT_SYMBOL(cnss_get_virt_ramdump_mem); + +void cnss_device_crashed(void) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + struct cnss_subsys_info *subsys_info; + + if (!plat_priv) + return; + + subsys_info = &plat_priv->subsys_info; + if (subsys_info->subsys_device) { + set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state); + subsys_set_crash_status(subsys_info->subsys_device, true); + subsystem_restart_dev(subsys_info->subsys_device); + } +} +EXPORT_SYMBOL(cnss_device_crashed); + +static void cnss_crash_shutdown(const struct subsys_desc *subsys_desc) +{ + struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev); + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return; + } + + switch (plat_priv->device_id) { + case QCA6174_DEVICE_ID: + cnss_qca6174_crash_shutdown(plat_priv); + break; + case QCA6290_EMULATION_DEVICE_ID: + case QCA6290_DEVICE_ID: + cnss_qca6290_crash_shutdown(plat_priv); + break; + default: + cnss_pr_err("Unknown device_id found: 0x%lx\n", + plat_priv->device_id); + } +} + +static const char *cnss_recovery_reason_to_str(enum cnss_recovery_reason reason) +{ + switch (reason) { + case CNSS_REASON_DEFAULT: + return "DEFAULT"; + case CNSS_REASON_LINK_DOWN: + return "LINK_DOWN"; + case CNSS_REASON_RDDM: + return "RDDM"; + case CNSS_REASON_TIMEOUT: + return "TIMEOUT"; + } + + return "UNKNOWN"; +}; + +static int cnss_do_recovery(struct 
cnss_plat_data *plat_priv, + enum cnss_recovery_reason reason) +{ + struct cnss_pci_data *pci_priv = plat_priv->bus_priv; + struct cnss_subsys_info *subsys_info = + &plat_priv->subsys_info; + int ret = 0; + + plat_priv->recovery_count++; + + if (plat_priv->device_id == QCA6174_DEVICE_ID) + goto self_recovery; + + if (plat_priv->driver_ops && + test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) + plat_priv->driver_ops->update_status(pci_priv->pci_dev, + CNSS_RECOVERY); + + switch (reason) { + case CNSS_REASON_LINK_DOWN: + if (test_bit(LINK_DOWN_SELF_RECOVERY, &quirks)) + goto self_recovery; + break; + case CNSS_REASON_RDDM: + clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state); + ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM); + if (ret) { + cnss_pr_err("Failed to complete RDDM, err = %d\n", ret); + break; + } + cnss_pci_collect_dump_info(pci_priv); + break; + case CNSS_REASON_DEFAULT: + case CNSS_REASON_TIMEOUT: + break; + default: + cnss_pr_err("Unsupported recovery reason: %s(%d)\n", + cnss_recovery_reason_to_str(reason), reason); + break; + } + + if (!subsys_info->subsys_device) + return 0; + + subsys_set_crash_status(subsys_info->subsys_device, true); + subsystem_restart_dev(subsys_info->subsys_device); + + return 0; + +self_recovery: + cnss_shutdown(&subsys_info->subsys_desc, false); + cnss_powerup(&subsys_info->subsys_desc); + + return 0; +} + +static int cnss_driver_recovery_hdlr(struct cnss_plat_data *plat_priv, + void *data) +{ + struct cnss_recovery_data *recovery_data = data; + int ret = 0; + + cnss_pr_dbg("Driver recovery is triggered with reason: %s(%d)\n", + cnss_recovery_reason_to_str(recovery_data->reason), + recovery_data->reason); + + if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) { + cnss_pr_err("Recovery is already in progress!\n"); + ret = -EINVAL; + goto out; + } + + if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) { + cnss_pr_err("Driver unload is in progress, ignore recovery\n"); + ret = -EINVAL; 
+ goto out; + } + + switch (plat_priv->device_id) { + case QCA6174_DEVICE_ID: + if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) { + cnss_pr_err("Driver load is in progress, ignore recovery\n"); + ret = -EINVAL; + goto out; + } + break; + default: + if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) { + set_bit(CNSS_FW_BOOT_RECOVERY, + &plat_priv->driver_state); + } else if (test_bit(CNSS_DRIVER_LOADING, + &plat_priv->driver_state)) { + cnss_pr_err("Driver probe is in progress, ignore recovery\n"); + ret = -EINVAL; + goto out; + } + break; + } + + set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state); + ret = cnss_do_recovery(plat_priv, recovery_data->reason); + +out: + kfree(data); + return ret; +} + +int cnss_self_recovery(struct device *dev, + enum cnss_recovery_reason reason) +{ + cnss_schedule_recovery(dev, reason); + return 0; +} +EXPORT_SYMBOL(cnss_self_recovery); + +void cnss_schedule_recovery(struct device *dev, + enum cnss_recovery_reason reason) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + struct cnss_recovery_data *data; + int gfp = GFP_KERNEL; + + if (in_interrupt() || irqs_disabled()) + gfp = GFP_ATOMIC; + + data = kzalloc(sizeof(*data), gfp); + if (!data) + return; + + data->reason = reason; + cnss_driver_event_post(plat_priv, + CNSS_DRIVER_EVENT_RECOVERY, + false, data); +} +EXPORT_SYMBOL(cnss_schedule_recovery); + +static int cnss_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv) +{ + struct cnss_pci_data *pci_priv = plat_priv->bus_priv; + int ret; + + ret = cnss_pci_set_mhi_state(plat_priv->bus_priv, + CNSS_MHI_TRIGGER_RDDM); + if (ret) { + cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret); + cnss_schedule_recovery(&pci_priv->pci_dev->dev, + CNSS_REASON_DEFAULT); + return 0; + } + + if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) { + mod_timer(&plat_priv->fw_boot_timer, + jiffies + msecs_to_jiffies(FW_ASSERT_TIMEOUT)); + } + + return 0; +} + +int cnss_force_fw_assert(struct 
device *dev) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev); + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL\n"); + return -ENODEV; + } + + if (plat_priv->device_id == QCA6174_DEVICE_ID) { + cnss_pr_info("Forced FW assert is not supported\n"); + return -EINVAL; + } + + if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) { + cnss_pr_info("Recovery is already in progress, ignore forced FW assert\n"); + return 0; + } + + cnss_driver_event_post(plat_priv, + CNSS_DRIVER_EVENT_FORCE_FW_ASSERT, + false, NULL); + + return 0; +} +EXPORT_SYMBOL(cnss_force_fw_assert); + +void fw_boot_timeout(unsigned long data) +{ + struct cnss_plat_data *plat_priv = (struct cnss_plat_data *)data; + struct cnss_pci_data *pci_priv = plat_priv->bus_priv; + + cnss_pr_err("Timeout waiting for FW ready indication!\n"); + + cnss_schedule_recovery(&pci_priv->pci_dev->dev, + CNSS_REASON_TIMEOUT); +} + +static int cnss_register_driver_hdlr(struct cnss_plat_data *plat_priv, + void *data) +{ + int ret = 0; + struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info; + + set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state); + plat_priv->driver_ops = data; + + ret = cnss_powerup(&subsys_info->subsys_desc); + if (ret) { + clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state); + plat_priv->driver_ops = NULL; + } + + return ret; +} + +static int cnss_unregister_driver_hdlr(struct cnss_plat_data *plat_priv) +{ + struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info; + + set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state); + cnss_shutdown(&subsys_info->subsys_desc, false); + plat_priv->driver_ops = NULL; + + return 0; +} + +static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info; + + set_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state); + ret = cnss_powerup(&subsys_info->subsys_desc); + if (ret) + clear_bit(CNSS_COLD_BOOT_CAL, 
&plat_priv->driver_state); + + return ret; +} + +static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv) +{ + struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info; + + cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01); + cnss_shutdown(&subsys_info->subsys_desc, false); + clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state); + + return 0; +} + +static void cnss_driver_event_work(struct work_struct *work) +{ + struct cnss_plat_data *plat_priv = + container_of(work, struct cnss_plat_data, event_work); + struct cnss_driver_event *event; + unsigned long flags; + int ret = 0; + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return; + } + + cnss_pm_stay_awake(plat_priv); + + spin_lock_irqsave(&plat_priv->event_lock, flags); + + while (!list_empty(&plat_priv->event_list)) { + event = list_first_entry(&plat_priv->event_list, + struct cnss_driver_event, list); + list_del(&event->list); + spin_unlock_irqrestore(&plat_priv->event_lock, flags); + + cnss_pr_dbg("Processing driver event: %s%s(%d), state: 0x%lx\n", + cnss_driver_event_to_str(event->type), + event->sync ? 
"-sync" : "", event->type, + plat_priv->driver_state); + + switch (event->type) { + case CNSS_DRIVER_EVENT_SERVER_ARRIVE: + ret = cnss_wlfw_server_arrive(plat_priv); + break; + case CNSS_DRIVER_EVENT_SERVER_EXIT: + ret = cnss_wlfw_server_exit(plat_priv); + break; + case CNSS_DRIVER_EVENT_REQUEST_MEM: + ret = cnss_pci_alloc_fw_mem(plat_priv->bus_priv); + if (ret) + break; + ret = cnss_wlfw_respond_mem_send_sync(plat_priv); + break; + case CNSS_DRIVER_EVENT_FW_MEM_READY: + ret = cnss_fw_mem_ready_hdlr(plat_priv); + break; + case CNSS_DRIVER_EVENT_FW_READY: + ret = cnss_fw_ready_hdlr(plat_priv); + break; + case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START: + ret = cnss_cold_boot_cal_start_hdlr(plat_priv); + break; + case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE: + ret = cnss_cold_boot_cal_done_hdlr(plat_priv); + break; + case CNSS_DRIVER_EVENT_REGISTER_DRIVER: + ret = cnss_register_driver_hdlr(plat_priv, + event->data); + break; + case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER: + ret = cnss_unregister_driver_hdlr(plat_priv); + break; + case CNSS_DRIVER_EVENT_RECOVERY: + ret = cnss_driver_recovery_hdlr(plat_priv, + event->data); + break; + case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT: + ret = cnss_force_fw_assert_hdlr(plat_priv); + break; + default: + cnss_pr_err("Invalid driver event type: %d", + event->type); + kfree(event); + spin_lock_irqsave(&plat_priv->event_lock, flags); + continue; + } + + spin_lock_irqsave(&plat_priv->event_lock, flags); + if (event->sync) { + event->ret = ret; + complete(&event->complete); + continue; + } + spin_unlock_irqrestore(&plat_priv->event_lock, flags); + + kfree(event); + + spin_lock_irqsave(&plat_priv->event_lock, flags); + } + spin_unlock_irqrestore(&plat_priv->event_lock, flags); + + cnss_pm_relax(plat_priv); +} + +int cnss_register_subsys(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct cnss_subsys_info *subsys_info; + + subsys_info = &plat_priv->subsys_info; + + switch (plat_priv->device_id) { + case QCA6174_DEVICE_ID: + 
subsys_info->subsys_desc.name = "AR6320"; + break; + case QCA6290_EMULATION_DEVICE_ID: + case QCA6290_DEVICE_ID: + subsys_info->subsys_desc.name = "QCA6290"; + break; + default: + cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id); + ret = -ENODEV; + goto out; + } + + subsys_info->subsys_desc.owner = THIS_MODULE; + subsys_info->subsys_desc.powerup = cnss_powerup; + subsys_info->subsys_desc.shutdown = cnss_shutdown; + subsys_info->subsys_desc.ramdump = cnss_ramdump; + subsys_info->subsys_desc.crash_shutdown = cnss_crash_shutdown; + subsys_info->subsys_desc.dev = &plat_priv->plat_dev->dev; + + subsys_info->subsys_device = subsys_register(&subsys_info->subsys_desc); + if (IS_ERR(subsys_info->subsys_device)) { + ret = PTR_ERR(subsys_info->subsys_device); + cnss_pr_err("Failed to register subsys, err = %d\n", ret); + goto out; + } + + subsys_info->subsys_handle = + subsystem_get(subsys_info->subsys_desc.name); + if (!subsys_info->subsys_handle) { + cnss_pr_err("Failed to get subsys_handle!\n"); + ret = -EINVAL; + goto unregister_subsys; + } else if (IS_ERR(subsys_info->subsys_handle)) { + ret = PTR_ERR(subsys_info->subsys_handle); + cnss_pr_err("Failed to do subsystem_get, err = %d\n", ret); + goto unregister_subsys; + } + + return 0; + +unregister_subsys: + subsys_unregister(subsys_info->subsys_device); +out: + return ret; +} + +void cnss_unregister_subsys(struct cnss_plat_data *plat_priv) +{ + struct cnss_subsys_info *subsys_info; + + subsys_info = &plat_priv->subsys_info; + subsystem_put(subsys_info->subsys_handle); + subsys_unregister(subsys_info->subsys_device); +} + +static int cnss_init_dump_entry(struct cnss_plat_data *plat_priv) +{ + struct cnss_ramdump_info *ramdump_info; + struct msm_dump_entry dump_entry; + + ramdump_info = &plat_priv->ramdump_info; + ramdump_info->dump_data.addr = ramdump_info->ramdump_pa; + ramdump_info->dump_data.len = ramdump_info->ramdump_size; + ramdump_info->dump_data.version = CNSS_DUMP_FORMAT_VER; + 
ramdump_info->dump_data.magic = CNSS_DUMP_MAGIC_VER_V2; + strlcpy(ramdump_info->dump_data.name, CNSS_DUMP_NAME, + sizeof(ramdump_info->dump_data.name)); + dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN; + dump_entry.addr = virt_to_phys(&ramdump_info->dump_data); + + return msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry); +} + +static int cnss_qca6174_register_ramdump(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct device *dev; + struct cnss_subsys_info *subsys_info; + struct cnss_ramdump_info *ramdump_info; + u32 ramdump_size = 0; + + dev = &plat_priv->plat_dev->dev; + subsys_info = &plat_priv->subsys_info; + ramdump_info = &plat_priv->ramdump_info; + + if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic", + &ramdump_size) == 0) { + ramdump_info->ramdump_va = dma_alloc_coherent(dev, ramdump_size, + &ramdump_info->ramdump_pa, GFP_KERNEL); + + if (ramdump_info->ramdump_va) + ramdump_info->ramdump_size = ramdump_size; + } + + cnss_pr_dbg("ramdump va: %pK, pa: %pa\n", + ramdump_info->ramdump_va, &ramdump_info->ramdump_pa); + + if (ramdump_info->ramdump_size == 0) { + cnss_pr_info("Ramdump will not be collected"); + goto out; + } + + ret = cnss_init_dump_entry(plat_priv); + if (ret) { + cnss_pr_err("Failed to setup dump table, err = %d\n", ret); + goto free_ramdump; + } + + ramdump_info->ramdump_dev = create_ramdump_device( + subsys_info->subsys_desc.name, subsys_info->subsys_desc.dev); + if (!ramdump_info->ramdump_dev) { + cnss_pr_err("Failed to create ramdump device!"); + ret = -ENOMEM; + goto free_ramdump; + } + + return 0; +free_ramdump: + dma_free_coherent(dev, ramdump_info->ramdump_size, + ramdump_info->ramdump_va, ramdump_info->ramdump_pa); +out: + return ret; +} + +static void cnss_qca6174_unregister_ramdump(struct cnss_plat_data *plat_priv) +{ + struct device *dev; + struct cnss_ramdump_info *ramdump_info; + + dev = &plat_priv->plat_dev->dev; + ramdump_info = &plat_priv->ramdump_info; + + if (ramdump_info->ramdump_dev) + 
destroy_ramdump_device(ramdump_info->ramdump_dev); + + if (ramdump_info->ramdump_va) + dma_free_coherent(dev, ramdump_info->ramdump_size, + ramdump_info->ramdump_va, + ramdump_info->ramdump_pa); +} + +static int cnss_qca6290_register_ramdump(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct cnss_subsys_info *subsys_info; + struct cnss_ramdump_info_v2 *info_v2; + struct cnss_dump_data *dump_data; + struct msm_dump_entry dump_entry; + struct device *dev = &plat_priv->plat_dev->dev; + u32 ramdump_size = 0; + + subsys_info = &plat_priv->subsys_info; + info_v2 = &plat_priv->ramdump_info_v2; + dump_data = &info_v2->dump_data; + + if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic", + &ramdump_size) == 0) + info_v2->ramdump_size = ramdump_size; + + cnss_pr_dbg("Ramdump size 0x%lx\n", info_v2->ramdump_size); + + info_v2->dump_data_vaddr = kzalloc(CNSS_DUMP_DESC_SIZE, GFP_KERNEL); + if (!info_v2->dump_data_vaddr) + return -ENOMEM; + + dump_data->paddr = virt_to_phys(info_v2->dump_data_vaddr); + dump_data->version = CNSS_DUMP_FORMAT_VER_V2; + dump_data->magic = CNSS_DUMP_MAGIC_VER_V2; + dump_data->seg_version = CNSS_DUMP_SEG_VER; + strlcpy(dump_data->name, CNSS_DUMP_NAME, + sizeof(dump_data->name)); + dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN; + dump_entry.addr = virt_to_phys(dump_data); + + ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry); + if (ret) { + cnss_pr_err("Failed to setup dump table, err = %d\n", ret); + goto free_ramdump; + } + + info_v2->ramdump_dev = + create_ramdump_device(subsys_info->subsys_desc.name, + subsys_info->subsys_desc.dev); + if (!info_v2->ramdump_dev) { + cnss_pr_err("Failed to create ramdump device!\n"); + ret = -ENOMEM; + goto free_ramdump; + } + + return 0; + +free_ramdump: + kfree(info_v2->dump_data_vaddr); + info_v2->dump_data_vaddr = NULL; + return ret; +} + +static void cnss_qca6290_unregister_ramdump(struct cnss_plat_data *plat_priv) +{ + struct cnss_ramdump_info_v2 *info_v2; + + info_v2 = 
&plat_priv->ramdump_info_v2; + + if (info_v2->ramdump_dev) + destroy_ramdump_device(info_v2->ramdump_dev); + + kfree(info_v2->dump_data_vaddr); + info_v2->dump_data_vaddr = NULL; + info_v2->dump_data_valid = false; +} + +int cnss_register_ramdump(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + + switch (plat_priv->device_id) { + case QCA6174_DEVICE_ID: + ret = cnss_qca6174_register_ramdump(plat_priv); + break; + case QCA6290_EMULATION_DEVICE_ID: + case QCA6290_DEVICE_ID: + ret = cnss_qca6290_register_ramdump(plat_priv); + break; + default: + cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id); + ret = -ENODEV; + break; + } + return ret; +} + +void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv) +{ + switch (plat_priv->device_id) { + case QCA6174_DEVICE_ID: + cnss_qca6174_unregister_ramdump(plat_priv); + break; + case QCA6290_EMULATION_DEVICE_ID: + case QCA6290_DEVICE_ID: + cnss_qca6290_unregister_ramdump(plat_priv); + break; + default: + cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id); + break; + } +} + +static int cnss_register_bus_scale(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct cnss_bus_bw_info *bus_bw_info; + + bus_bw_info = &plat_priv->bus_bw_info; + + bus_bw_info->bus_scale_table = + msm_bus_cl_get_pdata(plat_priv->plat_dev); + if (bus_bw_info->bus_scale_table) { + bus_bw_info->bus_client = + msm_bus_scale_register_client( + bus_bw_info->bus_scale_table); + if (!bus_bw_info->bus_client) { + cnss_pr_err("Failed to register bus scale client!\n"); + ret = -EINVAL; + goto out; + } + } + + return 0; +out: + return ret; +} + +static void cnss_unregister_bus_scale(struct cnss_plat_data *plat_priv) +{ + struct cnss_bus_bw_info *bus_bw_info; + + bus_bw_info = &plat_priv->bus_bw_info; + + if (bus_bw_info->bus_client) + msm_bus_scale_unregister_client(bus_bw_info->bus_client); +} + +static ssize_t cnss_fs_ready_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + 
int fs_ready = 0; + struct cnss_plat_data *plat_priv = dev_get_drvdata(dev); + + if (sscanf(buf, "%du", &fs_ready) != 1) + return -EINVAL; + + cnss_pr_dbg("File system is ready, fs_ready is %d, count is %zu\n", + fs_ready, count); + + if (qmi_bypass) { + cnss_pr_dbg("QMI is bypassed.\n"); + return count; + } + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return count; + } + + switch (plat_priv->device_id) { + case QCA6290_EMULATION_DEVICE_ID: + case QCA6290_DEVICE_ID: + break; + default: + cnss_pr_err("Not supported for device ID 0x%lx\n", + plat_priv->device_id); + return count; + } + + if (fs_ready == FILE_SYSTEM_READY) { + cnss_driver_event_post(plat_priv, + CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START, + true, NULL); + } + + return count; +} + +static DEVICE_ATTR(fs_ready, 0220, NULL, cnss_fs_ready_store); + +static int cnss_create_sysfs(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + + ret = device_create_file(&plat_priv->plat_dev->dev, &dev_attr_fs_ready); + if (ret) { + cnss_pr_err("Failed to create device file, err = %d\n", ret); + goto out; + } + + return 0; +out: + return ret; +} + +static void cnss_remove_sysfs(struct cnss_plat_data *plat_priv) +{ + device_remove_file(&plat_priv->plat_dev->dev, &dev_attr_fs_ready); +} + +static int cnss_event_work_init(struct cnss_plat_data *plat_priv) +{ + spin_lock_init(&plat_priv->event_lock); + plat_priv->event_wq = alloc_workqueue("cnss_driver_event", + WQ_UNBOUND, 1); + if (!plat_priv->event_wq) { + cnss_pr_err("Failed to create event workqueue!\n"); + return -EFAULT; + } + + INIT_WORK(&plat_priv->event_work, cnss_driver_event_work); + INIT_LIST_HEAD(&plat_priv->event_list); + + return 0; +} + +static void cnss_event_work_deinit(struct cnss_plat_data *plat_priv) +{ + destroy_workqueue(plat_priv->event_wq); +} + +static const struct platform_device_id cnss_platform_id_table[] = { + { .name = "qca6174", .driver_data = QCA6174_DEVICE_ID, }, + { .name = "qca6290", .driver_data = QCA6290_DEVICE_ID, }, +}; 
+ +static const struct of_device_id cnss_of_match_table[] = { + { + .compatible = "qcom,cnss", + .data = (void *)&cnss_platform_id_table[0]}, + { + .compatible = "qcom,cnss-qca6290", + .data = (void *)&cnss_platform_id_table[1]}, + { }, +}; +MODULE_DEVICE_TABLE(of, cnss_of_match_table); + +static int cnss_probe(struct platform_device *plat_dev) +{ + int ret = 0; + struct cnss_plat_data *plat_priv; + const struct of_device_id *of_id; + const struct platform_device_id *device_id; + + if (cnss_get_plat_priv(plat_dev)) { + cnss_pr_err("Driver is already initialized!\n"); + ret = -EEXIST; + goto out; + } + + of_id = of_match_device(cnss_of_match_table, &plat_dev->dev); + if (!of_id || !of_id->data) { + cnss_pr_err("Failed to find of match device!\n"); + ret = -ENODEV; + goto out; + } + + device_id = of_id->data; + + plat_priv = devm_kzalloc(&plat_dev->dev, sizeof(*plat_priv), + GFP_KERNEL); + if (!plat_priv) { + ret = -ENOMEM; + goto out; + } + + plat_priv->plat_dev = plat_dev; + plat_priv->device_id = device_id->driver_data; + cnss_set_plat_priv(plat_dev, plat_priv); + platform_set_drvdata(plat_dev, plat_priv); + + ret = cnss_get_resources(plat_priv); + if (ret) + goto reset_ctx; + + ret = cnss_power_on_device(plat_priv); + if (ret) + goto free_res; + + ret = cnss_pci_init(plat_priv); + if (ret) + goto power_off; + + ret = cnss_register_esoc(plat_priv); + if (ret) + goto deinit_pci; + + ret = cnss_register_bus_scale(plat_priv); + if (ret) + goto unreg_esoc; + + ret = cnss_create_sysfs(plat_priv); + if (ret) + goto unreg_bus_scale; + + ret = cnss_event_work_init(plat_priv); + if (ret) + goto remove_sysfs; + + ret = cnss_qmi_init(plat_priv); + if (ret) + goto deinit_event_work; + + ret = cnss_debugfs_create(plat_priv); + if (ret) + goto deinit_qmi; + + setup_timer(&plat_priv->fw_boot_timer, + fw_boot_timeout, (unsigned long)plat_priv); + + register_pm_notifier(&cnss_pm_notifier); + + ret = device_init_wakeup(&plat_dev->dev, true); + if (ret) + cnss_pr_err("Failed to init 
platform device wakeup source, err = %d\n", + ret); + + cnss_pr_info("Platform driver probed successfully.\n"); + + return 0; + +deinit_qmi: + cnss_qmi_deinit(plat_priv); +deinit_event_work: + cnss_event_work_deinit(plat_priv); +remove_sysfs: + cnss_remove_sysfs(plat_priv); +unreg_bus_scale: + cnss_unregister_bus_scale(plat_priv); +unreg_esoc: + cnss_unregister_esoc(plat_priv); +deinit_pci: + cnss_pci_deinit(plat_priv); +power_off: + cnss_power_off_device(plat_priv); +free_res: + cnss_put_resources(plat_priv); +reset_ctx: + platform_set_drvdata(plat_dev, NULL); + cnss_set_plat_priv(plat_dev, NULL); +out: + return ret; +} + +static int cnss_remove(struct platform_device *plat_dev) +{ + struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev); + + device_init_wakeup(&plat_dev->dev, false); + unregister_pm_notifier(&cnss_pm_notifier); + del_timer(&plat_priv->fw_boot_timer); + cnss_debugfs_destroy(plat_priv); + cnss_qmi_deinit(plat_priv); + cnss_event_work_deinit(plat_priv); + cnss_remove_sysfs(plat_priv); + cnss_unregister_bus_scale(plat_priv); + cnss_unregister_esoc(plat_priv); + cnss_pci_deinit(plat_priv); + cnss_put_resources(plat_priv); + platform_set_drvdata(plat_dev, NULL); + plat_env = NULL; + + return 0; +} + +static struct platform_driver cnss_platform_driver = { + .probe = cnss_probe, + .remove = cnss_remove, + .driver = { + .name = "cnss2", + .owner = THIS_MODULE, + .of_match_table = cnss_of_match_table, + }, +}; + +static int __init cnss_initialize(void) +{ + int ret = 0; + + cnss_debug_init(); + ret = platform_driver_register(&cnss_platform_driver); + if (ret) + cnss_debug_deinit(); + + return ret; +} + +static void __exit cnss_exit(void) +{ + platform_driver_unregister(&cnss_platform_driver); + cnss_debug_deinit(); +} + +module_init(cnss_initialize); +module_exit(cnss_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("CNSS2 Platform Driver"); diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h new file mode 
100644 index 000000000000..e3a8d0cccd52 --- /dev/null +++ b/drivers/net/wireless/cnss2/main.h @@ -0,0 +1,221 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CNSS_MAIN_H +#define _CNSS_MAIN_H + +#include <linux/esoc_client.h> +#include <linux/etherdevice.h> +#include <linux/msm-bus.h> +#include <linux/pm_qos.h> +#include <net/cnss2.h> +#include <soc/qcom/memory_dump.h> +#include <soc/qcom/subsystem_restart.h> + +#include "qmi.h" + +#define MAX_NO_OF_MAC_ADDR 4 + +enum cnss_dev_bus_type { + CNSS_BUS_NONE = -1, + CNSS_BUS_PCI, +}; + +struct cnss_vreg_info { + struct regulator *reg; + const char *name; + u32 min_uv; + u32 max_uv; + u32 load_ua; + u32 delay_us; +}; + +struct cnss_pinctrl_info { + struct pinctrl *pinctrl; + struct pinctrl_state *bootstrap_active; + struct pinctrl_state *wlan_en_active; + struct pinctrl_state *wlan_en_sleep; +}; + +struct cnss_subsys_info { + struct subsys_device *subsys_device; + struct subsys_desc subsys_desc; + void *subsys_handle; +}; + +struct cnss_ramdump_info { + struct ramdump_device *ramdump_dev; + unsigned long ramdump_size; + void *ramdump_va; + phys_addr_t ramdump_pa; + struct msm_dump_data dump_data; +}; + +struct cnss_dump_seg { + unsigned long address; + void *v_address; + unsigned long size; + u32 type; +}; + +struct cnss_dump_data { + u32 version; + u32 magic; + char name[32]; + phys_addr_t paddr; + int nentries; + u32 seg_version; +}; + +struct cnss_ramdump_info_v2 { + struct ramdump_device *ramdump_dev; + unsigned long 
ramdump_size; + void *dump_data_vaddr; + bool dump_data_valid; + struct cnss_dump_data dump_data; +}; + +struct cnss_esoc_info { + struct esoc_desc *esoc_desc; + bool notify_modem_status; + void *modem_notify_handler; + int modem_current_status; +}; + +struct cnss_bus_bw_info { + struct msm_bus_scale_pdata *bus_scale_table; + u32 bus_client; + int current_bw_vote; +}; + +struct cnss_wlan_mac_addr { + u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN]; + u32 no_of_mac_addr_set; +}; + +struct cnss_wlan_mac_info { + struct cnss_wlan_mac_addr wlan_mac_addr; + bool is_wlan_mac_set; +}; + +struct cnss_fw_mem { + size_t size; + void *va; + phys_addr_t pa; + bool valid; +}; + +enum cnss_driver_event_type { + CNSS_DRIVER_EVENT_SERVER_ARRIVE, + CNSS_DRIVER_EVENT_SERVER_EXIT, + CNSS_DRIVER_EVENT_REQUEST_MEM, + CNSS_DRIVER_EVENT_FW_MEM_READY, + CNSS_DRIVER_EVENT_FW_READY, + CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START, + CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE, + CNSS_DRIVER_EVENT_REGISTER_DRIVER, + CNSS_DRIVER_EVENT_UNREGISTER_DRIVER, + CNSS_DRIVER_EVENT_RECOVERY, + CNSS_DRIVER_EVENT_FORCE_FW_ASSERT, + CNSS_DRIVER_EVENT_MAX, +}; + +enum cnss_driver_state { + CNSS_QMI_WLFW_CONNECTED, + CNSS_FW_MEM_READY, + CNSS_FW_READY, + CNSS_COLD_BOOT_CAL, + CNSS_DRIVER_LOADING, + CNSS_DRIVER_UNLOADING, + CNSS_DRIVER_PROBED, + CNSS_DRIVER_RECOVERY, + CNSS_FW_BOOT_RECOVERY, + CNSS_DEV_ERR_NOTIFY, +}; + +struct cnss_recovery_data { + enum cnss_recovery_reason reason; +}; + +enum cnss_pins { + CNSS_WLAN_EN, + CNSS_PCIE_TXP, + CNSS_PCIE_TXN, + CNSS_PCIE_RXP, + CNSS_PCIE_RXN, + CNSS_PCIE_REFCLKP, + CNSS_PCIE_REFCLKN, + CNSS_PCIE_RST, + CNSS_PCIE_WAKE, +}; + +struct cnss_pin_connect_result { + u32 fw_pwr_pin_result; + u32 fw_phy_io_pin_result; + u32 fw_rf_pin_result; + u32 host_pin_result; +}; + +struct cnss_plat_data { + struct platform_device *plat_dev; + void *bus_priv; + struct cnss_vreg_info *vreg_info; + struct cnss_pinctrl_info pinctrl_info; + struct cnss_subsys_info subsys_info; + struct cnss_ramdump_info 
ramdump_info; + struct cnss_ramdump_info_v2 ramdump_info_v2; + struct cnss_esoc_info esoc_info; + struct cnss_bus_bw_info bus_bw_info; + struct notifier_block modem_nb; + struct cnss_platform_cap cap; + struct pm_qos_request qos_request; + unsigned long device_id; + struct cnss_wlan_driver *driver_ops; + enum cnss_driver_status driver_status; + u32 recovery_count; + struct cnss_wlan_mac_info wlan_mac_info; + unsigned long driver_state; + struct list_head event_list; + spinlock_t event_lock; /* spinlock for driver work event handling */ + struct work_struct event_work; + struct workqueue_struct *event_wq; + struct qmi_handle *qmi_wlfw_clnt; + struct work_struct qmi_recv_msg_work; + struct notifier_block qmi_wlfw_clnt_nb; + struct wlfw_rf_chip_info_s_v01 chip_info; + struct wlfw_rf_board_info_s_v01 board_info; + struct wlfw_soc_info_s_v01 soc_info; + struct wlfw_fw_version_info_s_v01 fw_version_info; + struct cnss_fw_mem fw_mem; + struct cnss_fw_mem m3_mem; + struct cnss_pin_connect_result pin_result; + struct dentry *root_dentry; + atomic_t pm_count; + struct timer_list fw_boot_timer; +}; + +void *cnss_bus_dev_to_bus_priv(struct device *dev); +struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev); +int cnss_driver_event_post(struct cnss_plat_data *plat_priv, + enum cnss_driver_event_type type, + bool sync, void *data); +int cnss_get_vreg(struct cnss_plat_data *plat_priv); +int cnss_get_pinctrl(struct cnss_plat_data *plat_priv); +int cnss_power_on_device(struct cnss_plat_data *plat_priv); +void cnss_power_off_device(struct cnss_plat_data *plat_priv); +int cnss_register_subsys(struct cnss_plat_data *plat_priv); +void cnss_unregister_subsys(struct cnss_plat_data *plat_priv); +int cnss_register_ramdump(struct cnss_plat_data *plat_priv); +void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv); +void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv); + +#endif /* _CNSS_MAIN_H */ diff --git a/drivers/net/wireless/cnss2/pci.c 
b/drivers/net/wireless/cnss2/pci.c new file mode 100644 index 000000000000..a17b72ce03ba --- /dev/null +++ b/drivers/net/wireless/cnss2/pci.c @@ -0,0 +1,1610 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/firmware.h> +#include <linux/irq.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of.h> +#include <linux/pm_runtime.h> + +#include "main.h" +#include "debug.h" +#include "pci.h" + +#define PCI_LINK_UP 1 +#define PCI_LINK_DOWN 0 + +#define SAVE_PCI_CONFIG_SPACE 1 +#define RESTORE_PCI_CONFIG_SPACE 0 + +#define PM_OPTIONS_DEFAULT 0 +#define PM_OPTIONS_LINK_DOWN \ + (MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN) + +#define PCI_BAR_NUM 0 + +#ifdef CONFIG_ARM_LPAE +#define PCI_DMA_MASK 64 +#else +#define PCI_DMA_MASK 32 +#endif + +#define MHI_NODE_NAME "qcom,mhi" + +#define MAX_M3_FILE_NAME_LENGTH 13 +#define DEFAULT_M3_FILE_NAME "m3.bin" + +static DEFINE_SPINLOCK(pci_link_down_lock); + +static unsigned int pci_link_down_panic; +module_param(pci_link_down_panic, uint, 0600); +MODULE_PARM_DESC(pci_link_down_panic, + "Trigger kernel panic when PCI link down is detected"); + +static bool fbc_bypass; +#ifdef CONFIG_CNSS2_DEBUG +module_param(fbc_bypass, bool, 0600); +MODULE_PARM_DESC(fbc_bypass, + "Bypass firmware download when loading WLAN driver"); +#endif + +static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save) +{ + int ret = 0; + struct pci_dev *pci_dev = pci_priv->pci_dev; + struct cnss_plat_data 
*plat_priv = pci_priv->plat_priv; + bool link_down_or_recovery; + + if (!plat_priv) + return -ENODEV; + + link_down_or_recovery = pci_priv->pci_link_down_ind || + (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)); + + if (save) { + if (link_down_or_recovery) { + pci_priv->saved_state = NULL; + } else { + pci_save_state(pci_dev); + pci_priv->saved_state = pci_store_saved_state(pci_dev); + } + } else { + if (link_down_or_recovery) { + ret = msm_pcie_recover_config(pci_dev); + if (ret) { + cnss_pr_err("Failed to recover PCI config space, err = %d\n", + ret); + return ret; + } + } else if (pci_priv->saved_state) { + pci_load_and_free_saved_state(pci_dev, + &pci_priv->saved_state); + pci_restore_state(pci_dev); + } + } + + return 0; +} + +static int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up) +{ + int ret = 0; + struct pci_dev *pci_dev = pci_priv->pci_dev; + struct cnss_plat_data *plat_priv = pci_priv->plat_priv; + bool link_down_or_recovery; + + if (!plat_priv) + return -ENODEV; + + link_down_or_recovery = pci_priv->pci_link_down_ind || + (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)); + + ret = msm_pcie_pm_control(link_up ? MSM_PCIE_RESUME : + MSM_PCIE_SUSPEND, + pci_dev->bus->number, + pci_dev, NULL, + link_down_or_recovery ? + PM_OPTIONS_LINK_DOWN : + PM_OPTIONS_DEFAULT); + if (ret) { + cnss_pr_err("Failed to %s PCI link with %s option, err = %d\n", + link_up ? "resume" : "suspend", + link_down_or_recovery ? 
"link down" : "default", + ret); + return ret; + } + + return 0; +} + +int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv) +{ + int ret = 0; + + if (!pci_priv) + return -ENODEV; + + if (!pci_priv->pci_link_state) { + cnss_pr_info("PCI link is already suspended!\n"); + goto out; + } + + ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE); + if (ret) + goto out; + + pci_disable_device(pci_priv->pci_dev); + + ret = pci_set_power_state(pci_priv->pci_dev, PCI_D3hot); + if (ret) + cnss_pr_err("Failed to set D3Hot, err = %d\n", ret); + + ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN); + if (ret) + goto out; + + pci_priv->pci_link_state = PCI_LINK_DOWN; + + return 0; +out: + return ret; +} + +int cnss_resume_pci_link(struct cnss_pci_data *pci_priv) +{ + int ret = 0; + + if (!pci_priv) + return -ENODEV; + + if (pci_priv->pci_link_state) { + cnss_pr_info("PCI link is already resumed!\n"); + goto out; + } + + ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP); + if (ret) + goto out; + + pci_priv->pci_link_state = PCI_LINK_UP; + + ret = pci_enable_device(pci_priv->pci_dev); + if (ret) { + cnss_pr_err("Failed to enable PCI device, err = %d\n", ret); + goto out; + } + + ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE); + if (ret) + goto out; + + pci_set_master(pci_priv->pci_dev); + + if (pci_priv->pci_link_down_ind) + pci_priv->pci_link_down_ind = false; + + return 0; +out: + return ret; +} + +int cnss_pci_link_down(struct device *dev) +{ + unsigned long flags; + struct pci_dev *pci_dev = to_pci_dev(dev); + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); + + if (!pci_priv) { + cnss_pr_err("pci_priv is NULL!\n"); + return -EINVAL; + } + + if (pci_link_down_panic) + panic("cnss: PCI link is down!\n"); + + spin_lock_irqsave(&pci_link_down_lock, flags); + if (pci_priv->pci_link_down_ind) { + cnss_pr_dbg("PCI link down recovery is in progress, ignore!\n"); + spin_unlock_irqrestore(&pci_link_down_lock, flags); + return -EINVAL; + } + 
pci_priv->pci_link_down_ind = true; + spin_unlock_irqrestore(&pci_link_down_lock, flags); + + cnss_pr_err("PCI link down is detected by host driver, schedule recovery!\n"); + + cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_NOTIFY_LINK_ERROR); + cnss_schedule_recovery(dev, CNSS_REASON_LINK_DOWN); + + return 0; +} +EXPORT_SYMBOL(cnss_pci_link_down); + +static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv) +{ + int ret = 0; + struct device *dev; + struct dma_iommu_mapping *mapping; + int atomic_ctx = 1; + int s1_bypass = 1; + + dev = &pci_priv->pci_dev->dev; + + mapping = arm_iommu_create_mapping(&platform_bus_type, + pci_priv->smmu_iova_start, + pci_priv->smmu_iova_len); + if (IS_ERR(mapping)) { + ret = PTR_ERR(mapping); + cnss_pr_err("Failed to create SMMU mapping, err = %d\n", ret); + goto out; + } + + ret = iommu_domain_set_attr(mapping->domain, + DOMAIN_ATTR_ATOMIC, + &atomic_ctx); + if (ret) { + pr_err("Failed to set SMMU atomic_ctx attribute, err = %d\n", + ret); + goto release_mapping; + } + + ret = iommu_domain_set_attr(mapping->domain, + DOMAIN_ATTR_S1_BYPASS, + &s1_bypass); + if (ret) { + pr_err("Failed to set SMMU s1_bypass attribute, err = %d\n", + ret); + goto release_mapping; + } + + ret = arm_iommu_attach_device(dev, mapping); + if (ret) { + pr_err("Failed to attach SMMU device, err = %d\n", ret); + goto release_mapping; + } + + pci_priv->smmu_mapping = mapping; + + return ret; +release_mapping: + arm_iommu_release_mapping(mapping); +out: + return ret; +} + +static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv) +{ + arm_iommu_detach_device(&pci_priv->pci_dev->dev); + arm_iommu_release_mapping(pci_priv->smmu_mapping); + + pci_priv->smmu_mapping = NULL; +} + +static void cnss_pci_event_cb(struct msm_pcie_notify *notify) +{ + unsigned long flags; + struct pci_dev *pci_dev; + struct cnss_pci_data *pci_priv; + + if (!notify) + return; + + pci_dev = notify->user; + if (!pci_dev) + return; + + pci_priv = cnss_get_pci_priv(pci_dev); + if 
(!pci_priv) + return; + + switch (notify->event) { + case MSM_PCIE_EVENT_LINKDOWN: + if (pci_link_down_panic) + panic("cnss: PCI link is down!\n"); + + spin_lock_irqsave(&pci_link_down_lock, flags); + if (pci_priv->pci_link_down_ind) { + cnss_pr_dbg("PCI link down recovery is in progress, ignore!\n"); + spin_unlock_irqrestore(&pci_link_down_lock, flags); + return; + } + pci_priv->pci_link_down_ind = true; + spin_unlock_irqrestore(&pci_link_down_lock, flags); + + cnss_pr_err("PCI link down, schedule recovery!\n"); + cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_NOTIFY_LINK_ERROR); + if (pci_dev->device == QCA6174_DEVICE_ID) + disable_irq(pci_dev->irq); + cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN); + break; + case MSM_PCIE_EVENT_WAKEUP: + if (cnss_pci_get_monitor_wake_intr(pci_priv) && + cnss_pci_get_auto_suspended(pci_priv)) { + cnss_pci_set_monitor_wake_intr(pci_priv, false); + pm_request_resume(&pci_dev->dev); + } + break; + default: + cnss_pr_err("Received invalid PCI event: %d\n", notify->event); + } +} + +static int cnss_reg_pci_event(struct cnss_pci_data *pci_priv) +{ + int ret = 0; + struct msm_pcie_register_event *pci_event; + + pci_event = &pci_priv->msm_pci_event; + pci_event->events = MSM_PCIE_EVENT_LINKDOWN | + MSM_PCIE_EVENT_WAKEUP; + pci_event->user = pci_priv->pci_dev; + pci_event->mode = MSM_PCIE_TRIGGER_CALLBACK; + pci_event->callback = cnss_pci_event_cb; + pci_event->options = MSM_PCIE_CONFIG_NO_RECOVERY; + + ret = msm_pcie_register_event(pci_event); + if (ret) + cnss_pr_err("Failed to register MSM PCI event, err = %d\n", + ret); + + return ret; +} + +static void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv) +{ + msm_pcie_deregister_event(&pci_priv->msm_pci_event); +} + +static int cnss_pci_suspend(struct device *dev) +{ + int ret = 0; + struct pci_dev *pci_dev = to_pci_dev(dev); + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); + struct cnss_plat_data *plat_priv; + struct cnss_wlan_driver *driver_ops; + + 
pm_message_t state = { .event = PM_EVENT_SUSPEND }; + + if (!pci_priv) + goto out; + + plat_priv = pci_priv->plat_priv; + if (!plat_priv) + goto out; + + driver_ops = plat_priv->driver_ops; + if (driver_ops && driver_ops->suspend) { + ret = driver_ops->suspend(pci_dev, state); + if (pci_priv->pci_link_state) { + if (cnss_pci_set_mhi_state(pci_priv, + CNSS_MHI_SUSPEND)) { + driver_ops->resume(pci_dev); + ret = -EAGAIN; + goto out; + } + + cnss_set_pci_config_space(pci_priv, + SAVE_PCI_CONFIG_SPACE); + pci_disable_device(pci_dev); + + ret = pci_set_power_state(pci_dev, PCI_D3hot); + if (ret) + cnss_pr_err("Failed to set D3Hot, err = %d\n", + ret); + } + } + + cnss_pci_set_monitor_wake_intr(pci_priv, false); + +out: + return ret; +} + +static int cnss_pci_resume(struct device *dev) +{ + int ret = 0; + struct pci_dev *pci_dev = to_pci_dev(dev); + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); + struct cnss_plat_data *plat_priv; + struct cnss_wlan_driver *driver_ops; + + if (!pci_priv) + goto out; + + plat_priv = pci_priv->plat_priv; + if (!plat_priv) + goto out; + + driver_ops = plat_priv->driver_ops; + if (driver_ops && driver_ops->resume && !pci_priv->pci_link_down_ind) { + ret = pci_enable_device(pci_dev); + if (ret) + cnss_pr_err("Failed to enable PCI device, err = %d\n", + ret); + + if (pci_priv->saved_state) + cnss_set_pci_config_space(pci_priv, + RESTORE_PCI_CONFIG_SPACE); + + pci_set_master(pci_dev); + cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME); + + ret = driver_ops->resume(pci_dev); + } + +out: + return ret; +} + +static int cnss_pci_suspend_noirq(struct device *dev) +{ + int ret = 0; + struct pci_dev *pci_dev = to_pci_dev(dev); + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); + struct cnss_plat_data *plat_priv; + struct cnss_wlan_driver *driver_ops; + + if (!pci_priv) + goto out; + + plat_priv = pci_priv->plat_priv; + if (!plat_priv) + goto out; + + driver_ops = plat_priv->driver_ops; + if (driver_ops && 
driver_ops->suspend_noirq) + ret = driver_ops->suspend_noirq(pci_dev); + +out: + return ret; +} + +static int cnss_pci_resume_noirq(struct device *dev) +{ + int ret = 0; + struct pci_dev *pci_dev = to_pci_dev(dev); + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); + struct cnss_plat_data *plat_priv; + struct cnss_wlan_driver *driver_ops; + + if (!pci_priv) + goto out; + + plat_priv = pci_priv->plat_priv; + if (!plat_priv) + goto out; + + driver_ops = plat_priv->driver_ops; + if (driver_ops && driver_ops->resume_noirq && + !pci_priv->pci_link_down_ind) + ret = driver_ops->resume_noirq(pci_dev); + +out: + return ret; +} + +static int cnss_pci_runtime_suspend(struct device *dev) +{ + int ret = 0; + struct pci_dev *pci_dev = to_pci_dev(dev); + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); + struct cnss_plat_data *plat_priv; + struct cnss_wlan_driver *driver_ops; + + if (!pci_priv) + return -EAGAIN; + + plat_priv = pci_priv->plat_priv; + if (!plat_priv) + return -EAGAIN; + + if (pci_priv->pci_link_down_ind) { + cnss_pr_dbg("PCI link down recovery is in progress!\n"); + return -EAGAIN; + } + + cnss_pr_dbg("Runtime suspend start\n"); + + driver_ops = plat_priv->driver_ops; + if (driver_ops && driver_ops->runtime_ops && + driver_ops->runtime_ops->runtime_suspend) + ret = driver_ops->runtime_ops->runtime_suspend(pci_dev); + + cnss_pr_info("Runtime suspend status: %d\n", ret); + + return ret; +} + +static int cnss_pci_runtime_resume(struct device *dev) +{ + int ret = 0; + struct pci_dev *pci_dev = to_pci_dev(dev); + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); + struct cnss_plat_data *plat_priv; + struct cnss_wlan_driver *driver_ops; + + if (!pci_priv) + return -EAGAIN; + + plat_priv = pci_priv->plat_priv; + if (!plat_priv) + return -EAGAIN; + + if (pci_priv->pci_link_down_ind) { + cnss_pr_dbg("PCI link down recovery is in progress!\n"); + return -EAGAIN; + } + + cnss_pr_dbg("Runtime resume start\n"); + + driver_ops = 
plat_priv->driver_ops; + if (driver_ops && driver_ops->runtime_ops && + driver_ops->runtime_ops->runtime_resume) + ret = driver_ops->runtime_ops->runtime_resume(pci_dev); + + cnss_pr_info("Runtime resume status: %d\n", ret); + + return ret; +} + +static int cnss_pci_runtime_idle(struct device *dev) +{ + cnss_pr_dbg("Runtime idle\n"); + + pm_request_autosuspend(dev); + + return -EBUSY; +} + +int cnss_wlan_pm_control(bool vote) +{ + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + struct cnss_pci_data *pci_priv; + struct pci_dev *pci_dev; + + if (!plat_priv) + return -ENODEV; + + pci_priv = plat_priv->bus_priv; + if (!pci_priv) + return -ENODEV; + + pci_dev = pci_priv->pci_dev; + + return msm_pcie_pm_control(vote ? MSM_PCIE_DISABLE_PC : + MSM_PCIE_ENABLE_PC, + pci_dev->bus->number, pci_dev, + NULL, PM_OPTIONS_DEFAULT); +} +EXPORT_SYMBOL(cnss_wlan_pm_control); + +int cnss_auto_suspend(void) +{ + int ret = 0; + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + struct pci_dev *pci_dev; + struct cnss_pci_data *pci_priv; + struct cnss_bus_bw_info *bus_bw_info; + + if (!plat_priv) + return -ENODEV; + + pci_priv = plat_priv->bus_priv; + if (!pci_priv) + return -ENODEV; + + pci_dev = pci_priv->pci_dev; + + if (pci_priv->pci_link_state) { + if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) { + ret = -EAGAIN; + goto out; + } + + cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE); + pci_disable_device(pci_dev); + + ret = pci_set_power_state(pci_dev, PCI_D3hot); + if (ret) + cnss_pr_err("Failed to set D3Hot, err = %d\n", ret); + if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) { + cnss_pr_err("Failed to shutdown PCI link!\n"); + ret = -EAGAIN; + goto resume_mhi; + } + } + + pci_priv->pci_link_state = PCI_LINK_DOWN; + cnss_pci_set_auto_suspended(pci_priv, 1); + cnss_pci_set_monitor_wake_intr(pci_priv, true); + + bus_bw_info = &plat_priv->bus_bw_info; + msm_bus_scale_client_update_request(bus_bw_info->bus_client, + 
CNSS_BUS_WIDTH_NONE); + + return 0; + +resume_mhi: + if (pci_enable_device(pci_dev)) + cnss_pr_err("Failed to enable PCI device!\n"); + cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME); +out: + return ret; +} +EXPORT_SYMBOL(cnss_auto_suspend); + +int cnss_auto_resume(void) +{ + int ret = 0; + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + struct pci_dev *pci_dev; + struct cnss_pci_data *pci_priv; + struct cnss_bus_bw_info *bus_bw_info; + + if (!plat_priv) + return -ENODEV; + + pci_priv = plat_priv->bus_priv; + if (!pci_priv) + return -ENODEV; + + pci_dev = pci_priv->pci_dev; + if (!pci_priv->pci_link_state) { + if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) { + cnss_pr_err("Failed to resume PCI link!\n"); + ret = -EAGAIN; + goto out; + } + pci_priv->pci_link_state = PCI_LINK_UP; + ret = pci_enable_device(pci_dev); + if (ret) + cnss_pr_err("Failed to enable PCI device, err = %d\n", + ret); + } + + cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE); + pci_set_master(pci_dev); + cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME); + cnss_pci_set_auto_suspended(pci_priv, 0); + + bus_bw_info = &plat_priv->bus_bw_info; + msm_bus_scale_client_update_request(bus_bw_info->bus_client, + bus_bw_info->current_bw_vote); +out: + return ret; +} +EXPORT_SYMBOL(cnss_auto_resume); + +int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv) +{ + struct cnss_plat_data *plat_priv = pci_priv->plat_priv; + struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem; + + if (!fw_mem->va && fw_mem->size) { + fw_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev, + fw_mem->size, &fw_mem->pa, + GFP_KERNEL); + if (!fw_mem->va) { + cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx\n", + fw_mem->size); + fw_mem->size = 0; + + return -ENOMEM; + } + } + + return 0; +} + +static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv) +{ + struct cnss_plat_data *plat_priv = pci_priv->plat_priv; + struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem; + + if (fw_mem->va 
&& fw_mem->size) { + cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n", + fw_mem->va, &fw_mem->pa, fw_mem->size); + dma_free_coherent(&pci_priv->pci_dev->dev, fw_mem->size, + fw_mem->va, fw_mem->pa); + fw_mem->va = NULL; + fw_mem->pa = 0; + fw_mem->size = 0; + } +} + +int cnss_pci_load_m3(struct cnss_pci_data *pci_priv) +{ + struct cnss_plat_data *plat_priv = pci_priv->plat_priv; + struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem; + char filename[MAX_M3_FILE_NAME_LENGTH]; + const struct firmware *fw_entry; + int ret = 0; + + if (!m3_mem->va && !m3_mem->size) { + snprintf(filename, sizeof(filename), DEFAULT_M3_FILE_NAME); + + ret = request_firmware(&fw_entry, filename, + &pci_priv->pci_dev->dev); + if (ret) { + cnss_pr_err("Failed to load M3 image: %s\n", filename); + return ret; + } + + m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev, + fw_entry->size, &m3_mem->pa, + GFP_KERNEL); + if (!m3_mem->va) { + cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n", + fw_entry->size); + release_firmware(fw_entry); + return -ENOMEM; + } + + memcpy(m3_mem->va, fw_entry->data, fw_entry->size); + m3_mem->size = fw_entry->size; + release_firmware(fw_entry); + } + + return 0; +} + +static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv) +{ + struct cnss_plat_data *plat_priv = pci_priv->plat_priv; + struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem; + + if (m3_mem->va && m3_mem->size) { + cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n", + m3_mem->va, &m3_mem->pa, m3_mem->size); + dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size, + m3_mem->va, m3_mem->pa); + } + + m3_mem->va = NULL; + m3_mem->pa = 0; + m3_mem->size = 0; +} + +int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va, + phys_addr_t *pa) +{ + if (!pci_priv) + return -ENODEV; + + *va = pci_priv->bar; + *pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM); + + return 0; +} + +#ifdef CONFIG_CNSS_QCA6290 +#define 
PCI_MAX_BAR_SIZE 0xD00000 + +static void __iomem *cnss_pci_iomap(struct pci_dev *dev, int bar, + unsigned long maxlen) +{ + resource_size_t start = pci_resource_start(dev, bar); + resource_size_t len = PCI_MAX_BAR_SIZE; + unsigned long flags = pci_resource_flags(dev, bar); + + if (!len || !start) + return NULL; + + if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM)) { + if (flags & IORESOURCE_CACHEABLE && !(flags & IORESOURCE_IO)) + return ioremap(start, len); + else + return ioremap_nocache(start, len); + } + + return NULL; +} +#else +static void __iomem *cnss_pci_iomap(struct pci_dev *dev, int bar, + unsigned long maxlen) +{ + return pci_iomap(dev, bar, maxlen); +} +#endif + +static struct cnss_msi_config msi_config = { + .total_vectors = 32, + .total_users = 4, + .users = (struct cnss_msi_user[]) { + { .name = "MHI", .num_vectors = 2, .base_vector = 0 }, + { .name = "CE", .num_vectors = 11, .base_vector = 2 }, + { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, + { .name = "DP", .num_vectors = 18, .base_vector = 14 }, + }, +}; + +static int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv) +{ + pci_priv->msi_config = &msi_config; + + return 0; +} + +static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv) +{ + int ret = 0; + struct pci_dev *pci_dev = pci_priv->pci_dev; + int num_vectors; + struct cnss_msi_config *msi_config; + struct msi_desc *msi_desc; + + ret = cnss_pci_get_msi_assignment(pci_priv); + if (ret) { + cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret); + goto out; + } + + msi_config = pci_priv->msi_config; + if (!msi_config) { + cnss_pr_err("msi_config is NULL!\n"); + ret = -EINVAL; + goto out; + } + + num_vectors = pci_enable_msi_range(pci_dev, + msi_config->total_vectors, + msi_config->total_vectors); + if (num_vectors != msi_config->total_vectors) { + cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d", + msi_config->total_vectors, num_vectors); + ret = -EINVAL; + goto 
reset_msi_config; + } + + msi_desc = irq_get_msi_desc(pci_dev->irq); + if (!msi_desc) { + cnss_pr_err("msi_desc is NULL!\n"); + ret = -EINVAL; + goto disable_msi; + } + + pci_priv->msi_ep_base_data = msi_desc->msg.data; + if (!pci_priv->msi_ep_base_data) { + cnss_pr_err("Got 0 MSI base data!\n"); + CNSS_ASSERT(0); + } + + cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data); + + return 0; + +disable_msi: + pci_disable_msi(pci_priv->pci_dev); +reset_msi_config: + pci_priv->msi_config = NULL; +out: + return ret; +} + +static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv) +{ + pci_disable_msi(pci_priv->pci_dev); +} + +int cnss_get_user_msi_assignment(struct device *dev, char *user_name, + int *num_vectors, u32 *user_base_data, + u32 *base_vector) +{ + struct cnss_pci_data *pci_priv = dev_get_drvdata(dev); + struct cnss_msi_config *msi_config; + int idx; + + if (!pci_priv) + return -ENODEV; + + msi_config = pci_priv->msi_config; + if (!msi_config) { + cnss_pr_err("MSI is not supported.\n"); + return -EINVAL; + } + + for (idx = 0; idx < msi_config->total_users; idx++) { + if (strcmp(user_name, msi_config->users[idx].name) == 0) { + *num_vectors = msi_config->users[idx].num_vectors; + *user_base_data = msi_config->users[idx].base_vector + + pci_priv->msi_ep_base_data; + *base_vector = msi_config->users[idx].base_vector; + + cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", + user_name, *num_vectors, *user_base_data, + *base_vector); + + return 0; + } + } + + cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name); + + return -EINVAL; +} +EXPORT_SYMBOL(cnss_get_user_msi_assignment); + +int cnss_get_msi_irq(struct device *dev, unsigned int vector) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + + return pci_dev->irq + vector; +} +EXPORT_SYMBOL(cnss_get_msi_irq); + +void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low, + u32 *msi_addr_high) +{ + struct pci_dev *pci_dev = 
to_pci_dev(dev);
+
+	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
+			      msi_addr_low);
+
+	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
+			      msi_addr_high);
+}
+EXPORT_SYMBOL(cnss_get_msi_address);
+
+/* Enable PCI bus-level access for the WLAN device: sanity-check the
+ * config-space device ID against the probed ID, assign and map the BAR,
+ * program the streaming/coherent DMA masks and enable bus mastering.
+ * On failure every acquired resource is released in reverse order.
+ */
+static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct pci_dev *pci_dev = pci_priv->pci_dev;
+	u16 device_id;
+
+	pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id);
+	if (device_id != pci_priv->pci_device_id->device)  {
+		cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n",
+			    device_id, pci_priv->pci_device_id->device);
+		ret = -EIO;
+		goto out;
+	}
+
+	ret = pci_assign_resource(pci_dev, PCI_BAR_NUM);
+	if (ret) {
+		cnss_pr_err("Failed to assign PCI resource, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = pci_enable_device(pci_dev);
+	if (ret) {
+		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
+		goto out;
+	}
+
+	ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss");
+	if (ret) {
+		cnss_pr_err("Failed to request PCI region, err = %d\n", ret);
+		goto disable_device;
+	}
+
+	ret = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(PCI_DMA_MASK));
+	if (ret) {
+		cnss_pr_err("Failed to set PCI DMA mask (%d), err = %d\n",
+			    PCI_DMA_MASK, ret);
+		goto release_region;
+	}
+
+	ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(PCI_DMA_MASK));
+	if (ret) {
+		cnss_pr_err("Failed to set PCI consistent DMA mask (%d), err = %d\n",
+			    PCI_DMA_MASK, ret);
+		goto release_region;
+	}
+
+	pci_set_master(pci_dev);
+
+	pci_priv->bar = cnss_pci_iomap(pci_dev, PCI_BAR_NUM, 0);
+	if (!pci_priv->bar) {
+		cnss_pr_err("Failed to do PCI IO map!\n");
+		ret = -EIO;
+		goto clear_master;
+	}
+	return 0;
+
+clear_master:
+	pci_clear_master(pci_dev);
+release_region:
+	pci_release_region(pci_dev, PCI_BAR_NUM);
+disable_device:
+	pci_disable_device(pci_dev);
+out:
+	return ret;
+}
+
+/* Tear down everything cnss_pci_enable_bus() set up, in reverse order. */
+static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
+{
+	struct pci_dev *pci_dev = pci_priv->pci_dev;
+
+ if (pci_priv->bar) { + pci_iounmap(pci_dev, pci_priv->bar); + pci_priv->bar = NULL; + } + + pci_clear_master(pci_dev); + pci_release_region(pci_dev, PCI_BAR_NUM); + pci_disable_device(pci_dev); +} + +static int cnss_mhi_pm_runtime_get(struct pci_dev *pci_dev) +{ + return pm_runtime_get(&pci_dev->dev); +} + +static void cnss_mhi_pm_runtime_put_noidle(struct pci_dev *pci_dev) +{ + pm_runtime_put_noidle(&pci_dev->dev); +} + +static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state) +{ + switch (mhi_state) { + case CNSS_MHI_INIT: + return "INIT"; + case CNSS_MHI_DEINIT: + return "DEINIT"; + case CNSS_MHI_POWER_ON: + return "POWER_ON"; + case CNSS_MHI_POWER_OFF: + return "POWER_OFF"; + case CNSS_MHI_SUSPEND: + return "SUSPEND"; + case CNSS_MHI_RESUME: + return "RESUME"; + case CNSS_MHI_TRIGGER_RDDM: + return "TRIGGER_RDDM"; + case CNSS_MHI_RDDM: + return "RDDM"; + case CNSS_MHI_RDDM_KERNEL_PANIC: + return "RDDM_KERNEL_PANIC"; + case CNSS_MHI_NOTIFY_LINK_ERROR: + return "NOTIFY_LINK_ERROR"; + default: + return "UNKNOWN"; + } +}; + +static void *cnss_pci_collect_dump_seg(struct cnss_pci_data *pci_priv, + enum mhi_rddm_segment type, + void *start_addr) +{ + int count; + struct scatterlist *sg_list, *s; + unsigned int i; + struct cnss_plat_data *plat_priv = pci_priv->plat_priv; + struct cnss_dump_data *dump_data = + &plat_priv->ramdump_info_v2.dump_data; + struct cnss_dump_seg *dump_seg = start_addr; + + count = mhi_xfer_rddm(&pci_priv->mhi_dev, type, &sg_list); + if (count <= 0 || !sg_list) { + cnss_pr_err("Invalid dump_seg for type %u, count %u, sg_list %pK\n", + type, count, sg_list); + return start_addr; + } + + cnss_pr_dbg("Collect dump seg: type %u, nentries %d\n", type, count); + + for_each_sg(sg_list, s, count, i) { + dump_seg->address = sg_dma_address(s); + dump_seg->v_address = sg_virt(s); + dump_seg->size = s->length; + dump_seg->type = type; + cnss_pr_dbg("seg-%d: address 0x%lx, v_address %pK, size 0x%lx\n", + i, dump_seg->address, + 
dump_seg->v_address, dump_seg->size);
+		dump_seg++;
+	}
+
+	dump_data->nentries += count;
+
+	return dump_seg;
+}
+
+/* Walk both RDDM segment types (FW then remote-data) and populate the
+ * v2 ramdump descriptor table; mark the dump valid only if at least one
+ * segment was collected.
+ */
+void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+	struct cnss_dump_data *dump_data =
+		&plat_priv->ramdump_info_v2.dump_data;
+	void *start_addr, *end_addr;
+
+	dump_data->nentries = 0;
+
+	start_addr = plat_priv->ramdump_info_v2.dump_data_vaddr;
+	end_addr = cnss_pci_collect_dump_seg(pci_priv,
+					     MHI_RDDM_FW_SEGMENT, start_addr);
+
+	start_addr = end_addr;
+	end_addr = cnss_pci_collect_dump_seg(pci_priv,
+					     MHI_RDDM_RD_SEGMENT, start_addr);
+
+	if (dump_data->nentries > 0)
+		plat_priv->ramdump_info_v2.dump_data_valid = true;
+}
+
+/* Invalidate any previously collected RDDM dump descriptors. */
+void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
+{
+	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+	plat_priv->ramdump_info_v2.dump_data.nentries = 0;
+	plat_priv->ramdump_info_v2.dump_data_valid = false;
+}
+
+/* MHI status callback: flag a device error and kick recovery.
+ * Note: priv must not be dereferenced until after the NULL check —
+ * the original code read pci_priv->plat_priv before validating priv,
+ * making the guard dead and crashing on a NULL callback argument.
+ */
+static void cnss_mhi_notify_status(enum MHI_CB_REASON reason, void *priv)
+{
+	struct cnss_pci_data *pci_priv = priv;
+	struct cnss_plat_data *plat_priv;
+	enum cnss_recovery_reason cnss_reason = CNSS_REASON_RDDM;
+
+	if (!pci_priv)
+		return;
+
+	plat_priv = pci_priv->plat_priv;
+
+	cnss_pr_dbg("MHI status cb is called with reason %d\n", reason);
+
+	set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
+	del_timer(&plat_priv->fw_boot_timer);
+
+	if (reason == MHI_CB_SYS_ERROR)
+		cnss_reason = CNSS_REASON_TIMEOUT;
+
+	cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+			       cnss_reason);
+}
+
+static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
+{
+	int ret = 0;
+	struct pci_dev *pci_dev = pci_priv->pci_dev;
+	struct mhi_device *mhi_dev = &pci_priv->mhi_dev;
+
+	mhi_dev->dev = &pci_priv->plat_priv->plat_dev->dev;
+	mhi_dev->pci_dev = pci_dev;
+
+	mhi_dev->resources[0].start = (resource_size_t)pci_priv->bar;
+	mhi_dev->resources[0].end = (resource_size_t)pci_priv->bar +
+		pci_resource_len(pci_dev, PCI_BAR_NUM);
+
mhi_dev->resources[0].flags = + pci_resource_flags(pci_dev, PCI_BAR_NUM); + mhi_dev->resources[0].name = "BAR"; + cnss_pr_dbg("BAR start is %pa, BAR end is %pa\n", + &mhi_dev->resources[0].start, &mhi_dev->resources[0].end); + + if (!mhi_dev->resources[1].start) { + mhi_dev->resources[1].start = pci_dev->irq; + mhi_dev->resources[1].end = pci_dev->irq + 1; + mhi_dev->resources[1].flags = IORESOURCE_IRQ; + mhi_dev->resources[1].name = "IRQ"; + } + cnss_pr_dbg("IRQ start is %pa, IRQ end is %pa\n", + &mhi_dev->resources[1].start, &mhi_dev->resources[1].end); + + mhi_dev->pm_runtime_get = cnss_mhi_pm_runtime_get; + mhi_dev->pm_runtime_put_noidle = cnss_mhi_pm_runtime_put_noidle; + + mhi_dev->support_rddm = true; + mhi_dev->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size; + mhi_dev->status_cb = cnss_mhi_notify_status; + + ret = mhi_register_device(mhi_dev, MHI_NODE_NAME, pci_priv); + if (ret) { + cnss_pr_err("Failed to register as MHI device, err = %d\n", + ret); + return ret; + } + + return 0; +} + +static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv) +{ +} + +static enum mhi_dev_ctrl cnss_to_mhi_dev_state(enum cnss_mhi_state state) +{ + switch (state) { + case CNSS_MHI_INIT: + return MHI_DEV_CTRL_INIT; + case CNSS_MHI_DEINIT: + return MHI_DEV_CTRL_DE_INIT; + case CNSS_MHI_POWER_ON: + return MHI_DEV_CTRL_POWER_ON; + case CNSS_MHI_POWER_OFF: + return MHI_DEV_CTRL_POWER_OFF; + case CNSS_MHI_SUSPEND: + return MHI_DEV_CTRL_SUSPEND; + case CNSS_MHI_RESUME: + return MHI_DEV_CTRL_RESUME; + case CNSS_MHI_TRIGGER_RDDM: + return MHI_DEV_CTRL_TRIGGER_RDDM; + case CNSS_MHI_RDDM: + return MHI_DEV_CTRL_RDDM; + case CNSS_MHI_RDDM_KERNEL_PANIC: + return MHI_DEV_CTRL_RDDM_KERNEL_PANIC; + case CNSS_MHI_NOTIFY_LINK_ERROR: + return MHI_DEV_CTRL_NOTIFY_LINK_ERROR; + default: + cnss_pr_err("Unknown CNSS MHI state (%d)\n", state); + return -EINVAL; + } +} + +static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv, + enum cnss_mhi_state mhi_state) 
+{ + switch (mhi_state) { + case CNSS_MHI_INIT: + if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state)) + return 0; + break; + case CNSS_MHI_DEINIT: + case CNSS_MHI_POWER_ON: + if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) && + !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) + return 0; + break; + case CNSS_MHI_POWER_OFF: + case CNSS_MHI_SUSPEND: + if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) && + !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state)) + return 0; + break; + case CNSS_MHI_RESUME: + if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state)) + return 0; + break; + case CNSS_MHI_TRIGGER_RDDM: + case CNSS_MHI_RDDM: + case CNSS_MHI_RDDM_KERNEL_PANIC: + case CNSS_MHI_NOTIFY_LINK_ERROR: + return 0; + default: + cnss_pr_err("Unhandled MHI state: %s(%d)\n", + cnss_mhi_state_to_str(mhi_state), mhi_state); + } + + cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n", + cnss_mhi_state_to_str(mhi_state), mhi_state, + pci_priv->mhi_state); + + return -EINVAL; +} + +static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv, + enum cnss_mhi_state mhi_state) +{ + switch (mhi_state) { + case CNSS_MHI_INIT: + set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state); + break; + case CNSS_MHI_DEINIT: + clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state); + break; + case CNSS_MHI_POWER_ON: + set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state); + break; + case CNSS_MHI_POWER_OFF: + clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state); + break; + case CNSS_MHI_SUSPEND: + set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state); + break; + case CNSS_MHI_RESUME: + clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state); + break; + case CNSS_MHI_TRIGGER_RDDM: + case CNSS_MHI_RDDM: + case CNSS_MHI_RDDM_KERNEL_PANIC: + case CNSS_MHI_NOTIFY_LINK_ERROR: + break; + default: + cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state); + } +} + +int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv, + enum cnss_mhi_state mhi_state) +{ + int ret = 0; + enum mhi_dev_ctrl mhi_dev_state 
= cnss_to_mhi_dev_state(mhi_state); + + if (!pci_priv) { + cnss_pr_err("pci_priv is NULL!\n"); + return -ENODEV; + } + + if (pci_priv->device_id == QCA6174_DEVICE_ID) + return 0; + + if (mhi_dev_state < 0) { + cnss_pr_err("Invalid MHI DEV state (%d)\n", mhi_dev_state); + return -EINVAL; + } + + ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state); + if (ret) + goto out; + + cnss_pr_dbg("Setting MHI state: %s(%d)\n", + cnss_mhi_state_to_str(mhi_state), mhi_state); + ret = mhi_pm_control_device(&pci_priv->mhi_dev, mhi_dev_state); + if (ret) { + cnss_pr_err("Failed to set MHI state: %s(%d)\n", + cnss_mhi_state_to_str(mhi_state), mhi_state); + goto out; + } + + cnss_pci_set_mhi_state_bit(pci_priv, mhi_state); + +out: + return ret; +} + +int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv) +{ + int ret = 0; + + if (!pci_priv) { + cnss_pr_err("pci_priv is NULL!\n"); + return -ENODEV; + } + + if (fbc_bypass) + return 0; + + ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT); + if (ret) + goto out; + + ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON); + if (ret) + goto deinit_mhi; + + return 0; + +deinit_mhi: + cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT); +out: + return ret; +} + +void cnss_pci_stop_mhi(struct cnss_pci_data *pci_priv) +{ + struct cnss_plat_data *plat_priv; + + if (!pci_priv) { + cnss_pr_err("pci_priv is NULL!\n"); + return; + } + + if (fbc_bypass) + return; + + plat_priv = pci_priv->plat_priv; + + cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME); + cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF); + if (!plat_priv->ramdump_info_v2.dump_data_valid) + cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT); +} + +static int cnss_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + int ret = 0; + struct cnss_pci_data *pci_priv; + struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL); + struct resource *res; + + cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x\n", + id->vendor, 
pci_dev->device); + + switch (pci_dev->device) { + case QCA6290_EMULATION_DEVICE_ID: + case QCA6290_DEVICE_ID: + if (!mhi_is_device_ready(&plat_priv->plat_dev->dev, + MHI_NODE_NAME)) { + cnss_pr_err("MHI driver is not ready, defer PCI probe!\n"); + ret = -EPROBE_DEFER; + goto out; + } + break; + default: + break; + } + + pci_priv = devm_kzalloc(&pci_dev->dev, sizeof(*pci_priv), + GFP_KERNEL); + if (!pci_priv) { + ret = -ENOMEM; + goto out; + } + + pci_priv->pci_link_state = PCI_LINK_UP; + pci_priv->plat_priv = plat_priv; + pci_priv->pci_dev = pci_dev; + pci_priv->pci_device_id = id; + pci_priv->device_id = pci_dev->device; + cnss_set_pci_priv(pci_dev, pci_priv); + plat_priv->device_id = pci_dev->device; + plat_priv->bus_priv = pci_priv; + + ret = cnss_register_subsys(plat_priv); + if (ret) + goto reset_ctx; + + ret = cnss_register_ramdump(plat_priv); + if (ret) + goto unregister_subsys; + + res = platform_get_resource_byname(plat_priv->plat_dev, IORESOURCE_MEM, + "smmu_iova_base"); + if (res) { + pci_priv->smmu_iova_start = res->start; + pci_priv->smmu_iova_len = resource_size(res); + cnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: %zu\n", + &pci_priv->smmu_iova_start, + pci_priv->smmu_iova_len); + + ret = cnss_pci_init_smmu(pci_priv); + if (ret) { + cnss_pr_err("Failed to init SMMU, err = %d\n", ret); + goto unregister_ramdump; + } + } + + ret = cnss_reg_pci_event(pci_priv); + if (ret) { + cnss_pr_err("Failed to register PCI event, err = %d\n", ret); + goto deinit_smmu; + } + + ret = cnss_pci_enable_bus(pci_priv); + if (ret) + goto dereg_pci_event; + + switch (pci_dev->device) { + case QCA6174_DEVICE_ID: + pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET, + &pci_priv->revision_id); + ret = cnss_suspend_pci_link(pci_priv); + if (ret) + cnss_pr_err("Failed to suspend PCI link, err = %d\n", + ret); + cnss_power_off_device(plat_priv); + break; + case QCA6290_EMULATION_DEVICE_ID: + case QCA6290_DEVICE_ID: + ret = cnss_pci_enable_msi(pci_priv); + if (ret) + goto 
disable_bus; + ret = cnss_pci_register_mhi(pci_priv); + if (ret) { + cnss_pci_disable_msi(pci_priv); + goto disable_bus; + } + break; + default: + cnss_pr_err("Unknown PCI device found: 0x%x\n", + pci_dev->device); + ret = -ENODEV; + goto disable_bus; + } + + return 0; + +disable_bus: + cnss_pci_disable_bus(pci_priv); +dereg_pci_event: + cnss_dereg_pci_event(pci_priv); +deinit_smmu: + if (pci_priv->smmu_mapping) + cnss_pci_deinit_smmu(pci_priv); +unregister_ramdump: + cnss_unregister_ramdump(plat_priv); +unregister_subsys: + cnss_unregister_subsys(plat_priv); +reset_ctx: + plat_priv->bus_priv = NULL; +out: + return ret; +} + +static void cnss_pci_remove(struct pci_dev *pci_dev) +{ + struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev); + struct cnss_plat_data *plat_priv = + cnss_bus_dev_to_plat_priv(&pci_dev->dev); + + cnss_pci_free_m3_mem(pci_priv); + cnss_pci_free_fw_mem(pci_priv); + + switch (pci_dev->device) { + case QCA6290_EMULATION_DEVICE_ID: + case QCA6290_DEVICE_ID: + cnss_pci_unregister_mhi(pci_priv); + cnss_pci_disable_msi(pci_priv); + break; + default: + break; + } + + cnss_pci_disable_bus(pci_priv); + cnss_dereg_pci_event(pci_priv); + if (pci_priv->smmu_mapping) + cnss_pci_deinit_smmu(pci_priv); + cnss_unregister_ramdump(plat_priv); + cnss_unregister_subsys(plat_priv); + plat_priv->bus_priv = NULL; +} + +static const struct pci_device_id cnss_pci_id_table[] = { + { QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID }, + { QCA6290_EMULATION_VENDOR_ID, QCA6290_EMULATION_DEVICE_ID, + PCI_ANY_ID, PCI_ANY_ID }, + { QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID }, + { 0 } +}; +MODULE_DEVICE_TABLE(pci, cnss_pci_id_table); + +static const struct dev_pm_ops cnss_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq, + cnss_pci_resume_noirq) + SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume, + cnss_pci_runtime_idle) +}; + +struct 
pci_driver cnss_pci_driver = { + .name = "cnss_pci", + .id_table = cnss_pci_id_table, + .probe = cnss_pci_probe, + .remove = cnss_pci_remove, + .driver = { + .pm = &cnss_pm_ops, + }, +}; + +int cnss_pci_init(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct device *dev = &plat_priv->plat_dev->dev; + u32 rc_num; + + ret = of_property_read_u32(dev->of_node, "qcom,wlan-rc-num", &rc_num); + if (ret) { + cnss_pr_err("Failed to find PCIe RC number, err = %d\n", ret); + goto out; + } + + ret = msm_pcie_enumerate(rc_num); + if (ret) { + cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n", + rc_num, ret); + goto out; + } + + ret = pci_register_driver(&cnss_pci_driver); + if (ret) { + cnss_pr_err("Failed to register to PCI framework, err = %d\n", + ret); + goto out; + } + + return 0; +out: + return ret; +} + +void cnss_pci_deinit(struct cnss_plat_data *plat_priv) +{ + pci_unregister_driver(&cnss_pci_driver); +} diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h new file mode 100644 index 000000000000..4dc29c3c1f10 --- /dev/null +++ b/drivers/net/wireless/cnss2/pci.h @@ -0,0 +1,141 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _CNSS_PCI_H +#define _CNSS_PCI_H + +#include <asm/dma-iommu.h> +#include <linux/iommu.h> +#include <linux/msm_mhi.h> +#include <linux/msm_pcie.h> +#include <linux/pci.h> + +#include "main.h" + +#define QCA6174_VENDOR_ID 0x168C +#define QCA6174_DEVICE_ID 0x003E +#define QCA6174_REV_ID_OFFSET 0x08 +#define QCA6174_REV3_VERSION 0x5020000 +#define QCA6174_REV3_2_VERSION 0x5030000 +#define QCA6290_VENDOR_ID 0x17CB +#define QCA6290_DEVICE_ID 0x1100 +#define QCA6290_EMULATION_VENDOR_ID 0x168C +#define QCA6290_EMULATION_DEVICE_ID 0xABCD + +enum cnss_mhi_state { + CNSS_MHI_INIT, + CNSS_MHI_DEINIT, + CNSS_MHI_SUSPEND, + CNSS_MHI_RESUME, + CNSS_MHI_POWER_OFF, + CNSS_MHI_POWER_ON, + CNSS_MHI_TRIGGER_RDDM, + CNSS_MHI_RDDM, + CNSS_MHI_RDDM_KERNEL_PANIC, + CNSS_MHI_NOTIFY_LINK_ERROR, +}; + +struct cnss_msi_user { + char *name; + int num_vectors; + u32 base_vector; +}; + +struct cnss_msi_config { + int total_vectors; + int total_users; + struct cnss_msi_user *users; +}; + +struct cnss_pci_data { + struct pci_dev *pci_dev; + struct cnss_plat_data *plat_priv; + const struct pci_device_id *pci_device_id; + u32 device_id; + u16 revision_id; + bool pci_link_state; + bool pci_link_down_ind; + struct pci_saved_state *saved_state; + struct msm_pcie_register_event msm_pci_event; + atomic_t auto_suspended; + bool monitor_wake_intr; + struct dma_iommu_mapping *smmu_mapping; + dma_addr_t smmu_iova_start; + size_t smmu_iova_len; + void __iomem *bar; + struct cnss_msi_config *msi_config; + u32 msi_ep_base_data; + struct mhi_device mhi_dev; + unsigned long mhi_state; +}; + +static inline void cnss_set_pci_priv(struct pci_dev *pci_dev, void *data) +{ + pci_set_drvdata(pci_dev, data); +} + +static inline struct cnss_pci_data *cnss_get_pci_priv(struct pci_dev *pci_dev) +{ + return pci_get_drvdata(pci_dev); +} + +static inline struct cnss_plat_data *cnss_pci_priv_to_plat_priv(void *bus_priv) +{ + struct cnss_pci_data *pci_priv = bus_priv; + + return pci_priv->plat_priv; +} + +static 
inline void cnss_pci_set_monitor_wake_intr(void *bus_priv, bool val) +{ + struct cnss_pci_data *pci_priv = bus_priv; + + pci_priv->monitor_wake_intr = val; +} + +static inline bool cnss_pci_get_monitor_wake_intr(void *bus_priv) +{ + struct cnss_pci_data *pci_priv = bus_priv; + + return pci_priv->monitor_wake_intr; +} + +static inline void cnss_pci_set_auto_suspended(void *bus_priv, int val) +{ + struct cnss_pci_data *pci_priv = bus_priv; + + atomic_set(&pci_priv->auto_suspended, val); +} + +static inline int cnss_pci_get_auto_suspended(void *bus_priv) +{ + struct cnss_pci_data *pci_priv = bus_priv; + + return atomic_read(&pci_priv->auto_suspended); +} + +int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv); +int cnss_resume_pci_link(struct cnss_pci_data *pci_priv); +int cnss_pci_init(struct cnss_plat_data *plat_priv); +void cnss_pci_deinit(struct cnss_plat_data *plat_priv); +int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv); +int cnss_pci_load_m3(struct cnss_pci_data *pci_priv); +int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va, + phys_addr_t *pa); +int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv, + enum cnss_mhi_state state); +int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv); +void cnss_pci_stop_mhi(struct cnss_pci_data *pci_priv); +void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv); +void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv); + +#endif /* _CNSS_PCI_H */ diff --git a/drivers/net/wireless/cnss2/power.c b/drivers/net/wireless/cnss2/power.c new file mode 100644 index 000000000000..8ed1507bde11 --- /dev/null +++ b/drivers/net/wireless/cnss2/power.c @@ -0,0 +1,386 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/delay.h> +#include <linux/of.h> +#include <linux/pinctrl/consumer.h> +#include <linux/regulator/consumer.h> + +#include "main.h" +#include "debug.h" + +static struct cnss_vreg_info cnss_vreg_info[] = { + {NULL, "vdd-wlan-core", 1300000, 1300000, 0, 0}, + {NULL, "vdd-wlan-io", 1800000, 1800000, 0, 0}, + {NULL, "vdd-wlan-xtal-aon", 0, 0, 0, 0}, + {NULL, "vdd-wlan-xtal", 1800000, 1800000, 0, 2}, + {NULL, "vdd-wlan", 0, 0, 0, 0}, + {NULL, "vdd-wlan-sp2t", 2700000, 2700000, 0, 0}, + {NULL, "wlan-ant-switch", 2700000, 2700000, 20000, 0}, + {NULL, "wlan-soc-swreg", 1200000, 1200000, 0, 0}, + {NULL, "vdd-wlan-en", 0, 0, 0, 10}, +}; + +#define CNSS_VREG_INFO_SIZE ARRAY_SIZE(cnss_vreg_info) +#define MAX_PROP_SIZE 32 + +#define BOOTSTRAP_GPIO "qcom,enable-bootstrap-gpio" +#define BOOTSTRAP_ACTIVE "bootstrap_active" +#define WLAN_EN_GPIO "wlan-en-gpio" +#define WLAN_EN_ACTIVE "wlan_en_active" +#define WLAN_EN_SLEEP "wlan_en_sleep" + +#define BOOTSTRAP_DELAY 1000 +#define WLAN_ENABLE_DELAY 1000 + +int cnss_get_vreg(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + int i; + struct cnss_vreg_info *vreg_info; + struct device *dev; + struct regulator *reg; + const __be32 *prop; + char prop_name[MAX_PROP_SIZE]; + int len; + + dev = &plat_priv->plat_dev->dev; + + plat_priv->vreg_info = devm_kzalloc(dev, sizeof(cnss_vreg_info), + GFP_KERNEL); + if (!plat_priv->vreg_info) { + ret = -ENOMEM; + goto out; + } + + memcpy(plat_priv->vreg_info, cnss_vreg_info, sizeof(cnss_vreg_info)); + + for (i = 0; i < CNSS_VREG_INFO_SIZE; i++) { + vreg_info = &plat_priv->vreg_info[i]; + reg = devm_regulator_get_optional(dev, vreg_info->name); + if (IS_ERR(reg)) { + ret = PTR_ERR(reg); + if (ret == -ENODEV) + continue; + else if 
(ret == -EPROBE_DEFER) + cnss_pr_info("EPROBE_DEFER for regulator: %s\n", + vreg_info->name); + else + cnss_pr_err("Failed to get regulator %s, err = %d\n", + vreg_info->name, ret); + goto out; + } + + vreg_info->reg = reg; + + snprintf(prop_name, MAX_PROP_SIZE, "qcom,%s-info", + vreg_info->name); + + prop = of_get_property(dev->of_node, prop_name, &len); + cnss_pr_dbg("Got regulator info, name: %s, len: %d\n", + prop_name, len); + + if (!prop || len != (4 * sizeof(__be32))) { + cnss_pr_dbg("Property %s %s, use default\n", prop_name, + prop ? "invalid format" : "doesn't exist"); + } else { + vreg_info->min_uv = be32_to_cpup(&prop[0]); + vreg_info->max_uv = be32_to_cpup(&prop[1]); + vreg_info->load_ua = be32_to_cpup(&prop[2]); + vreg_info->delay_us = be32_to_cpup(&prop[3]); + } + + cnss_pr_dbg("Got regulator: %s, min_uv: %u, max_uv: %u, load_ua: %u, delay_us: %u\n", + vreg_info->name, vreg_info->min_uv, + vreg_info->max_uv, vreg_info->load_ua, + vreg_info->delay_us); + } + + return 0; +out: + return ret; +} + +static int cnss_vreg_on(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct cnss_vreg_info *vreg_info; + int i; + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return -ENODEV; + } + + for (i = 0; i < CNSS_VREG_INFO_SIZE; i++) { + vreg_info = &plat_priv->vreg_info[i]; + + if (!vreg_info->reg) + continue; + + cnss_pr_dbg("Regulator %s is being enabled\n", vreg_info->name); + + if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0) { + ret = regulator_set_voltage(vreg_info->reg, + vreg_info->min_uv, + vreg_info->max_uv); + + if (ret) { + cnss_pr_err("Failed to set voltage for regulator %s, min_uv: %u, max_uv: %u, err = %d\n", + vreg_info->name, vreg_info->min_uv, + vreg_info->max_uv, ret); + break; + } + } + + if (vreg_info->load_ua) { + ret = regulator_set_load(vreg_info->reg, + vreg_info->load_ua); + + if (ret < 0) { + cnss_pr_err("Failed to set load for regulator %s, load: %u, err = %d\n", + vreg_info->name, vreg_info->load_ua, + ret); + 
break; + } + } + + if (vreg_info->delay_us) + udelay(vreg_info->delay_us); + + ret = regulator_enable(vreg_info->reg); + if (ret) { + cnss_pr_err("Failed to enable regulator %s, err = %d\n", + vreg_info->name, ret); + break; + } + } + + if (ret) { + for (; i >= 0; i--) { + vreg_info = &plat_priv->vreg_info[i]; + + if (!vreg_info->reg) + continue; + + regulator_disable(vreg_info->reg); + if (vreg_info->load_ua) + regulator_set_load(vreg_info->reg, 0); + if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0) + regulator_set_voltage(vreg_info->reg, 0, + vreg_info->max_uv); + } + + return ret; + } + + return 0; +} + +static int cnss_vreg_off(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct cnss_vreg_info *vreg_info; + int i; + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return -ENODEV; + } + + for (i = CNSS_VREG_INFO_SIZE - 1; i >= 0; i--) { + vreg_info = &plat_priv->vreg_info[i]; + + if (!vreg_info->reg) + continue; + + cnss_pr_dbg("Regulator %s is being disabled\n", + vreg_info->name); + + ret = regulator_disable(vreg_info->reg); + if (ret) + cnss_pr_err("Failed to disable regulator %s, err = %d\n", + vreg_info->name, ret); + + if (vreg_info->load_ua) { + ret = regulator_set_load(vreg_info->reg, 0); + if (ret < 0) + cnss_pr_err("Failed to set load for regulator %s, err = %d\n", + vreg_info->name, ret); + } + + if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0) { + ret = regulator_set_voltage(vreg_info->reg, 0, + vreg_info->max_uv); + if (ret) + cnss_pr_err("Failed to set voltage for regulator %s, err = %d\n", + vreg_info->name, ret); + } + } + + return ret; +} + +int cnss_get_pinctrl(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + struct device *dev; + struct cnss_pinctrl_info *pinctrl_info; + + dev = &plat_priv->plat_dev->dev; + pinctrl_info = &plat_priv->pinctrl_info; + + pinctrl_info->pinctrl = devm_pinctrl_get(dev); + if (IS_ERR_OR_NULL(pinctrl_info->pinctrl)) { + ret = PTR_ERR(pinctrl_info->pinctrl); + cnss_pr_err("Failed to 
get pinctrl, err = %d\n", ret); + goto out; + } + + if (of_find_property(dev->of_node, BOOTSTRAP_GPIO, NULL)) { + pinctrl_info->bootstrap_active = + pinctrl_lookup_state(pinctrl_info->pinctrl, + BOOTSTRAP_ACTIVE); + if (IS_ERR_OR_NULL(pinctrl_info->bootstrap_active)) { + ret = PTR_ERR(pinctrl_info->bootstrap_active); + cnss_pr_err("Failed to get bootstrap active state, err = %d\n", + ret); + goto out; + } + } + + if (of_find_property(dev->of_node, WLAN_EN_GPIO, NULL)) { + pinctrl_info->wlan_en_active = + pinctrl_lookup_state(pinctrl_info->pinctrl, + WLAN_EN_ACTIVE); + if (IS_ERR_OR_NULL(pinctrl_info->wlan_en_active)) { + ret = PTR_ERR(pinctrl_info->wlan_en_active); + cnss_pr_err("Failed to get wlan_en active state, err = %d\n", + ret); + goto out; + } + + pinctrl_info->wlan_en_sleep = + pinctrl_lookup_state(pinctrl_info->pinctrl, + WLAN_EN_SLEEP); + if (IS_ERR_OR_NULL(pinctrl_info->wlan_en_sleep)) { + ret = PTR_ERR(pinctrl_info->wlan_en_sleep); + cnss_pr_err("Failed to get wlan_en sleep state, err = %d\n", + ret); + goto out; + } + } + + return 0; +out: + return ret; +} + +static int cnss_select_pinctrl_state(struct cnss_plat_data *plat_priv, + bool state) +{ + int ret = 0; + struct cnss_pinctrl_info *pinctrl_info; + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + ret = -ENODEV; + goto out; + } + + pinctrl_info = &plat_priv->pinctrl_info; + + if (state) { + if (!IS_ERR_OR_NULL(pinctrl_info->bootstrap_active)) { + ret = pinctrl_select_state(pinctrl_info->pinctrl, + pinctrl_info-> + bootstrap_active); + if (ret) { + cnss_pr_err("Failed to select bootstrap active state, err = %d\n", + ret); + goto out; + } + udelay(BOOTSTRAP_DELAY); + } + + if (!IS_ERR_OR_NULL(pinctrl_info->wlan_en_active)) { + ret = pinctrl_select_state(pinctrl_info->pinctrl, + pinctrl_info-> + wlan_en_active); + if (ret) { + cnss_pr_err("Failed to select wlan_en active state, err = %d\n", + ret); + goto out; + } + udelay(WLAN_ENABLE_DELAY); + } + } else { + if 
(!IS_ERR_OR_NULL(pinctrl_info->wlan_en_sleep)) { + ret = pinctrl_select_state(pinctrl_info->pinctrl, + pinctrl_info->wlan_en_sleep); + if (ret) { + cnss_pr_err("Failed to select wlan_en sleep state, err = %d\n", + ret); + goto out; + } + } + } + + return 0; +out: + return ret; +} + +int cnss_power_on_device(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + + ret = cnss_vreg_on(plat_priv); + if (ret) { + cnss_pr_err("Failed to turn on vreg, err = %d\n", ret); + goto out; + } + + ret = cnss_select_pinctrl_state(plat_priv, true); + if (ret) { + cnss_pr_err("Failed to select pinctrl state, err = %d\n", ret); + goto vreg_off; + } + + return 0; +vreg_off: + cnss_vreg_off(plat_priv); +out: + return ret; +} + +void cnss_power_off_device(struct cnss_plat_data *plat_priv) +{ + cnss_select_pinctrl_state(plat_priv, false); + cnss_vreg_off(plat_priv); +} + +void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv) +{ + unsigned long pin_status = 0; + + set_bit(CNSS_WLAN_EN, &pin_status); + set_bit(CNSS_PCIE_TXN, &pin_status); + set_bit(CNSS_PCIE_TXP, &pin_status); + set_bit(CNSS_PCIE_RXN, &pin_status); + set_bit(CNSS_PCIE_RXP, &pin_status); + set_bit(CNSS_PCIE_REFCLKN, &pin_status); + set_bit(CNSS_PCIE_REFCLKP, &pin_status); + set_bit(CNSS_PCIE_RST, &pin_status); + + plat_priv->pin_result.host_pin_result = pin_status; +} diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c new file mode 100644 index 000000000000..db55d3350eb5 --- /dev/null +++ b/drivers/net/wireless/cnss2/qmi.c @@ -0,0 +1,1006 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/qmi_encdec.h> +#include <soc/qcom/msm_qmi_interface.h> + +#include "main.h" +#include "debug.h" +#include "qmi.h" + +#define WLFW_SERVICE_INS_ID_V01 1 +#define WLFW_CLIENT_ID 0x4b4e454c +#define MAX_BDF_FILE_NAME 11 +#define DEFAULT_BDF_FILE_NAME "bdwlan.elf" +#define BDF_FILE_NAME_PREFIX "bdwlan.e" + +#ifdef CONFIG_CNSS2_DEBUG +static unsigned int qmi_timeout = 10000; +module_param(qmi_timeout, uint, 0600); +MODULE_PARM_DESC(qmi_timeout, "Timeout for QMI message in milliseconds"); + +#define QMI_WLFW_TIMEOUT_MS qmi_timeout +#else +#define QMI_WLFW_TIMEOUT_MS 10000 +#endif + +static bool daemon_support; +module_param(daemon_support, bool, 0600); +MODULE_PARM_DESC(daemon_support, "User space has cnss-daemon support or not"); + +static bool bdf_bypass = true; +#ifdef CONFIG_CNSS2_DEBUG +module_param(bdf_bypass, bool, 0600); +MODULE_PARM_DESC(bdf_bypass, "If BDF is not found, send dummy BDF to FW"); +#endif + +enum cnss_bdf_type { + CNSS_BDF_BIN, + CNSS_BDF_ELF, +}; + +static void cnss_wlfw_clnt_notifier_work(struct work_struct *work) +{ + struct cnss_plat_data *plat_priv = + container_of(work, struct cnss_plat_data, qmi_recv_msg_work); + int ret = 0; + + cnss_pr_dbg("Receiving QMI WLFW event in work queue context\n"); + + do { + ret = qmi_recv_msg(plat_priv->qmi_wlfw_clnt); + } while (ret == 0); + + if (ret != -ENOMSG) + cnss_pr_err("Error receiving message: %d\n", ret); + + cnss_pr_dbg("Receiving QMI event completed\n"); +} + +static void cnss_wlfw_clnt_notifier(struct qmi_handle *handle, + enum qmi_event_type event, + void *notify_priv) +{ + struct cnss_plat_data *plat_priv = notify_priv; + + cnss_pr_dbg("Received QMI WLFW event: %d\n", 
event); + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return; + } + + switch (event) { + case QMI_RECV_MSG: + schedule_work(&plat_priv->qmi_recv_msg_work); + break; + case QMI_SERVER_EXIT: + break; + default: + cnss_pr_dbg("Unhandled QMI event: %d\n", event); + break; + } +} + +static int cnss_wlfw_clnt_svc_event_notifier(struct notifier_block *nb, + unsigned long code, void *_cmd) +{ + struct cnss_plat_data *plat_priv = + container_of(nb, struct cnss_plat_data, qmi_wlfw_clnt_nb); + int ret = 0; + + cnss_pr_dbg("Received QMI WLFW service event: %ld\n", code); + + switch (code) { + case QMI_SERVER_ARRIVE: + ret = cnss_driver_event_post(plat_priv, + CNSS_DRIVER_EVENT_SERVER_ARRIVE, + false, NULL); + break; + + case QMI_SERVER_EXIT: + ret = cnss_driver_event_post(plat_priv, + CNSS_DRIVER_EVENT_SERVER_EXIT, + false, NULL); + break; + default: + cnss_pr_dbg("Invalid QMI service event: %ld\n", code); + break; + } + + return ret; +} + +static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv) +{ + struct wlfw_host_cap_req_msg_v01 req; + struct wlfw_host_cap_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int ret = 0; + + cnss_pr_dbg("Sending host capability message, state: 0x%lx\n", + plat_priv->driver_state); + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.daemon_support_valid = 1; + req.daemon_support = daemon_support; + + cnss_pr_dbg("daemon_support is %d\n", req.daemon_support); + + req_desc.max_msg_len = WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_HOST_CAP_REQ_V01; + req_desc.ei_array = wlfw_host_cap_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_HOST_CAP_RESP_V01; + resp_desc.ei_array = wlfw_host_cap_resp_msg_v01_ei; + + ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req, + sizeof(req), &resp_desc, &resp, sizeof(resp), + QMI_WLFW_TIMEOUT_MS); + if (ret < 0) { + cnss_pr_err("Failed to send 
host capability request, err = %d\n", + ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + cnss_pr_err("Host capability request failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = resp.resp.result; + goto out; + } + + return 0; +out: + CNSS_ASSERT(0); + return ret; +} + +static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv) +{ + struct wlfw_ind_register_req_msg_v01 req; + struct wlfw_ind_register_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int ret = 0; + + cnss_pr_dbg("Sending indication register message, state: 0x%lx\n", + plat_priv->driver_state); + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.client_id_valid = 1; + req.client_id = WLFW_CLIENT_ID; + req.fw_ready_enable_valid = 1; + req.fw_ready_enable = 1; + req.request_mem_enable_valid = 1; + req.request_mem_enable = 1; + req.fw_mem_ready_enable_valid = 1; + req.fw_mem_ready_enable = 1; + req.cold_boot_cal_done_enable_valid = 1; + req.cold_boot_cal_done_enable = 1; + req.pin_connect_result_enable_valid = 1; + req.pin_connect_result_enable = 1; + + req_desc.max_msg_len = WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_IND_REGISTER_REQ_V01; + req_desc.ei_array = wlfw_ind_register_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_IND_REGISTER_RESP_V01; + resp_desc.ei_array = wlfw_ind_register_resp_msg_v01_ei; + + ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req, + sizeof(req), &resp_desc, &resp, sizeof(resp), + QMI_WLFW_TIMEOUT_MS); + if (ret < 0) { + cnss_pr_err("Failed to send indication register request, err = %d\n", + ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + cnss_pr_err("Indication register request failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = resp.resp.result; + goto out; + } + + return 0; +out: + CNSS_ASSERT(0); + 
return ret; +} + +static int cnss_wlfw_request_mem_ind_hdlr(struct cnss_plat_data *plat_priv, + void *msg, unsigned int msg_len) +{ + struct msg_desc ind_desc; + struct wlfw_request_mem_ind_msg_v01 ind_msg; + struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem; + int ret = 0; + + ind_desc.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01; + ind_desc.max_msg_len = WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN; + ind_desc.ei_array = wlfw_request_mem_ind_msg_v01_ei; + + ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len); + if (ret < 0) { + cnss_pr_err("Failed to decode request memory indication, msg_len: %u, err = %d\n", + ret, msg_len); + return ret; + } + + fw_mem->size = ind_msg.size; + + cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM, + false, NULL); + + return 0; +} + +static int cnss_qmi_pin_result_ind_hdlr(struct cnss_plat_data *plat_priv, + void *msg, unsigned int msg_len) +{ + struct msg_desc ind_desc; + struct wlfw_pin_connect_result_ind_msg_v01 ind_msg; + int ret = 0; + + ind_desc.msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01; + ind_desc.max_msg_len = WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN; + ind_desc.ei_array = wlfw_pin_connect_result_ind_msg_v01_ei; + + ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len); + if (ret < 0) { + cnss_pr_err("Failed to decode pin connect result indication, msg_len: %u, err = %d\n", + msg_len, ret); + return ret; + } + if (ind_msg.pwr_pin_result_valid) + plat_priv->pin_result.fw_pwr_pin_result = + ind_msg.pwr_pin_result; + if (ind_msg.phy_io_pin_result_valid) + plat_priv->pin_result.fw_phy_io_pin_result = + ind_msg.phy_io_pin_result; + if (ind_msg.rf_pin_result_valid) + plat_priv->pin_result.fw_rf_pin_result = ind_msg.rf_pin_result; + + cnss_pr_dbg("Pin connect Result: pwr_pin: 0x%x phy_io_pin: 0x%x rf_io_pin: 0x%x\n", + ind_msg.pwr_pin_result, ind_msg.phy_io_pin_result, + ind_msg.rf_pin_result); + return ret; +} + +int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv) +{ + struct 
wlfw_respond_mem_req_msg_v01 req; + struct wlfw_respond_mem_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem; + int ret = 0; + + cnss_pr_dbg("Sending respond memory message, state: 0x%lx\n", + plat_priv->driver_state); + + if (!fw_mem->pa || !fw_mem->size) { + cnss_pr_err("Memory for FW is not available!\n"); + ret = -ENOMEM; + goto out; + } + + cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n", + fw_mem->va, &fw_mem->pa, fw_mem->size); + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.addr = fw_mem->pa; + req.size = fw_mem->size; + + req_desc.max_msg_len = WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_RESPOND_MEM_REQ_V01; + req_desc.ei_array = wlfw_respond_mem_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_RESPOND_MEM_RESP_V01; + resp_desc.ei_array = wlfw_respond_mem_resp_msg_v01_ei; + + ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req, + sizeof(req), &resp_desc, &resp, sizeof(resp), + QMI_WLFW_TIMEOUT_MS); + if (ret < 0) { + cnss_pr_err("Failed to send respond memory request, err = %d\n", + ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + cnss_pr_err("Respond memory request failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = resp.resp.result; + goto out; + } + + return 0; +out: + CNSS_ASSERT(0); + return ret; +} + +int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv) +{ + struct wlfw_cap_req_msg_v01 req; + struct wlfw_cap_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int ret = 0; + + cnss_pr_dbg("Sending target capability message, state: 0x%lx\n", + plat_priv->driver_state); + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req_desc.max_msg_len = WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_CAP_REQ_V01; + req_desc.ei_array = 
wlfw_cap_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_CAP_RESP_V01; + resp_desc.ei_array = wlfw_cap_resp_msg_v01_ei; + + ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req, + sizeof(req), &resp_desc, &resp, sizeof(resp), + QMI_WLFW_TIMEOUT_MS); + if (ret < 0) { + cnss_pr_err("Failed to send target capability request, err = %d\n", + ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + cnss_pr_err("Target capability request failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = resp.resp.result; + goto out; + } + + if (resp.chip_info_valid) + plat_priv->chip_info = resp.chip_info; + if (resp.board_info_valid) + plat_priv->board_info = resp.board_info; + else + plat_priv->board_info.board_id = 0xFF; + if (resp.soc_info_valid) + plat_priv->soc_info = resp.soc_info; + if (resp.fw_version_info_valid) + plat_priv->fw_version_info = resp.fw_version_info; + + cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, fw_version: 0x%x, fw_build_timestamp: %s", + plat_priv->chip_info.chip_id, + plat_priv->chip_info.chip_family, + plat_priv->board_info.board_id, plat_priv->soc_info.soc_id, + plat_priv->fw_version_info.fw_version, + plat_priv->fw_version_info.fw_build_timestamp); + + return 0; +out: + CNSS_ASSERT(0); + return ret; +} + +int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv) +{ + struct wlfw_bdf_download_req_msg_v01 *req; + struct wlfw_bdf_download_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + char filename[MAX_BDF_FILE_NAME]; + const struct firmware *fw_entry; + const u8 *temp; + unsigned int remaining; + int ret = 0; + + cnss_pr_dbg("Sending BDF download message, state: 0x%lx\n", + plat_priv->driver_state); + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) { + ret = -ENOMEM; + goto out; + } + + if (plat_priv->board_info.board_id == 0xFF) + snprintf(filename, 
sizeof(filename), DEFAULT_BDF_FILE_NAME); + else + snprintf(filename, sizeof(filename), + BDF_FILE_NAME_PREFIX "%02x", + plat_priv->board_info.board_id); + + ret = request_firmware(&fw_entry, filename, &plat_priv->plat_dev->dev); + if (ret) { + cnss_pr_err("Failed to load BDF: %s\n", filename); + if (bdf_bypass) { + cnss_pr_info("bdf_bypass is enabled, sending dummy BDF\n"); + temp = filename; + remaining = MAX_BDF_FILE_NAME; + goto bypass_bdf; + } else { + goto err_req_fw; + } + } + + temp = fw_entry->data; + remaining = fw_entry->size; + +bypass_bdf: + cnss_pr_dbg("Downloading BDF: %s, size: %u\n", filename, remaining); + + memset(&resp, 0, sizeof(resp)); + + req_desc.max_msg_len = WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_BDF_DOWNLOAD_REQ_V01; + req_desc.ei_array = wlfw_bdf_download_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_BDF_DOWNLOAD_RESP_V01; + resp_desc.ei_array = wlfw_bdf_download_resp_msg_v01_ei; + + while (remaining) { + req->valid = 1; + req->file_id_valid = 1; + req->file_id = plat_priv->board_info.board_id; + req->total_size_valid = 1; + req->total_size = remaining; + req->seg_id_valid = 1; + req->data_valid = 1; + req->end_valid = 1; + req->bdf_type_valid = 1; + req->bdf_type = CNSS_BDF_ELF; + + if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) { + req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01; + } else { + req->data_len = remaining; + req->end = 1; + } + + memcpy(req->data, temp, req->data_len); + + ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, + req, sizeof(*req), &resp_desc, &resp, + sizeof(resp), QMI_WLFW_TIMEOUT_MS); + if (ret < 0) { + cnss_pr_err("Failed to send BDF download request, err = %d\n", + ret); + goto err_send; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + cnss_pr_err("BDF download request failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = resp.resp.result; + goto err_send; + } + + 
remaining -= req->data_len; + temp += req->data_len; + req->seg_id++; + } + +err_send: + if (!bdf_bypass) + release_firmware(fw_entry); +err_req_fw: + kfree(req); +out: + if (ret) + CNSS_ASSERT(0); + return ret; +} + +int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv) +{ + struct wlfw_m3_info_req_msg_v01 req; + struct wlfw_m3_info_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem; + int ret = 0; + + cnss_pr_dbg("Sending M3 information message, state: 0x%lx\n", + plat_priv->driver_state); + + if (!m3_mem->pa || !m3_mem->size) { + cnss_pr_err("Memory for M3 is not available!\n"); + ret = -ENOMEM; + goto out; + } + + cnss_pr_dbg("M3 memory, va: 0x%pK, pa: %pa, size: 0x%zx\n", + m3_mem->va, &m3_mem->pa, m3_mem->size); + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.addr = plat_priv->m3_mem.pa; + req.size = plat_priv->m3_mem.size; + + req_desc.max_msg_len = WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_M3_INFO_REQ_V01; + req_desc.ei_array = wlfw_m3_info_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_M3_INFO_RESP_V01; + resp_desc.ei_array = wlfw_m3_info_resp_msg_v01_ei; + + ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req, + sizeof(req), &resp_desc, &resp, sizeof(resp), + QMI_WLFW_TIMEOUT_MS); + if (ret < 0) { + cnss_pr_err("Failed to send M3 information request, err = %d\n", + ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + cnss_pr_err("M3 information request failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = resp.resp.result; + goto out; + } + + return 0; + +out: + CNSS_ASSERT(0); + return ret; +} + +int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv, + enum wlfw_driver_mode_enum_v01 mode) +{ + struct wlfw_wlan_mode_req_msg_v01 req; + struct wlfw_wlan_mode_resp_msg_v01 resp; + struct msg_desc 
req_desc, resp_desc; + int ret = 0; + + if (!plat_priv) + return -ENODEV; + + cnss_pr_dbg("Sending mode message, state: 0x%lx, mode: %d\n", + plat_priv->driver_state, mode); + + if (mode == QMI_WLFW_OFF_V01 && + test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) { + cnss_pr_dbg("Recovery is in progress, ignore mode off request.\n"); + return 0; + } + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.mode = mode; + req.hw_debug_valid = 1; + req.hw_debug = 0; + + req_desc.max_msg_len = WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_WLAN_MODE_REQ_V01; + req_desc.ei_array = wlfw_wlan_mode_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_WLAN_MODE_RESP_V01; + resp_desc.ei_array = wlfw_wlan_mode_resp_msg_v01_ei; + + ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req, + sizeof(req), &resp_desc, &resp, sizeof(resp), + QMI_WLFW_TIMEOUT_MS); + if (ret < 0) { + if (mode == QMI_WLFW_OFF_V01 && ret == -ENETRESET) { + cnss_pr_dbg("WLFW service is disconnected while sending mode off request.\n"); + return 0; + } + cnss_pr_err("Failed to send mode request, mode: %d, err = %d\n", + mode, ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + cnss_pr_err("Mode request failed, mode: %d, result: %d err: %d\n", + mode, resp.resp.result, resp.resp.error); + ret = resp.resp.result; + goto out; + } + + return 0; +out: + if (mode != QMI_WLFW_OFF_V01) + CNSS_ASSERT(0); + return ret; +} + +int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv, + struct wlfw_wlan_cfg_req_msg_v01 *data) +{ + struct wlfw_wlan_cfg_req_msg_v01 req; + struct wlfw_wlan_cfg_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int ret = 0; + + cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n", + plat_priv->driver_state); + + if (!plat_priv) + return -ENODEV; + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + 
memcpy(&req, data, sizeof(req)); + + req_desc.max_msg_len = WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_WLAN_CFG_REQ_V01; + req_desc.ei_array = wlfw_wlan_cfg_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_WLAN_CFG_RESP_V01; + resp_desc.ei_array = wlfw_wlan_cfg_resp_msg_v01_ei; + + ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req, + sizeof(req), &resp_desc, &resp, sizeof(resp), + QMI_WLFW_TIMEOUT_MS); + if (ret < 0) { + cnss_pr_err("Failed to send WLAN config request, err = %d\n", + ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + cnss_pr_err("WLAN config request failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = resp.resp.result; + goto out; + } + + return 0; +out: + CNSS_ASSERT(0); + return ret; +} + +int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv, + u32 offset, u32 mem_type, + u32 data_len, u8 *data) +{ + struct wlfw_athdiag_read_req_msg_v01 req; + struct wlfw_athdiag_read_resp_msg_v01 *resp; + struct msg_desc req_desc, resp_desc; + int ret = 0; + + if (!plat_priv) + return -ENODEV; + + if (!plat_priv->qmi_wlfw_clnt) + return -EINVAL; + + cnss_pr_dbg("athdiag read: state 0x%lx, offset %x, mem_type %x, data_len %u\n", + plat_priv->driver_state, offset, mem_type, data_len); + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) + return -ENOMEM; + + memset(&req, 0, sizeof(req)); + + req.offset = offset; + req.mem_type = mem_type; + req.data_len = data_len; + + req_desc.max_msg_len = WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_ATHDIAG_READ_REQ_V01; + req_desc.ei_array = wlfw_athdiag_read_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_ATHDIAG_READ_RESP_V01; + resp_desc.ei_array = wlfw_athdiag_read_resp_msg_v01_ei; + + ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, 
&req, + sizeof(req), &resp_desc, resp, sizeof(*resp), + QMI_WLFW_TIMEOUT_MS); + if (ret < 0) { + cnss_pr_err("Failed to send athdiag read request, err = %d\n", + ret); + goto out; + } + + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { + cnss_pr_err("athdiag read request failed, result: %d, err: %d\n", + resp->resp.result, resp->resp.error); + ret = resp->resp.result; + goto out; + } + + if (!resp->data_valid || resp->data_len != data_len) { + cnss_pr_err("athdiag read data is invalid, data_valid = %u, data_len = %u\n", + resp->data_valid, resp->data_len); + ret = -EINVAL; + goto out; + } + + memcpy(data, resp->data, resp->data_len); + +out: + kfree(resp); + return ret; +} + +int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv, + u32 offset, u32 mem_type, + u32 data_len, u8 *data) +{ + struct wlfw_athdiag_write_req_msg_v01 *req; + struct wlfw_athdiag_write_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + int ret = 0; + + if (!plat_priv) + return -ENODEV; + + if (!plat_priv->qmi_wlfw_clnt) + return -EINVAL; + + cnss_pr_dbg("athdiag write: state 0x%lx, offset %x, mem_type %x, data_len %u, data %p\n", + plat_priv->driver_state, offset, mem_type, data_len, data); + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + memset(&resp, 0, sizeof(resp)); + + req->offset = offset; + req->mem_type = mem_type; + req->data_len = data_len; + memcpy(req->data, data, data_len); + + req_desc.max_msg_len = WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_ATHDIAG_WRITE_REQ_V01; + req_desc.ei_array = wlfw_athdiag_write_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_ATHDIAG_WRITE_RESP_V01; + resp_desc.ei_array = wlfw_athdiag_write_resp_msg_v01_ei; + + ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, req, + sizeof(*req), &resp_desc, &resp, sizeof(resp), + QMI_WLFW_TIMEOUT_MS); + if (ret < 0) { + cnss_pr_err("Failed to send 
athdiag write request, err = %d\n", + ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + cnss_pr_err("athdiag write request failed, result: %d, err: %d\n", + resp.resp.result, resp.resp.error); + ret = resp.resp.result; + goto out; + } + +out: + kfree(req); + return ret; +} + +int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv, + u8 fw_log_mode) +{ + int ret; + struct wlfw_ini_req_msg_v01 req; + struct wlfw_ini_resp_msg_v01 resp; + struct msg_desc req_desc, resp_desc; + + if (!plat_priv) + return -ENODEV; + + cnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log_mode: %d\n", + plat_priv->driver_state, fw_log_mode); + + memset(&req, 0, sizeof(req)); + memset(&resp, 0, sizeof(resp)); + + req.enablefwlog_valid = 1; + req.enablefwlog = fw_log_mode; + + req_desc.max_msg_len = WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_WLFW_INI_REQ_V01; + req_desc.ei_array = wlfw_ini_req_msg_v01_ei; + + resp_desc.max_msg_len = WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_WLFW_INI_RESP_V01; + resp_desc.ei_array = wlfw_ini_resp_msg_v01_ei; + + ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, + &req_desc, &req, sizeof(req), + &resp_desc, &resp, sizeof(resp), + QMI_WLFW_TIMEOUT_MS); + if (ret < 0) { + cnss_pr_err("Send INI req failed fw_log_mode: %d, ret: %d\n", + fw_log_mode, ret); + goto out; + } + + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + cnss_pr_err("QMI INI request rejected, fw_log_mode:%d result:%d error:%d\n", + fw_log_mode, resp.resp.result, resp.resp.error); + ret = resp.resp.result; + goto out; + } + + return 0; + +out: + return ret; +} + +static void cnss_wlfw_clnt_ind(struct qmi_handle *handle, + unsigned int msg_id, void *msg, + unsigned int msg_len, void *ind_cb_priv) +{ + struct cnss_plat_data *plat_priv = ind_cb_priv; + + cnss_pr_dbg("Received QMI WLFW indication, msg_id: 0x%x, msg_len: %d\n", + msg_id, msg_len); + + if (!plat_priv) { + cnss_pr_err("plat_priv is NULL!\n"); + return; + } + + 
switch (msg_id) { + case QMI_WLFW_REQUEST_MEM_IND_V01: + cnss_wlfw_request_mem_ind_hdlr(plat_priv, msg, msg_len); + break; + case QMI_WLFW_FW_MEM_READY_IND_V01: + cnss_driver_event_post(plat_priv, + CNSS_DRIVER_EVENT_FW_MEM_READY, + false, NULL); + break; + case QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01: + cnss_driver_event_post(plat_priv, + CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE, + false, NULL); + break; + case QMI_WLFW_FW_READY_IND_V01: + cnss_driver_event_post(plat_priv, + CNSS_DRIVER_EVENT_FW_READY, + false, NULL); + break; + case QMI_WLFW_PIN_CONNECT_RESULT_IND_V01: + cnss_qmi_pin_result_ind_hdlr(plat_priv, msg, msg_len); + break; + default: + cnss_pr_err("Invalid QMI WLFW indication, msg_id: 0x%x\n", + msg_id); + break; + } +} + +unsigned int cnss_get_qmi_timeout(void) +{ + cnss_pr_dbg("QMI timeout is %u ms\n", QMI_WLFW_TIMEOUT_MS); + + return QMI_WLFW_TIMEOUT_MS; +} +EXPORT_SYMBOL(cnss_get_qmi_timeout); + +int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + + if (!plat_priv) + return -ENODEV; + + plat_priv->qmi_wlfw_clnt = + qmi_handle_create(cnss_wlfw_clnt_notifier, plat_priv); + if (!plat_priv->qmi_wlfw_clnt) { + cnss_pr_err("Failed to create QMI client handle!\n"); + ret = -ENOMEM; + goto err_create_handle; + } + + ret = qmi_connect_to_service(plat_priv->qmi_wlfw_clnt, + WLFW_SERVICE_ID_V01, + WLFW_SERVICE_VERS_V01, + WLFW_SERVICE_INS_ID_V01); + if (ret < 0) { + cnss_pr_err("Failed to connect to QMI WLFW service, err = %d\n", + ret); + goto out; + } + + ret = qmi_register_ind_cb(plat_priv->qmi_wlfw_clnt, + cnss_wlfw_clnt_ind, plat_priv); + if (ret < 0) { + cnss_pr_err("Failed to register QMI WLFW service indication callback, err = %d\n", + ret); + goto out; + } + + set_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state); + + cnss_pr_info("QMI WLFW service connected, state: 0x%lx\n", + plat_priv->driver_state); + + ret = cnss_wlfw_host_cap_send_sync(plat_priv); + if (ret < 0) + goto out; + + ret = 
cnss_wlfw_ind_register_send_sync(plat_priv); + if (ret < 0) + goto out; + + return 0; +out: + qmi_handle_destroy(plat_priv->qmi_wlfw_clnt); + plat_priv->qmi_wlfw_clnt = NULL; +err_create_handle: + CNSS_ASSERT(0); + return ret; +} + +int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv) +{ + if (!plat_priv) + return -ENODEV; + + qmi_handle_destroy(plat_priv->qmi_wlfw_clnt); + plat_priv->qmi_wlfw_clnt = NULL; + + clear_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state); + + cnss_pr_info("QMI WLFW service disconnected, state: 0x%lx\n", + plat_priv->driver_state); + + return 0; +} + +int cnss_qmi_init(struct cnss_plat_data *plat_priv) +{ + int ret = 0; + + INIT_WORK(&plat_priv->qmi_recv_msg_work, + cnss_wlfw_clnt_notifier_work); + + plat_priv->qmi_wlfw_clnt_nb.notifier_call = + cnss_wlfw_clnt_svc_event_notifier; + + ret = qmi_svc_event_notifier_register(WLFW_SERVICE_ID_V01, + WLFW_SERVICE_VERS_V01, + WLFW_SERVICE_INS_ID_V01, + &plat_priv->qmi_wlfw_clnt_nb); + if (ret < 0) + cnss_pr_err("Failed to register QMI event notifier, err = %d\n", + ret); + + return ret; +} + +void cnss_qmi_deinit(struct cnss_plat_data *plat_priv) +{ + qmi_svc_event_notifier_unregister(WLFW_SERVICE_ID_V01, + WLFW_SERVICE_VERS_V01, + WLFW_SERVICE_INS_ID_V01, + &plat_priv->qmi_wlfw_clnt_nb); +} diff --git a/drivers/net/wireless/cnss2/qmi.h b/drivers/net/wireless/cnss2/qmi.h new file mode 100644 index 000000000000..70d8d404c48e --- /dev/null +++ b/drivers/net/wireless/cnss2/qmi.h @@ -0,0 +1,41 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef _CNSS_QMI_H +#define _CNSS_QMI_H + +#include "wlan_firmware_service_v01.h" + +struct cnss_plat_data; + +int cnss_qmi_init(struct cnss_plat_data *plat_priv); +void cnss_qmi_deinit(struct cnss_plat_data *plat_priv); +int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv); +int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv); +int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv); +int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv); +int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv); +int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv); +int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv, + enum wlfw_driver_mode_enum_v01 mode); +int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv, + struct wlfw_wlan_cfg_req_msg_v01 *data); +int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv, + u32 offset, u32 mem_type, + u32 data_len, u8 *data); +int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv, + u32 offset, u32 mem_type, + u32 data_len, u8 *data); +int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv, + u8 fw_log_mode); + +#endif /* _CNSS_QMI_H */ diff --git a/drivers/net/wireless/cnss2/utils.c b/drivers/net/wireless/cnss2/utils.c new file mode 100644 index 000000000000..9ffe386e3677 --- /dev/null +++ b/drivers/net/wireless/cnss2/utils.c @@ -0,0 +1,129 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#define CNSS_MAX_CH_NUM 45 + +#include <linux/module.h> +#include <linux/slab.h> + +static DEFINE_MUTEX(unsafe_channel_list_lock); +static DEFINE_MUTEX(dfs_nol_info_lock); + +static struct cnss_unsafe_channel_list { + u16 unsafe_ch_count; + u16 unsafe_ch_list[CNSS_MAX_CH_NUM]; +} unsafe_channel_list; + +static struct cnss_dfs_nol_info { + void *dfs_nol_info; + u16 dfs_nol_info_len; +} dfs_nol_info; + +int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count) +{ + mutex_lock(&unsafe_channel_list_lock); + if ((!unsafe_ch_list) || (ch_count > CNSS_MAX_CH_NUM)) { + mutex_unlock(&unsafe_channel_list_lock); + return -EINVAL; + } + + unsafe_channel_list.unsafe_ch_count = ch_count; + + if (ch_count != 0) { + memcpy((char *)unsafe_channel_list.unsafe_ch_list, + (char *)unsafe_ch_list, ch_count * sizeof(u16)); + } + mutex_unlock(&unsafe_channel_list_lock); + + return 0; +} +EXPORT_SYMBOL(cnss_set_wlan_unsafe_channel); + +int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, + u16 *ch_count, u16 buf_len) +{ + mutex_lock(&unsafe_channel_list_lock); + if (!unsafe_ch_list || !ch_count) { + mutex_unlock(&unsafe_channel_list_lock); + return -EINVAL; + } + + if (buf_len < (unsafe_channel_list.unsafe_ch_count * sizeof(u16))) { + mutex_unlock(&unsafe_channel_list_lock); + return -ENOMEM; + } + + *ch_count = unsafe_channel_list.unsafe_ch_count; + memcpy((char *)unsafe_ch_list, + (char *)unsafe_channel_list.unsafe_ch_list, + unsafe_channel_list.unsafe_ch_count * sizeof(u16)); + mutex_unlock(&unsafe_channel_list_lock); + + return 0; +} +EXPORT_SYMBOL(cnss_get_wlan_unsafe_channel); + +int cnss_wlan_set_dfs_nol(const void *info, u16 info_len) +{ + void *temp; + struct cnss_dfs_nol_info *dfs_info; + + mutex_lock(&dfs_nol_info_lock); + if (!info || !info_len) { + mutex_unlock(&dfs_nol_info_lock); + return -EINVAL; + } + + temp = kmalloc(info_len, GFP_KERNEL); + if (!temp) { + mutex_unlock(&dfs_nol_info_lock); + 
return -ENOMEM; + } + + memcpy(temp, info, info_len); + dfs_info = &dfs_nol_info; + kfree(dfs_info->dfs_nol_info); + + dfs_info->dfs_nol_info = temp; + dfs_info->dfs_nol_info_len = info_len; + mutex_unlock(&dfs_nol_info_lock); + + return 0; +} +EXPORT_SYMBOL(cnss_wlan_set_dfs_nol); + +int cnss_wlan_get_dfs_nol(void *info, u16 info_len) +{ + int len; + struct cnss_dfs_nol_info *dfs_info; + + mutex_lock(&dfs_nol_info_lock); + if (!info || !info_len) { + mutex_unlock(&dfs_nol_info_lock); + return -EINVAL; + } + + dfs_info = &dfs_nol_info; + + if (!dfs_info->dfs_nol_info || dfs_info->dfs_nol_info_len == 0) { + mutex_unlock(&dfs_nol_info_lock); + return -ENOENT; + } + + len = min(info_len, dfs_info->dfs_nol_info_len); + + memcpy(info, dfs_info->dfs_nol_info, len); + mutex_unlock(&dfs_nol_info_lock); + + return len; +} +EXPORT_SYMBOL(cnss_wlan_get_dfs_nol); diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c new file mode 100644 index 000000000000..84a4707e9cc3 --- /dev/null +++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c @@ -0,0 +1,2168 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "wlan_firmware_service_v01.h" + +static struct elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + pipe_num), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_pipedir_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + pipe_dir), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + nentries), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + nbytes_max), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01, + flags), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01, + service_id), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_pipedir_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01, + pipe_dir), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01, + pipe_num), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = 
QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01, + id), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01, + offset), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01, + addr), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_memory_region_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_memory_region_info_s_v01, + region_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_memory_region_info_s_v01, + size), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_memory_region_info_s_v01, + secure_flag), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_rf_chip_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_rf_chip_info_s_v01, + chip_id), + }, + { + .data_type = 
QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_rf_chip_info_s_v01, + chip_family), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_rf_board_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_rf_board_info_s_v01, + board_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_soc_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_soc_info_s_v01, + soc_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_fw_version_info_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_fw_version_info_s_v01, + fw_version), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_fw_version_info_s_v01, + fw_build_timestamp), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_ind_register_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_ind_register_req_msg_v01, + fw_ready_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = 
offsetof(struct wlfw_ind_register_req_msg_v01, + fw_ready_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + initiate_cal_download_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + initiate_cal_download_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + initiate_cal_update_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + initiate_cal_update_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + msa_ready_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + msa_ready_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + pin_connect_result_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + pin_connect_result_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct 
wlfw_ind_register_req_msg_v01, + client_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + client_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + request_mem_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + request_mem_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + fw_mem_ready_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + fw_mem_ready_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + cold_boot_cal_done_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + cold_boot_cal_done_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + rejuvenate_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + rejuvenate_enable), + }, + { + 
.data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_ind_register_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_ind_register_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_ind_register_resp_msg_v01, + fw_status_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_ind_register_resp_msg_v01, + fw_status), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_fw_ready_ind_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_msa_ready_ind_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_pin_connect_result_ind_msg_v01, + pwr_pin_result_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_pin_connect_result_ind_msg_v01, + pwr_pin_result), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + wlfw_pin_connect_result_ind_msg_v01, + phy_io_pin_result_valid), + }, + { + .data_type = 
QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + wlfw_pin_connect_result_ind_msg_v01, + phy_io_pin_result), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + wlfw_pin_connect_result_ind_msg_v01, + rf_pin_result_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct + wlfw_pin_connect_result_ind_msg_v01, + rf_pin_result), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_wlan_mode_req_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_driver_mode_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, + mode), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, + hw_debug_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01, + hw_debug), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_wlan_mode_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { + { + 
.data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + host_version_valid), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + host_version), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + tgt_cfg_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + tgt_cfg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_CE_V01, + .elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + tgt_cfg), + .ei_array = wlfw_ce_tgt_pipe_cfg_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + svc_cfg_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + svc_cfg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_SVC_V01, + .elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + svc_cfg), + .ei_array = wlfw_ce_svc_pipe_cfg_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + 
shadow_reg_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01, + .elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg), + .ei_array = wlfw_shadow_reg_cfg_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_v2_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_v2_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01, + .elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01, + shadow_reg_v2), + .ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_wlan_cfg_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cap_req_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cap_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + 
.elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + chip_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct wlfw_rf_chip_info_s_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + chip_info), + .ei_array = wlfw_rf_chip_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + board_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct wlfw_rf_board_info_s_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + board_info), + .ei_array = wlfw_rf_board_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + soc_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct wlfw_soc_info_s_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + soc_info), + .ei_array = wlfw_soc_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + fw_version_info_valid), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct wlfw_fw_version_info_s_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + 
fw_version_info), + .ei_array = wlfw_fw_version_info_s_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + fw_build_id_valid), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + fw_build_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + num_macs_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_cap_resp_msg_v01, + num_macs), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_bdf_download_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + valid), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + file_id_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + file_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + total_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + 
.tlv_type = 0x11, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + total_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + seg_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + seg_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u16), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(u8), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + data), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + end_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + end), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + bdf_type_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_bdf_download_req_msg_v01, + bdf_type), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + 
.is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_bdf_download_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_bdf_download_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_report_req_msg_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, + meta_data_len), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = QMI_WLFW_MAX_NUM_CAL_V01, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_cal_report_req_msg_v01, + meta_data), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_report_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_cal_report_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct wlfw_initiate_cal_download_ind_msg_v01, + cal_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_download_req_msg_v01_ei[] = { + { + .data_type = 
QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + valid), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + file_id_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + file_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + total_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + total_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + seg_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + seg_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u16), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(u8), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x13, + 
.offset = offsetof(struct wlfw_cal_download_req_msg_v01, + data), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + end_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cal_download_req_msg_v01, + end), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_download_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_cal_download_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + wlfw_initiate_cal_update_ind_msg_v01, + cal_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_initiate_cal_update_ind_msg_v01, + total_size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_update_req_msg_v01_ei[] = { + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_cal_update_req_msg_v01, + cal_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array 
= NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_cal_update_req_msg_v01, + seg_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_update_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + file_id_valid), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + file_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + total_size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + total_size), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + seg_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + seg_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + data_valid), + }, + { + .data_type = 
QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u16), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01, + .elem_size = sizeof(u8), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + data), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + end_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_cal_update_resp_msg_v01, + end), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_msa_info_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_msa_info_req_msg_v01, + msa_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_msa_info_req_msg_v01, + size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_msa_info_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_msa_info_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct wlfw_msa_info_resp_msg_v01, + mem_region_info_len), + }, + { + .data_type = 
QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01, + .elem_size = sizeof(struct wlfw_memory_region_info_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct wlfw_msa_info_resp_msg_v01, + mem_region_info), + .ei_array = wlfw_memory_region_info_s_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_msa_ready_req_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_msa_ready_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_msa_ready_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_ini_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_ini_req_msg_v01, + enablefwlog_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_ini_req_msg_v01, + enablefwlog), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_ini_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_ini_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_athdiag_read_req_msg_v01_ei[] = { + { + .data_type = 
QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, + mem_type), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01, + data_len), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_athdiag_read_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_athdiag_read_resp_msg_v01, + data_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u16), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_athdiag_read_resp_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01, + .elem_size = sizeof(u8), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + wlfw_athdiag_read_resp_msg_v01, + data), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_athdiag_write_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + 
wlfw_athdiag_write_req_msg_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_athdiag_write_req_msg_v01, + mem_type), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u16), + .is_array = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct + wlfw_athdiag_write_req_msg_v01, + data_len), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01, + .elem_size = sizeof(u8), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct + wlfw_athdiag_write_req_msg_v01, + data), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_athdiag_write_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_vbatt_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_vbatt_req_msg_v01, + voltage_uv), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_vbatt_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_vbatt_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct 
elem_info wlfw_mac_addr_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_mac_addr_req_msg_v01, + mac_addr_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01, + .elem_size = sizeof(u8), + .is_array = STATIC_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_mac_addr_req_msg_v01, + mac_addr), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_mac_addr_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_mac_addr_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_host_cap_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + daemon_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + daemon_support), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_host_cap_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_host_cap_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info 
wlfw_request_mem_ind_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_request_mem_ind_msg_v01, + size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_respond_mem_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, + addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, + size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_respond_mem_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_respond_mem_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + cause_for_rejuvenation_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array 
= NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + cause_for_rejuvenation), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + requesting_sub_system_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + requesting_sub_system), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + line_number_valid), + }, + { + .data_type = QMI_UNSIGNED_2_BYTE, + .elem_len = 1, + .elem_size = sizeof(u16), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + line_number), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + function_name_valid), + }, + { + .data_type = QMI_STRING, + .elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01, + function_name), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + wlfw_rejuvenate_ack_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + 
.data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = { + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_dynamic_feature_mask_req_msg_v01, + mask_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_dynamic_feature_mask_req_msg_v01, + mask), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct wlfw_dynamic_feature_mask_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_dynamic_feature_mask_resp_msg_v01, + prev_mask_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct wlfw_dynamic_feature_mask_resp_msg_v01, + prev_mask), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct wlfw_dynamic_feature_mask_resp_msg_v01, + curr_mask_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct wlfw_dynamic_feature_mask_resp_msg_v01, + curr_mask), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + 
+struct elem_info wlfw_m3_info_req_msg_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_m3_info_req_msg_v01, + addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_m3_info_req_msg_v01, + size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_m3_info_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct wlfw_m3_info_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .is_array = QMI_COMMON_TLV_TYPE, + }, +}; diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h new file mode 100644 index 000000000000..cb8225a7c3c3 --- /dev/null +++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h @@ -0,0 +1,645 @@ +/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef WLAN_FIRMWARE_SERVICE_V01_H +#define WLAN_FIRMWARE_SERVICE_V01_H + +#include <linux/qmi_encdec.h> +#include <soc/qcom/msm_qmi_interface.h> + +#define WLFW_SERVICE_ID_V01 0x45 +#define WLFW_SERVICE_VERS_V01 0x01 + +#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025 +#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037 +#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A +#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034 +#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B +#define QMI_WLFW_M3_INFO_REQ_V01 0x003C +#define QMI_WLFW_CAP_REQ_V01 0x0024 +#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026 +#define QMI_WLFW_M3_INFO_RESP_V01 0x003C +#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029 +#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027 +#define QMI_WLFW_INI_RESP_V01 0x002F +#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026 +#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033 +#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028 +#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034 +#define QMI_WLFW_MSA_READY_IND_V01 0x002B +#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031 +#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022 +#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020 +#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023 +#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0038 +#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 +#define QMI_WLFW_REJUVENATE_IND_V01 0x0039 +#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B +#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031 +#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022 +#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036 +#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C +#define QMI_WLFW_FW_READY_IND_V01 0x0021 +#define QMI_WLFW_MSA_READY_RESP_V01 0x002E +#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029 +#define QMI_WLFW_INI_REQ_V01 0x002F +#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025 +#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A +#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D +#define QMI_WLFW_MSA_READY_REQ_V01 0x002E +#define QMI_WLFW_CAP_RESP_V01 0x0024 +#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A 
+#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030 +#define QMI_WLFW_VBATT_REQ_V01 0x0032 +#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033 +#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036 +#define QMI_WLFW_VBATT_RESP_V01 0x0032 +#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D +#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027 +#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030 +#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023 +#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020 + +#define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2 +#define QMI_WLFW_MAX_NUM_CAL_V01 5 +#define QMI_WLFW_MAX_DATA_SIZE_V01 6144 +#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128 +#define QMI_WLFW_MAX_NUM_CE_V01 12 +#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32 +#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 512 +#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128 +#define QMI_WLFW_MAX_STR_LEN_V01 16 +#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24 +#define QMI_WLFW_MAC_ADDR_SIZE_V01 6 +#define QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01 36 +#define QMI_WLFW_MAX_NUM_SVC_V01 24 + +enum wlfw_driver_mode_enum_v01 { + WLFW_DRIVER_MODE_ENUM_MIN_VAL_V01 = INT_MIN, + QMI_WLFW_MISSION_V01 = 0, + QMI_WLFW_FTM_V01 = 1, + QMI_WLFW_EPPING_V01 = 2, + QMI_WLFW_WALTEST_V01 = 3, + QMI_WLFW_OFF_V01 = 4, + QMI_WLFW_CCPM_V01 = 5, + QMI_WLFW_QVIT_V01 = 6, + QMI_WLFW_CALIBRATION_V01 = 7, + WLFW_DRIVER_MODE_ENUM_MAX_VAL_V01 = INT_MAX, +}; + +enum wlfw_cal_temp_id_enum_v01 { + WLFW_CAL_TEMP_ID_ENUM_MIN_VAL_V01 = INT_MIN, + QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0, + QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1, + QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2, + QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3, + QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4, + WLFW_CAL_TEMP_ID_ENUM_MAX_VAL_V01 = INT_MAX, +}; + +enum wlfw_pipedir_enum_v01 { + WLFW_PIPEDIR_ENUM_MIN_VAL_V01 = INT_MIN, + QMI_WLFW_PIPEDIR_NONE_V01 = 0, + QMI_WLFW_PIPEDIR_IN_V01 = 1, + QMI_WLFW_PIPEDIR_OUT_V01 = 2, + QMI_WLFW_PIPEDIR_INOUT_V01 = 3, + WLFW_PIPEDIR_ENUM_MAX_VAL_V01 = INT_MAX, +}; + +#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00) +#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01) +#define 
QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02) +#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((u32)0x04) +#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((u32)0x08) +#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((u32)0x10) + +#define QMI_WLFW_ALREADY_REGISTERED_V01 ((u64)0x01ULL) +#define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL) +#define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL) +#define QMI_WLFW_FW_MEM_READY_V01 ((u64)0x08ULL) + +#define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL) + +struct wlfw_ce_tgt_pipe_cfg_s_v01 { + u32 pipe_num; + enum wlfw_pipedir_enum_v01 pipe_dir; + u32 nentries; + u32 nbytes_max; + u32 flags; +}; + +struct wlfw_ce_svc_pipe_cfg_s_v01 { + u32 service_id; + enum wlfw_pipedir_enum_v01 pipe_dir; + u32 pipe_num; +}; + +struct wlfw_shadow_reg_cfg_s_v01 { + u16 id; + u16 offset; +}; + +struct wlfw_shadow_reg_v2_cfg_s_v01 { + u32 addr; +}; + +struct wlfw_memory_region_info_s_v01 { + u64 region_addr; + u32 size; + u8 secure_flag; +}; + +struct wlfw_rf_chip_info_s_v01 { + u32 chip_id; + u32 chip_family; +}; + +struct wlfw_rf_board_info_s_v01 { + u32 board_id; +}; + +struct wlfw_soc_info_s_v01 { + u32 soc_id; +}; + +struct wlfw_fw_version_info_s_v01 { + u32 fw_version; + char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1]; +}; + +struct wlfw_ind_register_req_msg_v01 { + u8 fw_ready_enable_valid; + u8 fw_ready_enable; + u8 initiate_cal_download_enable_valid; + u8 initiate_cal_download_enable; + u8 initiate_cal_update_enable_valid; + u8 initiate_cal_update_enable; + u8 msa_ready_enable_valid; + u8 msa_ready_enable; + u8 pin_connect_result_enable_valid; + u8 pin_connect_result_enable; + u8 client_id_valid; + u32 client_id; + u8 request_mem_enable_valid; + u8 request_mem_enable; + u8 fw_mem_ready_enable_valid; + u8 fw_mem_ready_enable; + u8 cold_boot_cal_done_enable_valid; + u8 cold_boot_cal_done_enable; + u8 rejuvenate_enable_valid; + u32 rejuvenate_enable; +}; + +#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 46 +extern struct elem_info 
wlfw_ind_register_req_msg_v01_ei[]; + +struct wlfw_ind_register_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 fw_status_valid; + u64 fw_status; +}; + +#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18 +extern struct elem_info wlfw_ind_register_resp_msg_v01_ei[]; + +struct wlfw_fw_ready_ind_msg_v01 { + char placeholder; +}; + +#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_fw_ready_ind_msg_v01_ei[]; + +struct wlfw_msa_ready_ind_msg_v01 { + char placeholder; +}; + +#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_msa_ready_ind_msg_v01_ei[]; + +struct wlfw_pin_connect_result_ind_msg_v01 { + u8 pwr_pin_result_valid; + u32 pwr_pin_result; + u8 phy_io_pin_result_valid; + u32 phy_io_pin_result; + u8 rf_pin_result_valid; + u32 rf_pin_result; +}; + +#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21 +extern struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[]; + +struct wlfw_wlan_mode_req_msg_v01 { + enum wlfw_driver_mode_enum_v01 mode; + u8 hw_debug_valid; + u8 hw_debug; +}; + +#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11 +extern struct elem_info wlfw_wlan_mode_req_msg_v01_ei[]; + +struct wlfw_wlan_mode_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[]; + +struct wlfw_wlan_cfg_req_msg_v01 { + u8 host_version_valid; + char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1]; + u8 tgt_cfg_valid; + u32 tgt_cfg_len; + struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01]; + u8 svc_cfg_valid; + u32 svc_cfg_len; + struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01]; + u8 shadow_reg_valid; + u32 shadow_reg_len; + struct wlfw_shadow_reg_cfg_s_v01 + shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01]; + u8 shadow_reg_v2_valid; + u32 shadow_reg_v2_len; + struct wlfw_shadow_reg_v2_cfg_s_v01 + shadow_reg_v2[QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01]; +}; + +#define 
WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803 +extern struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[]; + +struct wlfw_wlan_cfg_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[]; + +struct wlfw_cap_req_msg_v01 { + char placeholder; +}; + +#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_cap_req_msg_v01_ei[]; + +struct wlfw_cap_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 chip_info_valid; + struct wlfw_rf_chip_info_s_v01 chip_info; + u8 board_info_valid; + struct wlfw_rf_board_info_s_v01 board_info; + u8 soc_info_valid; + struct wlfw_soc_info_s_v01 soc_info; + u8 fw_version_info_valid; + struct wlfw_fw_version_info_s_v01 fw_version_info; + u8 fw_build_id_valid; + char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1]; + u8 num_macs_valid; + u8 num_macs; +}; + +#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207 +extern struct elem_info wlfw_cap_resp_msg_v01_ei[]; + +struct wlfw_bdf_download_req_msg_v01 { + u8 valid; + u8 file_id_valid; + enum wlfw_cal_temp_id_enum_v01 file_id; + u8 total_size_valid; + u32 total_size; + u8 seg_id_valid; + u32 seg_id; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; + u8 end_valid; + u8 end; + u8 bdf_type_valid; + u8 bdf_type; +}; + +#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182 +extern struct elem_info wlfw_bdf_download_req_msg_v01_ei[]; + +struct wlfw_bdf_download_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_bdf_download_resp_msg_v01_ei[]; + +struct wlfw_cal_report_req_msg_v01 { + u32 meta_data_len; + enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01]; +}; + +#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 24 +extern struct elem_info wlfw_cal_report_req_msg_v01_ei[]; + +struct wlfw_cal_report_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + 
+#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_cal_report_resp_msg_v01_ei[]; + +struct wlfw_initiate_cal_download_ind_msg_v01 { + enum wlfw_cal_temp_id_enum_v01 cal_id; +}; + +#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[]; + +struct wlfw_cal_download_req_msg_v01 { + u8 valid; + u8 file_id_valid; + enum wlfw_cal_temp_id_enum_v01 file_id; + u8 total_size_valid; + u32 total_size; + u8 seg_id_valid; + u32 seg_id; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; + u8 end_valid; + u8 end; +}; + +#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178 +extern struct elem_info wlfw_cal_download_req_msg_v01_ei[]; + +struct wlfw_cal_download_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_cal_download_resp_msg_v01_ei[]; + +struct wlfw_initiate_cal_update_ind_msg_v01 { + enum wlfw_cal_temp_id_enum_v01 cal_id; + u32 total_size; +}; + +#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14 +extern struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[]; + +struct wlfw_cal_update_req_msg_v01 { + enum wlfw_cal_temp_id_enum_v01 cal_id; + u32 seg_id; +}; + +#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14 +extern struct elem_info wlfw_cal_update_req_msg_v01_ei[]; + +struct wlfw_cal_update_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 file_id_valid; + enum wlfw_cal_temp_id_enum_v01 file_id; + u8 total_size_valid; + u32 total_size; + u8 seg_id_valid; + u32 seg_id; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_DATA_SIZE_V01]; + u8 end_valid; + u8 end; +}; + +#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181 +extern struct elem_info wlfw_cal_update_resp_msg_v01_ei[]; + +struct wlfw_msa_info_req_msg_v01 { + u64 msa_addr; + u32 size; +}; + +#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18 +extern struct elem_info 
wlfw_msa_info_req_msg_v01_ei[]; + +struct wlfw_msa_info_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u32 mem_region_info_len; + struct wlfw_memory_region_info_s_v01 + mem_region_info[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01]; +}; + +#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37 +extern struct elem_info wlfw_msa_info_resp_msg_v01_ei[]; + +struct wlfw_msa_ready_req_msg_v01 { + char placeholder; +}; + +#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_msa_ready_req_msg_v01_ei[]; + +struct wlfw_msa_ready_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_msa_ready_resp_msg_v01_ei[]; + +struct wlfw_ini_req_msg_v01 { + u8 enablefwlog_valid; + u8 enablefwlog; +}; + +#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4 +extern struct elem_info wlfw_ini_req_msg_v01_ei[]; + +struct wlfw_ini_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_ini_resp_msg_v01_ei[]; + +struct wlfw_athdiag_read_req_msg_v01 { + u32 offset; + u32 mem_type; + u32 data_len; +}; + +#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21 +extern struct elem_info wlfw_athdiag_read_req_msg_v01_ei[]; + +struct wlfw_athdiag_read_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 data_valid; + u32 data_len; + u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01]; +}; + +#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 524 +extern struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[]; + +struct wlfw_athdiag_write_req_msg_v01 { + u32 offset; + u32 mem_type; + u32 data_len; + u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01]; +}; + +#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 531 +extern struct elem_info wlfw_athdiag_write_req_msg_v01_ei[]; + +struct wlfw_athdiag_write_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info 
wlfw_athdiag_write_resp_msg_v01_ei[]; + +struct wlfw_vbatt_req_msg_v01 { + u64 voltage_uv; +}; + +#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11 +extern struct elem_info wlfw_vbatt_req_msg_v01_ei[]; + +struct wlfw_vbatt_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_vbatt_resp_msg_v01_ei[]; + +struct wlfw_mac_addr_req_msg_v01 { + u8 mac_addr_valid; + u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01]; +}; + +#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9 +extern struct elem_info wlfw_mac_addr_req_msg_v01_ei[]; + +struct wlfw_mac_addr_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[]; + +struct wlfw_host_cap_req_msg_v01 { + u8 daemon_support_valid; + u8 daemon_support; +}; + +#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4 +extern struct elem_info wlfw_host_cap_req_msg_v01_ei[]; + +struct wlfw_host_cap_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_host_cap_resp_msg_v01_ei[]; + +struct wlfw_request_mem_ind_msg_v01 { + u32 size; +}; + +#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_request_mem_ind_msg_v01_ei[]; + +struct wlfw_respond_mem_req_msg_v01 { + u64 addr; + u32 size; +}; + +#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 18 +extern struct elem_info wlfw_respond_mem_req_msg_v01_ei[]; + +struct wlfw_respond_mem_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_respond_mem_resp_msg_v01_ei[]; + +struct wlfw_fw_mem_ready_ind_msg_v01 { + char placeholder; +}; + +#define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[]; + +struct wlfw_cold_boot_cal_done_ind_msg_v01 { + char placeholder; +}; + +#define 
WLFW_COLD_BOOT_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[]; + +struct wlfw_rejuvenate_ind_msg_v01 { + u8 cause_for_rejuvenation_valid; + u8 cause_for_rejuvenation; + u8 requesting_sub_system_valid; + u8 requesting_sub_system; + u8 line_number_valid; + u16 line_number; + u8 function_name_valid; + char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1]; +}; + +#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144 +extern struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[]; + +struct wlfw_rejuvenate_ack_req_msg_v01 { + char placeholder; +}; + +#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[]; + +struct wlfw_rejuvenate_ack_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[]; + +struct wlfw_dynamic_feature_mask_req_msg_v01 { + u8 mask_valid; + u64 mask; +}; + +#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11 +extern struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[]; + +struct wlfw_dynamic_feature_mask_resp_msg_v01 { + struct qmi_response_type_v01 resp; + u8 prev_mask_valid; + u64 prev_mask; + u8 curr_mask_valid; + u64 curr_mask; +}; + +#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29 +extern struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[]; + +struct wlfw_m3_info_req_msg_v01 { + u64 addr; + u32 size; +}; + +#define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18 +extern struct elem_info wlfw_m3_info_req_msg_v01_ei[]; + +struct wlfw_m3_info_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; + +#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info wlfw_m3_info_resp_msg_v01_ei[]; + +#endif diff --git a/drivers/net/wireless/cnss_utils/cnss_utils.c b/drivers/net/wireless/cnss_utils/cnss_utils.c index a452900868c4..d73846efbc4c 100644 --- 
a/drivers/net/wireless/cnss_utils/cnss_utils.c +++ b/drivers/net/wireless/cnss_utils/cnss_utils.c @@ -283,7 +283,7 @@ static int __init cnss_utils_init(void) { struct cnss_utils_priv *priv = NULL; - priv = kmalloc(sizeof(*priv), GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 888e9cfef51a..34a062ccb11d 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) queue->rx.req_prod_pvt = req_prod; /* Not enough requests? Try again later. */ - if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { + if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) { mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); return; } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 044253dca30a..b8a5a8e8f57d 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -27,6 +27,13 @@ enum { NVME_NS_LIGHTNVM = 1, }; +/* The below value is the specific amount of delay needed before checking + * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the + * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was + * found empirically. + */ +#define NVME_QUIRK_DELAY_AMOUNT 2000 + /* * Represents an NVM Express device. Each nvme_dev is a PCI function. 
*/ diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index c851bc53831c..4c673d45f1bd 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1633,10 +1633,15 @@ static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled) */ static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap) { + struct pci_dev *pdev = to_pci_dev(dev->dev); + dev->ctrl_config &= ~NVME_CC_SHN_MASK; dev->ctrl_config &= ~NVME_CC_ENABLE; writel(dev->ctrl_config, &dev->bar->cc); + if (pdev->vendor == 0x1c58 && pdev->device == 0x0003) + msleep(NVME_QUIRK_DELAY_AMOUNT); + return nvme_wait_ready(dev, cap, false); } diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 0438512f4d69..ca175710c4c8 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -632,9 +632,12 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node, const char *pathp; int offset, rc = 0, depth = -1; - for (offset = fdt_next_node(blob, -1, &depth); - offset >= 0 && depth >= 0 && !rc; - offset = fdt_next_node(blob, offset, &depth)) { + if (!blob) + return 0; + + for (offset = fdt_next_node(blob, -1, &depth); + offset >= 0 && depth >= 0 && !rc; + offset = fdt_next_node(blob, offset, &depth)) { pathp = fdt_get_name(blob, offset, NULL); if (*pathp == '/') diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig index 66bdc593f811..333d28e64087 100644 --- a/drivers/platform/msm/Kconfig +++ b/drivers/platform/msm/Kconfig @@ -194,7 +194,7 @@ config MSM_11AD tristate "Platform driver for 11ad chip" depends on PCI depends on PCI_MSM - default y + default m ---help--- This module adds required platform support for wireless adapter based on Qualcomm Technologies, Inc. 
11ad chip, integrated into MSM platform diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h index 981129eb9f3a..d5f102eaaac6 100644 --- a/drivers/platform/msm/ipa/ipa_common_i.h +++ b/drivers/platform/msm/ipa/ipa_common_i.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -26,7 +26,8 @@ log_info.file = __FILENAME__; \ log_info.line = __LINE__; \ log_info.type = EP; \ - log_info.id_string = ipa_clients_strings[client] + log_info.id_string = (client < 0 || client >= IPA_CLIENT_MAX) \ + ? "Invalid Client" : ipa_clients_strings[client] #define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \ log_info.file = __FILENAME__; \ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c index 80514f6c738e..b7f451b7c6fd 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c @@ -1027,7 +1027,7 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip, goto error; } - if (rt_tbl->cookie != IPA_COOKIE) { + if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) { IPAERR("RT table cookie is invalid\n"); goto error; } @@ -1048,7 +1048,7 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip, } INIT_LIST_HEAD(&entry->link); entry->rule = *rule; - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_FLT_COOKIE; entry->rt_tbl = rt_tbl; entry->tbl = tbl; if (add_rear) { @@ -1067,13 +1067,19 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip, if (id < 0) { IPAERR("failed to add to tree\n"); WARN_ON(1); + goto ipa_insert_failed; } *rule_hdl = id; entry->id = id; IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt); return 0; - +ipa_insert_failed: + 
tbl->rule_cnt--; + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + list_del(&entry->link); + kmem_cache_free(ipa_ctx->flt_rule_cache, entry); error: return -EPERM; } @@ -1089,7 +1095,7 @@ static int __ipa_del_flt_rule(u32 rule_hdl) return -EINVAL; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_FLT_COOKIE) { IPAERR("bad params\n"); return -EINVAL; } @@ -1121,7 +1127,7 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, goto error; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_FLT_COOKIE) { IPAERR("bad params\n"); goto error; } @@ -1142,7 +1148,7 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, goto error; } - if (rt_tbl->cookie != IPA_COOKIE) { + if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) { IPAERR("RT table cookie is invalid\n"); goto error; } diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c index 10a49b1e75d8..75e4ff5c3c57 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c @@ -547,7 +547,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, { struct ipa_hdr_entry *hdr_entry; struct ipa_hdr_proc_ctx_entry *entry; - struct ipa_hdr_proc_ctx_offset_entry *offset; + struct ipa_hdr_proc_ctx_offset_entry *offset = NULL; u32 bin; struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl; int id; @@ -563,7 +563,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, } hdr_entry = ipa_id_find(proc_ctx->hdr_hdl); - if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) { + if (!hdr_entry || (hdr_entry->cookie != IPA_HDR_COOKIE)) { IPAERR("hdr_hdl is invalid\n"); return -EINVAL; } @@ -580,7 +580,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, entry->hdr = hdr_entry; if (add_ref_hdr) hdr_entry->ref_cnt++; - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_PROC_HDR_COOKIE; needed_len = (proc_ctx->type == IPA_HDR_PROC_NONE) ? 
sizeof(struct ipa_hdr_proc_ctx_add_hdr_seq) : @@ -640,6 +640,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, if (id < 0) { IPAERR("failed to alloc id\n"); WARN_ON(1); + goto ipa_insert_failed; } entry->id = id; proc_ctx->proc_ctx_hdl = id; @@ -647,6 +648,14 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, return 0; +ipa_insert_failed: + if (offset) + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + list_del(&entry->link); + htbl->proc_ctx_cnt--; + bad_len: if (add_ref_hdr) hdr_entry->ref_cnt--; @@ -659,7 +668,7 @@ bad_len: static int __ipa_add_hdr(struct ipa_hdr_add *hdr) { struct ipa_hdr_entry *entry; - struct ipa_hdr_offset_entry *offset; + struct ipa_hdr_offset_entry *offset = NULL; u32 bin; struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl; int id; @@ -691,7 +700,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) entry->type = hdr->type; entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid; entry->eth2_ofst = hdr->eth2_ofst; - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_HDR_COOKIE; if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0]) bin = IPA_HDR_BIN0; @@ -780,6 +789,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) if (id < 0) { IPAERR("failed to alloc id\n"); WARN_ON(1); + goto ipa_insert_failed; } entry->id = id; hdr->hdr_hdl = id; @@ -804,10 +814,19 @@ fail_add_proc_ctx: entry->ref_cnt--; hdr->hdr_hdl = 0; ipa_id_remove(id); +ipa_insert_failed: + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa_ctx->pdev, entry->phys_base, + entry->hdr_len, DMA_TO_DEVICE); + } else { + if (offset) + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + } htbl->hdr_cnt--; list_del(&entry->link); - dma_unmap_single(ipa_ctx->pdev, entry->phys_base, - entry->hdr_len, DMA_TO_DEVICE); + fail_dma_mapping: entry->is_hdr_proc_ctx = false; bad_hdr_len: @@ -824,7 +843,7 @@ static int __ipa_del_hdr_proc_ctx(u32 
proc_ctx_hdl, struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl; entry = ipa_id_find(proc_ctx_hdl); - if (!entry || (entry->cookie != IPA_COOKIE)) { + if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) { IPAERR("bad parm\n"); return -EINVAL; } @@ -875,7 +894,7 @@ int __ipa_del_hdr(u32 hdr_hdl, bool by_user) return -EINVAL; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_HDR_COOKIE) { IPAERR("bad parm\n"); return -EINVAL; } @@ -1444,7 +1463,7 @@ int ipa2_put_hdr(u32 hdr_hdl) goto bail; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_HDR_COOKIE) { IPAERR("invalid header entry\n"); result = -EINVAL; goto bail; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h index bb11230c960a..70b2a2eeb53f 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h @@ -37,7 +37,15 @@ #define DRV_NAME "ipa" #define NAT_DEV_NAME "ipaNatTable" + #define IPA_COOKIE 0x57831603 +#define IPA_RT_RULE_COOKIE 0x57831604 +#define IPA_RT_TBL_COOKIE 0x57831605 +#define IPA_FLT_COOKIE 0x57831606 +#define IPA_HDR_COOKIE 0x57831607 +#define IPA_PROC_HDR_COOKIE 0x57831608 + + #define MTU_BYTE 1500 #define IPA_MAX_NUM_PIPES 0x14 @@ -223,8 +231,8 @@ struct ipa_smmu_cb_ctx { */ struct ipa_flt_entry { struct list_head link; - struct ipa_flt_rule rule; u32 cookie; + struct ipa_flt_rule rule; struct ipa_flt_tbl *tbl; struct ipa_rt_tbl *rt_tbl; u32 hw_len; @@ -249,13 +257,13 @@ struct ipa_flt_entry { */ struct ipa_rt_tbl { struct list_head link; + u32 cookie; struct list_head head_rt_rule_list; char name[IPA_RESOURCE_NAME_MAX]; u32 idx; u32 rule_cnt; u32 ref_cnt; struct ipa_rt_tbl_set *set; - u32 cookie; bool in_sys; u32 sz; struct ipa_mem_buffer curr_mem; @@ -286,6 +294,7 @@ struct ipa_rt_tbl { */ struct ipa_hdr_entry { struct list_head link; + u32 cookie; u8 hdr[IPA_HDR_MAX_SIZE]; u32 hdr_len; char name[IPA_RESOURCE_NAME_MAX]; @@ -295,7 +304,6 @@ struct ipa_hdr_entry { 
dma_addr_t phys_base; struct ipa_hdr_proc_ctx_entry *proc_ctx; struct ipa_hdr_offset_entry *offset_entry; - u32 cookie; u32 ref_cnt; int id; u8 is_eth2_ofst_valid; @@ -368,10 +376,10 @@ struct ipa_hdr_proc_ctx_add_hdr_cmd_seq { */ struct ipa_hdr_proc_ctx_entry { struct list_head link; + u32 cookie; enum ipa_hdr_proc_type type; struct ipa_hdr_proc_ctx_offset_entry *offset_entry; struct ipa_hdr_entry *hdr; - u32 cookie; u32 ref_cnt; int id; bool user_deleted; @@ -427,8 +435,8 @@ struct ipa_flt_tbl { */ struct ipa_rt_entry { struct list_head link; - struct ipa_rt_rule rule; u32 cookie; + struct ipa_rt_rule rule; struct ipa_rt_tbl *tbl; struct ipa_hdr_entry *hdr; struct ipa_hdr_proc_ctx_entry *proc_ctx; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c index 5c07bc7d43b5..e3f987d47692 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c @@ -541,15 +541,15 @@ ssize_t ipa_read(struct file *filp, char __user *buf, size_t count, char __user *start; struct ipa_push_msg *msg = NULL; int ret; - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); int locked; start = buf; + add_wait_queue(&ipa_ctx->msg_waitq, &wait); while (1) { mutex_lock(&ipa_ctx->msg_lock); locked = 1; - prepare_to_wait(&ipa_ctx->msg_waitq, &wait, TASK_INTERRUPTIBLE); if (!list_empty(&ipa_ctx->msg_list)) { msg = list_first_entry(&ipa_ctx->msg_list, struct ipa_push_msg, link); @@ -601,10 +601,10 @@ ssize_t ipa_read(struct file *filp, char __user *buf, size_t count, locked = 0; mutex_unlock(&ipa_ctx->msg_lock); - schedule(); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } - finish_wait(&ipa_ctx->msg_waitq, &wait); + remove_wait_queue(&ipa_ctx->msg_waitq, &wait); if (start != buf && ret != -EFAULT) ret = buf - start; diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c index f2909110d09f..9ac7b3809895 100644 --- 
a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c @@ -909,7 +909,7 @@ static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip, INIT_LIST_HEAD(&entry->link); strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX); entry->set = set; - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_RT_TBL_COOKIE; entry->in_sys = (ip == IPA_IP_v4) ? !ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl; set->tbl_cnt++; @@ -922,12 +922,16 @@ static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip, if (id < 0) { IPAERR("failed to add to tree\n"); WARN_ON(1); + goto ipa_insert_failed; } entry->id = id; } return entry; +ipa_insert_failed: + set->tbl_cnt--; + list_del(&entry->link); fail_rt_idx_alloc: entry->cookie = 0; kmem_cache_free(ipa_ctx->rt_tbl_cache, entry); @@ -940,7 +944,7 @@ static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry) enum ipa_ip_type ip = IPA_IP_MAX; u32 id; - if (entry == NULL || (entry->cookie != IPA_COOKIE)) { + if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) { IPAERR("bad parms\n"); return -EINVAL; } @@ -954,8 +958,11 @@ static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry) ip = IPA_IP_v4; else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6]) ip = IPA_IP_v6; - else + else { WARN_ON(1); + return -EPERM; + } + if (!entry->in_sys) { list_del(&entry->link); @@ -994,13 +1001,14 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, if (rule->hdr_hdl) { hdr = ipa_id_find(rule->hdr_hdl); - if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) { + if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { IPAERR("rt rule does not point to valid hdr\n"); goto error; } } else if (rule->hdr_proc_ctx_hdl) { proc_ctx = ipa_id_find(rule->hdr_proc_ctx_hdl); - if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) { + if ((proc_ctx == NULL) || + (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) { IPAERR("rt rule does not point to valid proc ctx\n"); goto error; } @@ -1008,7 +1016,7 @@ static int 
__ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, tbl = __ipa_add_rt_tbl(ip, name); - if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) { + if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) { IPAERR("bad params\n"); goto error; } @@ -1029,7 +1037,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, goto error; } INIT_LIST_HEAD(&entry->link); - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_RT_RULE_COOKIE; entry->rule = *rule; entry->tbl = tbl; entry->hdr = hdr; @@ -1123,7 +1131,7 @@ int __ipa_del_rt_rule(u32 rule_hdl) return -EINVAL; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_RT_RULE_COOKIE) { IPAERR("bad params\n"); return -EINVAL; } @@ -1358,7 +1366,7 @@ int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup) } mutex_lock(&ipa_ctx->lock); entry = __ipa_find_rt_tbl(lookup->ip, lookup->name); - if (entry && entry->cookie == IPA_COOKIE) { + if (entry && entry->cookie == IPA_RT_TBL_COOKIE) { if (entry->ref_cnt == U32_MAX) { IPAERR("fail: ref count crossed limit\n"); goto ret; @@ -1401,7 +1409,7 @@ int ipa2_put_rt_tbl(u32 rt_tbl_hdl) goto ret; } - if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) { + if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) { IPAERR("bad parms\n"); result = -EINVAL; goto ret; @@ -1411,8 +1419,11 @@ int ipa2_put_rt_tbl(u32 rt_tbl_hdl) ip = IPA_IP_v4; else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6]) ip = IPA_IP_v6; - else + else { WARN_ON(1); + result = -EINVAL; + goto ret; + } entry->ref_cnt--; if (entry->ref_cnt == 0 && entry->rule_cnt == 0) { @@ -1439,7 +1450,7 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) if (rtrule->rule.hdr_hdl) { hdr = ipa_id_find(rtrule->rule.hdr_hdl); - if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) { + if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { IPAERR("rt rule does not point to valid hdr\n"); goto error; } @@ -1451,7 +1462,7 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) goto 
error; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_RT_RULE_COOKIE) { IPAERR("bad params\n"); goto error; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c index bc6622d4725b..8c6bd48cfb2c 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -1254,6 +1254,12 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params, memset(gsi_ep_cfg_ptr, 0, sizeof(struct ipa_gsi_ep_config)); gsi_ep_cfg_ptr = ipa_get_gsi_ep_info(ipa_ep_idx); + if (gsi_ep_cfg_ptr == NULL) { + IPAERR("Error ipa_get_gsi_ep_info ret NULL\n"); + result = -EFAULT; + goto write_evt_scratch_fail; + } + params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl; params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num; gsi_res = gsi_alloc_channel(¶ms->chan_params, gsi_dev_hdl, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c index 6a3a89e2cf8d..5fe425ea7655 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -1188,6 +1188,15 @@ static void ipa3_handle_rx(struct ipa3_sys_context *sys) } else { inactive_cycles = 0; } + + /* + * if pipe is out of buffers there is no point polling for + * completed descs; release the worker so delayed work can + * run in a timely manner + */ + if (sys->len == 0) + break; + } while (inactive_cycles <= POLLING_INACTIVITY_RX); trace_poll_to_intr3(sys->ep->client); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c index 41b29335d23b..42632c527bdd 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c @@ -745,7 +745,7 @@ static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule, goto error; } - if ((*rt_tbl)->cookie != IPA_COOKIE) { + if ((*rt_tbl)->cookie != IPA_RT_TBL_COOKIE) { IPAERR("RT table cookie is invalid\n"); goto 
error; } @@ -787,7 +787,7 @@ static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry, } INIT_LIST_HEAD(&((*entry)->link)); (*entry)->rule = *rule; - (*entry)->cookie = IPA_COOKIE; + (*entry)->cookie = IPA_FLT_COOKIE; (*entry)->rt_tbl = rt_tbl; (*entry)->tbl = tbl; if (rule->rule_id) { @@ -822,12 +822,18 @@ static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl, if (id < 0) { IPAERR("failed to add to tree\n"); WARN_ON(1); + goto ipa_insert_failed; } *rule_hdl = id; entry->id = id; IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt); return 0; +ipa_insert_failed: + if (entry->rt_tbl) + entry->rt_tbl->ref_cnt--; + tbl->rule_cnt--; + return -EPERM; } static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip, @@ -853,9 +859,16 @@ static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip, list_add(&entry->link, &tbl->head_flt_rule_list); } - __ipa_finish_flt_rule_add(tbl, entry, rule_hdl); + if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl)) + goto ipa_insert_failed; return 0; +ipa_insert_failed: + list_del(&entry->link); + /* if rule id was allocated from idr, remove it */ + if (!(entry->rule_id & ipahal_get_rule_id_hi_bit())) + idr_remove(&entry->tbl->rule_ids, entry->rule_id); + kmem_cache_free(ipa3_ctx->flt_rule_cache, entry); error: return -EPERM; @@ -887,7 +900,8 @@ static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl, list_add(&entry->link, &((*add_after_entry)->link)); - __ipa_finish_flt_rule_add(tbl, entry, rule_hdl); + if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl)) + goto ipa_insert_failed; /* * prepare for next insertion @@ -896,6 +910,13 @@ static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl, return 0; +ipa_insert_failed: + list_del(&entry->link); + /* if rule id was allocated from idr, remove it */ + if (!(entry->rule_id & ipahal_get_rule_id_hi_bit())) + idr_remove(&entry->tbl->rule_ids, entry->rule_id); + kmem_cache_free(ipa3_ctx->flt_rule_cache, entry); + error: 
*add_after_entry = NULL; return -EPERM; @@ -912,7 +933,7 @@ static int __ipa_del_flt_rule(u32 rule_hdl) return -EINVAL; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_FLT_COOKIE) { IPAERR("bad params\n"); return -EINVAL; } @@ -949,7 +970,7 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, goto error; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_FLT_COOKIE) { IPAERR("bad params\n"); goto error; } @@ -970,7 +991,7 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule, goto error; } - if (rt_tbl->cookie != IPA_COOKIE) { + if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) { IPAERR("RT table cookie is invalid\n"); goto error; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c index a5186f1aff35..34bc7dcd63cf 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c @@ -339,7 +339,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, IPAERR("hdr_hdl is invalid\n"); return -EINVAL; } - if (hdr_entry->cookie != IPA_COOKIE) { + if (hdr_entry->cookie != IPA_HDR_COOKIE) { IPAERR("Invalid header cookie %u\n", hdr_entry->cookie); WARN_ON(1); return -EINVAL; @@ -359,7 +359,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, entry->hdr = hdr_entry; if (add_ref_hdr) hdr_entry->ref_cnt++; - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_HDR_COOKIE; needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type); @@ -417,6 +417,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, if (id < 0) { IPAERR("failed to alloc id\n"); WARN_ON(1); + goto ipa_insert_failed; } entry->id = id; proc_ctx->proc_ctx_hdl = id; @@ -424,6 +425,14 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, return 0; +ipa_insert_failed: + if (offset) + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + 
list_del(&entry->link); + htbl->proc_ctx_cnt--; + bad_len: if (add_ref_hdr) hdr_entry->ref_cnt--; @@ -436,7 +445,7 @@ bad_len: static int __ipa_add_hdr(struct ipa_hdr_add *hdr) { struct ipa3_hdr_entry *entry; - struct ipa_hdr_offset_entry *offset; + struct ipa_hdr_offset_entry *offset = NULL; u32 bin; struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl; int id; @@ -467,7 +476,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) entry->type = hdr->type; entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid; entry->eth2_ofst = hdr->eth2_ofst; - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_HDR_COOKIE; if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0]) bin = IPA_HDR_BIN0; @@ -546,6 +555,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr) if (id < 0) { IPAERR("failed to alloc id\n"); WARN_ON(1); + goto ipa_insert_failed; } entry->id = id; hdr->hdr_hdl = id; @@ -570,10 +580,19 @@ fail_add_proc_ctx: entry->ref_cnt--; hdr->hdr_hdl = 0; ipa3_id_remove(id); +ipa_insert_failed: + if (entry->is_hdr_proc_ctx) { + dma_unmap_single(ipa3_ctx->pdev, entry->phys_base, + entry->hdr_len, DMA_TO_DEVICE); + } else { + if (offset) + list_move(&offset->link, + &htbl->head_free_offset_list[offset->bin]); + entry->offset_entry = NULL; + } htbl->hdr_cnt--; list_del(&entry->link); - dma_unmap_single(ipa3_ctx->pdev, entry->phys_base, - entry->hdr_len, DMA_TO_DEVICE); + fail_dma_mapping: entry->is_hdr_proc_ctx = false; @@ -591,7 +610,7 @@ static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl, struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl; entry = ipa3_id_find(proc_ctx_hdl); - if (!entry || (entry->cookie != IPA_COOKIE)) { + if (!entry || (entry->cookie != IPA_HDR_COOKIE)) { IPAERR("bad parm\n"); return -EINVAL; } @@ -642,7 +661,7 @@ int __ipa3_del_hdr(u32 hdr_hdl, bool by_user) return -EINVAL; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_HDR_COOKIE) { IPAERR("bad parm\n"); return -EINVAL; } @@ -1188,7 +1207,7 @@ int ipa3_put_hdr(u32 hdr_hdl) goto bail; } 
- if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_HDR_COOKIE) { IPAERR("invalid header entry\n"); result = -EINVAL; goto bail; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index ac7ef6a21952..cbc53fb6815d 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -40,6 +40,12 @@ #define DRV_NAME "ipa" #define NAT_DEV_NAME "ipaNatTable" #define IPA_COOKIE 0x57831603 +#define IPA_RT_RULE_COOKIE 0x57831604 +#define IPA_RT_TBL_COOKIE 0x57831605 +#define IPA_FLT_COOKIE 0x57831606 +#define IPA_HDR_COOKIE 0x57831607 +#define IPA_PROC_HDR_COOKIE 0x57831608 + #define MTU_BYTE 1500 #define IPA_EP_NOT_ALLOCATED (-1) @@ -217,8 +223,8 @@ struct ipa_smmu_cb_ctx { */ struct ipa3_flt_entry { struct list_head link; - struct ipa_flt_rule rule; u32 cookie; + struct ipa_flt_rule rule; struct ipa3_flt_tbl *tbl; struct ipa3_rt_tbl *rt_tbl; u32 hw_len; @@ -246,13 +252,13 @@ struct ipa3_flt_entry { */ struct ipa3_rt_tbl { struct list_head link; + u32 cookie; struct list_head head_rt_rule_list; char name[IPA_RESOURCE_NAME_MAX]; u32 idx; u32 rule_cnt; u32 ref_cnt; struct ipa3_rt_tbl_set *set; - u32 cookie; bool in_sys[IPA_RULE_TYPE_MAX]; u32 sz[IPA_RULE_TYPE_MAX]; struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX]; @@ -284,6 +290,7 @@ struct ipa3_rt_tbl { */ struct ipa3_hdr_entry { struct list_head link; + u32 cookie; u8 hdr[IPA_HDR_MAX_SIZE]; u32 hdr_len; char name[IPA_RESOURCE_NAME_MAX]; @@ -293,7 +300,6 @@ struct ipa3_hdr_entry { dma_addr_t phys_base; struct ipa3_hdr_proc_ctx_entry *proc_ctx; struct ipa_hdr_offset_entry *offset_entry; - u32 cookie; u32 ref_cnt; int id; u8 is_eth2_ofst_valid; @@ -342,10 +348,10 @@ struct ipa3_hdr_proc_ctx_offset_entry { */ struct ipa3_hdr_proc_ctx_entry { struct list_head link; + u32 cookie; enum ipa_hdr_proc_type type; struct ipa3_hdr_proc_ctx_offset_entry *offset_entry; struct ipa3_hdr_entry *hdr; - u32 cookie; u32 ref_cnt; int id; bool 
user_deleted; @@ -407,8 +413,8 @@ struct ipa3_flt_tbl { */ struct ipa3_rt_entry { struct list_head link; - struct ipa_rt_rule rule; u32 cookie; + struct ipa_rt_rule rule; struct ipa3_rt_tbl *tbl; struct ipa3_hdr_entry *hdr; struct ipa3_hdr_proc_ctx_entry *proc_ctx; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c index 16a567644f79..38e8d4e9d639 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c @@ -546,17 +546,15 @@ ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count, char __user *start; struct ipa3_push_msg *msg = NULL; int ret; - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); int locked; start = buf; + add_wait_queue(&ipa3_ctx->msg_waitq, &wait); while (1) { mutex_lock(&ipa3_ctx->msg_lock); locked = 1; - prepare_to_wait(&ipa3_ctx->msg_waitq, - &wait, - TASK_INTERRUPTIBLE); if (!list_empty(&ipa3_ctx->msg_list)) { msg = list_first_entry(&ipa3_ctx->msg_list, @@ -609,10 +607,10 @@ ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count, locked = 0; mutex_unlock(&ipa3_ctx->msg_lock); - schedule(); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } - finish_wait(&ipa3_ctx->msg_waitq, &wait); + remove_wait_queue(&ipa3_ctx->msg_waitq, &wait); if (start != buf && ret != -EFAULT) ret = buf - start; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c index cd027c28e597..3267e0e83a82 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c @@ -191,7 +191,7 @@ static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client, static int ipa_mhi_start_gsi_channel(enum ipa_client_type client, int ipa_ep_idx, struct start_gsi_channel *params) { - int res; + int res = 0; struct gsi_evt_ring_props ev_props; struct ipa_mhi_msi_info *msi; struct gsi_chan_props ch_props; @@ -241,7 +241,6 @@ static int 
ipa_mhi_start_gsi_channel(enum ipa_client_type client, if (res) { IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res); goto fail_alloc_evt; - return res; } IPA_MHI_DBG("client %d, caching event ring hdl %lu\n", client, @@ -259,7 +258,6 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client, IPA_MHI_ERR("event ring wp is not updated. base=wp=0x%llx\n", params->ev_ctx_host->wp); goto fail_alloc_ch; - return res; } IPA_MHI_DBG("Ring event db: evt_ring_hdl=%lu host_wp=0x%llx\n", @@ -270,7 +268,6 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client, IPA_MHI_ERR("fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n", res, ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp); goto fail_alloc_ch; - return res; } memset(&ch_props, 0, sizeof(ch_props)); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c index 0269bfba993f..690a9db67ff0 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c @@ -1177,10 +1177,13 @@ void ipa3_qmi_service_exit(void) } /* clean the QMI msg cache */ + mutex_lock(&ipa3_qmi_lock); if (ipa3_qmi_ctx != NULL) { vfree(ipa3_qmi_ctx); ipa3_qmi_ctx = NULL; } + mutex_unlock(&ipa3_qmi_lock); + ipa3_svc_handle = 0; ipa3_qmi_modem_init_fin = false; ipa3_qmi_indication_fin = false; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c index 6197c9f64ca5..2ade584890fb 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -796,7 +796,7 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip, INIT_LIST_HEAD(&entry->link); strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX); entry->set = set; - entry->cookie = IPA_COOKIE; + entry->cookie = IPA_RT_TBL_COOKIE; entry->in_sys[IPA_RULE_HASHABLE] = (ip == IPA_IP_v4) ? 
!ipa3_ctx->ip4_rt_tbl_hash_lcl : !ipa3_ctx->ip6_rt_tbl_hash_lcl; @@ -814,12 +814,16 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip, if (id < 0) { IPAERR("failed to add to tree\n"); WARN_ON(1); + goto ipa_insert_failed; } entry->id = id; } return entry; - +ipa_insert_failed: + set->tbl_cnt--; + list_del(&entry->link); + idr_destroy(&entry->rule_ids); fail_rt_idx_alloc: entry->cookie = 0; kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry); @@ -833,7 +837,7 @@ static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry) u32 id; struct ipa3_rt_tbl_set *rset; - if (entry == NULL || (entry->cookie != IPA_COOKIE)) { + if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) { IPAERR("bad parms\n"); return -EINVAL; } @@ -847,8 +851,10 @@ static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry) ip = IPA_IP_v4; else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6]) ip = IPA_IP_v6; - else + else { WARN_ON(1); + return -EPERM; + } rset = &ipa3_ctx->reap_rt_tbl_set[ip]; @@ -885,14 +891,14 @@ static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule, if (rule->hdr_hdl) { *hdr = ipa3_id_find(rule->hdr_hdl); - if ((*hdr == NULL) || ((*hdr)->cookie != IPA_COOKIE)) { + if ((*hdr == NULL) || ((*hdr)->cookie != IPA_HDR_COOKIE)) { IPAERR("rt rule does not point to valid hdr\n"); return -EPERM; } } else if (rule->hdr_proc_ctx_hdl) { *proc_ctx = ipa3_id_find(rule->hdr_proc_ctx_hdl); if ((*proc_ctx == NULL) || - ((*proc_ctx)->cookie != IPA_COOKIE)) { + ((*proc_ctx)->cookie != IPA_PROC_HDR_COOKIE)) { IPAERR("rt rule does not point to valid proc ctx\n"); return -EPERM; @@ -915,7 +921,7 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry, goto error; } INIT_LIST_HEAD(&(*entry)->link); - (*(entry))->cookie = IPA_COOKIE; + (*(entry))->cookie = IPA_RT_RULE_COOKIE; (*(entry))->rule = *rule; (*(entry))->tbl = tbl; (*(entry))->hdr = hdr; @@ -983,7 +989,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name, tbl = __ipa_add_rt_tbl(ip, name); - if 
(tbl == NULL || (tbl->cookie != IPA_COOKIE)) { + if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) { IPAERR("failed adding rt tbl name = %s\n", name ? name : ""); goto error; @@ -1118,7 +1124,7 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules) mutex_lock(&ipa3_ctx->lock); tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name); - if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) { + if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) { IPAERR("failed finding rt tbl name = %s\n", rules->rt_tbl_name ? rules->rt_tbl_name : ""); ret = -EINVAL; @@ -1202,7 +1208,7 @@ int __ipa3_del_rt_rule(u32 rule_hdl) return -EINVAL; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_RT_RULE_COOKIE) { IPAERR("bad params\n"); return -EINVAL; } @@ -1440,7 +1446,7 @@ int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup) } mutex_lock(&ipa3_ctx->lock); entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name); - if (entry && entry->cookie == IPA_COOKIE) { + if (entry && entry->cookie == IPA_RT_TBL_COOKIE) { if (entry->ref_cnt == U32_MAX) { IPAERR("fail: ref count crossed limit\n"); goto ret; @@ -1483,7 +1489,7 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl) goto ret; } - if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) { + if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) { IPAERR("bad parms\n"); result = -EINVAL; goto ret; @@ -1493,8 +1499,10 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl) ip = IPA_IP_v4; else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6]) ip = IPA_IP_v6; - else + else { WARN_ON(1); + goto ret; + } entry->ref_cnt--; if (entry->ref_cnt == 0 && entry->rule_cnt == 0) { @@ -1524,13 +1532,14 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) if (rtrule->rule.hdr_hdl) { hdr = ipa3_id_find(rtrule->rule.hdr_hdl); - if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) { + if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) { IPAERR("rt rule does not point to valid hdr\n"); goto error; } } else if 
(rtrule->rule.hdr_proc_ctx_hdl) { proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl); - if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) { + if ((proc_ctx == NULL) || + (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) { IPAERR("rt rule does not point to valid proc ctx\n"); goto error; } @@ -1542,7 +1551,7 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule) goto error; } - if (entry->cookie != IPA_COOKIE) { + if (entry->cookie != IPA_RT_RULE_COOKIE) { IPAERR("bad params\n"); goto error; } diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index be3bc2f4edd4..09cc64b3b695 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -807,6 +807,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) case 11: case 7: case 6: + case 1: ideapad_input_report(priv, vpc_bit); break; case 5: diff --git a/drivers/power/qcom/msm-core.c b/drivers/power/qcom/msm-core.c index 43ad33ea234b..825c27e7a4c1 100644 --- a/drivers/power/qcom/msm-core.c +++ b/drivers/power/qcom/msm-core.c @@ -190,10 +190,12 @@ static void core_temp_notify(enum thermal_trip_type type, struct cpu_activity_info *cpu_node = (struct cpu_activity_info *) data; + temp /= scaling_factor; + trace_temp_notification(cpu_node->sensor_id, type, temp, cpu_node->temp); - cpu_node->temp = temp / scaling_factor; + cpu_node->temp = temp; complete(&sampling_completion); } diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c index e17c65c1e4e7..209263ccced7 100644 --- a/drivers/power/reset/msm-poweroff.c +++ b/drivers/power/reset/msm-poweroff.c @@ -389,6 +389,7 @@ static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd) msm_trigger_wdog_bite(); #endif + scm_disable_sdi(); halt_spmi_pmic_arbiter(); deassert_ps_hold(); diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c index 0908eea3b186..7b7f991ecba9 100644 --- 
a/drivers/power/supply/qcom/qpnp-smb2.c +++ b/drivers/power/supply/qcom/qpnp-smb2.c @@ -1847,6 +1847,12 @@ static int smb2_post_init(struct smb2 *chip) struct smb_charger *chg = &chip->chg; int rc; + /* In case the usb path is suspended, we would have missed disabling + * the icl change interrupt because the interrupt could have been + * not requested + */ + rerun_election(chg->usb_icl_votable); + /* configure power role for dual-role */ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, TYPEC_POWER_ROLE_CMD_MASK, 0); @@ -2196,6 +2202,8 @@ static int smb2_request_interrupts(struct smb2 *chip) return rc; } } + if (chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq) + chg->usb_icl_change_irq_enabled = true; return rc; } diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c index 16db425514c3..64ed53185e88 100644 --- a/drivers/power/supply/qcom/smb-lib.c +++ b/drivers/power/supply/qcom/smb-lib.c @@ -427,6 +427,14 @@ static int step_charge_soc_update(struct smb_charger *chg, int capacity) int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend) { int rc = 0; + int irq = chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq; + + if (suspend && irq) { + if (chg->usb_icl_change_irq_enabled) { + disable_irq_nosync(irq); + chg->usb_icl_change_irq_enabled = false; + } + } rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT, suspend ? USBIN_SUSPEND_BIT : 0); @@ -434,6 +442,13 @@ int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend) smblib_err(chg, "Couldn't write %s to USBIN_SUSPEND_BIT rc=%d\n", suspend ? 
"suspend" : "resume", rc); + if (!suspend && irq) { + if (!chg->usb_icl_change_irq_enabled) { + enable_irq(irq); + chg->usb_icl_change_irq_enabled = true; + } + } + return rc; } @@ -885,7 +900,6 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua) if (icl_ua < USBIN_25MA) return smblib_set_usb_suspend(chg, true); - disable_irq_nosync(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq); if (icl_ua == INT_MAX) goto override_suspend_config; @@ -943,7 +957,6 @@ override_suspend_config: } enable_icl_changed_interrupt: - enable_irq(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq); return rc; } diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h index c97b84c34084..18714827b8d2 100644 --- a/drivers/power/supply/qcom/smb-lib.h +++ b/drivers/power/supply/qcom/smb-lib.h @@ -328,6 +328,7 @@ struct smb_charger { int fake_input_current_limited; bool pr_swap_in_progress; int typec_mode; + int usb_icl_change_irq_enabled; /* workaround flag */ u32 wa_flags; diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c index d57bf2f3b80c..e46b1d583f40 100644 --- a/drivers/pwm/pwm-qpnp.c +++ b/drivers/pwm/pwm-qpnp.c @@ -317,6 +317,7 @@ struct _qpnp_pwm_config { struct pwm_period_config period; int supported_sizes; int force_pwm_size; + bool update_period; }; /* Public facing structure */ @@ -1211,28 +1212,32 @@ static int _pwm_config(struct qpnp_pwm_chip *chip, rc = qpnp_lpg_save_pwm_value(chip); if (rc) goto out; - rc = qpnp_lpg_configure_pwm(chip); - if (rc) - goto out; - rc = qpnp_configure_pwm_control(chip); - if (rc) - goto out; - if (!rc && chip->enabled) { - rc = qpnp_lpg_configure_pwm_state(chip, QPNP_PWM_ENABLE); - if (rc) { - pr_err("Error in configuring pwm state, rc=%d\n", rc); - return rc; - } + if (pwm_config->update_period) { + rc = qpnp_lpg_configure_pwm(chip); + if (rc) + goto out; + rc = qpnp_configure_pwm_control(chip); + if (rc) + goto out; + if (!rc && chip->enabled) { + rc = qpnp_lpg_configure_pwm_state(chip, + QPNP_PWM_ENABLE); + 
if (rc) { + pr_err("Error in configuring pwm state, rc=%d\n", + rc); + return rc; + } - /* Enable the glitch removal after PWM is enabled */ - rc = qpnp_lpg_glitch_removal(chip, true); - if (rc) { - pr_err("Error in enabling glitch control, rc=%d\n", rc); - return rc; + /* Enable the glitch removal after PWM is enabled */ + rc = qpnp_lpg_glitch_removal(chip, true); + if (rc) { + pr_err("Error in enabling glitch control, rc=%d\n", + rc); + return rc; + } } } - pr_debug("duty/period=%u/%u %s: pwm_value=%d (of %d)\n", (unsigned int)duty_value, (unsigned int)period_value, (tm_lvl == LVL_USEC) ? "usec" : "nsec", @@ -1375,12 +1380,14 @@ static int qpnp_pwm_config(struct pwm_chip *pwm_chip, spin_lock_irqsave(&chip->lpg_lock, flags); + chip->pwm_config.update_period = false; if (prev_period_us > INT_MAX / NSEC_PER_USEC || prev_period_us * NSEC_PER_USEC != period_ns) { qpnp_lpg_calc_period(LVL_NSEC, period_ns, chip); qpnp_lpg_save_period(chip); pwm->period = period_ns; chip->pwm_config.pwm_period = period_ns / NSEC_PER_USEC; + chip->pwm_config.update_period = true; } rc = _pwm_config(chip, LVL_NSEC, duty_ns, period_ns); @@ -1619,6 +1626,7 @@ int pwm_config_us(struct pwm_device *pwm, int duty_us, int period_us) spin_lock_irqsave(&chip->lpg_lock, flags); + chip->pwm_config.update_period = false; if (chip->pwm_config.pwm_period != period_us) { qpnp_lpg_calc_period(LVL_USEC, period_us, chip); qpnp_lpg_save_period(chip); @@ -1628,6 +1636,7 @@ int pwm_config_us(struct pwm_device *pwm, int duty_us, int period_us) pwm->period = 0; else pwm->period = (unsigned int)period_us * NSEC_PER_USEC; + chip->pwm_config.update_period = true; } rc = _pwm_config(chip, LVL_USEC, duty_us, period_us); @@ -1734,6 +1743,7 @@ static int qpnp_parse_pwm_dt_config(struct device_node *of_pwm_node, qpnp_lpg_calc_period(LVL_USEC, period, chip); qpnp_lpg_save_period(chip); chip->pwm_config.pwm_period = period; + chip->pwm_config.update_period = true; rc = _pwm_config(chip, LVL_USEC, 
chip->pwm_config.pwm_duty, period); diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index adf621430b41..188920367e84 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -2361,6 +2361,14 @@ static void regulator_disable_work(struct work_struct *work) count = rdev->deferred_disables; rdev->deferred_disables = 0; + /* + * Workqueue functions queue the new work instance while the previous + * work instance is being processed. Cancel the queued work instance + * as the work instance under processing does the job of the queued + * work instance. + */ + cancel_delayed_work(&rdev->disable_work); + for (i = 0; i < count; i++) { ret = _regulator_disable(rdev); if (ret != 0) @@ -2404,10 +2412,10 @@ int regulator_disable_deferred(struct regulator *regulator, int ms) mutex_lock(&rdev->mutex); rdev->deferred_disables++; + mod_delayed_work(system_power_efficient_wq, &rdev->disable_work, + msecs_to_jiffies(ms)); mutex_unlock(&rdev->mutex); - queue_delayed_work(system_power_efficient_wq, &rdev->disable_work, - msecs_to_jiffies(ms)); return 0; } EXPORT_SYMBOL_GPL(regulator_disable_deferred); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 59ced8864b2f..0e6aaef9a038 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -3563,12 +3563,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) } else { buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; lpfc_els_free_data(phba, buf_ptr1); + elsiocb->context2 = NULL; } } if (elsiocb->context3) { buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; lpfc_els_free_bpl(phba, buf_ptr); + elsiocb->context3 = NULL; } lpfc_sli_release_iocbq(phba, elsiocb); return 0; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index f5aeda8f014f..38e90d9c2ced 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -5887,18 +5887,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 
free_vfi_bmask: kfree(phba->sli4_hba.vfi_bmask); + phba->sli4_hba.vfi_bmask = NULL; free_xri_ids: kfree(phba->sli4_hba.xri_ids); + phba->sli4_hba.xri_ids = NULL; free_xri_bmask: kfree(phba->sli4_hba.xri_bmask); + phba->sli4_hba.xri_bmask = NULL; free_vpi_ids: kfree(phba->vpi_ids); + phba->vpi_ids = NULL; free_vpi_bmask: kfree(phba->vpi_bmask); + phba->vpi_bmask = NULL; free_rpi_ids: kfree(phba->sli4_hba.rpi_ids); + phba->sli4_hba.rpi_ids = NULL; free_rpi_bmask: kfree(phba->sli4_hba.rpi_bmask); + phba->sli4_hba.rpi_bmask = NULL; err_exit: return rc; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 0e59731f95ad..1f6a3b86965f 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2466,6 +2466,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) if (pkt->entry_status & RF_BUSY) res = DID_BUS_BUSY << 16; + if (pkt->entry_type == NOTIFY_ACK_TYPE && + pkt->handle == QLA_TGT_SKIP_HANDLE) + return; + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { sp->done(ha, sp, res); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index f57d96984ae4..e6faa0b050d1 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -2865,7 +2865,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, pkt->entry_type = NOTIFY_ACK_TYPE; pkt->entry_count = 1; - pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + pkt->handle = QLA_TGT_SKIP_HANDLE; nack = (struct nack_to_isp *)pkt; nack->ox_id = ntfy->ox_id; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 9b3af788376c..8aa202faafb5 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2497,7 +2497,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) if (sdp->broken_fua) { sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); sdkp->DPOFUA = 0; - } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { + } else if 
(sdkp->DPOFUA && !sdkp->device->use_10_for_rw && + !sdkp->device->use_16_for_rw) { sd_first_printk(KERN_NOTICE, sdkp, "Uses READ/WRITE(6), disabling FUA\n"); sdkp->DPOFUA = 0; diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 7dbbb29d24c6..03a2aadf0d3c 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -533,7 +533,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, { struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); + unsigned long flags; int req_size; + int ret; BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); @@ -561,8 +563,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, req_size = sizeof(cmd->req.cmd); } - if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0) + ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)); + if (ret == -EIO) { + cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; + spin_lock_irqsave(&req_vq->vq_lock, flags); + virtscsi_complete_cmd(vscsi, cmd); + spin_unlock_irqrestore(&req_vq->vq_lock, flags); + } else if (ret != 0) { return SCSI_MLQUEUE_HOST_BUSY; + } return 0; } diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index 6358d1256bb1..fccbdf313e08 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_MACH_DOVE) += dove/ obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ obj-$(CONFIG_ARCH_QCOM) += qcom/ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ +obj-$(CONFIG_QCOM_SCM_QCPE) += qcom/ obj-$(CONFIG_ARCH_SUNXI) += sunxi/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ obj-$(CONFIG_SOC_TI) += ti/ diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 0c4414d27eeb..907960cfa9d5 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -379,6 +379,10 @@ config QCOM_SCM bool "Secure Channel Manager (SCM) support" default n +config QCOM_SCM_QCPE + bool "Para-Virtualized Secure Channel Manager (SCM) support over QCPE" + default n + 
menuconfig QCOM_SCM_XPU bool "Qualcomm XPU configuration driver" depends on QCOM_SCM diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 5eeede23333d..0bf54bedd6ea 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -62,6 +62,7 @@ CFLAGS_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) obj-$(CONFIG_QCOM_SCM_ERRATA) += scm-errata.o obj-$(CONFIG_QCOM_SCM) += scm.o scm-boot.o +obj-$(CONFIG_QCOM_SCM_QCPE) += scm_qcpe.o obj-$(CONFIG_QCOM_SCM_XPU) += scm-xpu.o obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o obj-$(CONFIG_QCOM_MEMORY_DUMP) += memory_dump.o diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 21be894414bc..c74cd3b814cd 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -13,6 +13,7 @@ #define pr_fmt(fmt) "icnss: " fmt #include <asm/dma-iommu.h> +#include <linux/of_address.h> #include <linux/clk.h> #include <linux/iommu.h> #include <linux/export.h> @@ -365,6 +366,7 @@ struct icnss_stats { uint32_t vbatt_req; uint32_t vbatt_resp; uint32_t vbatt_req_err; + u32 rejuvenate_ind; uint32_t rejuvenate_ack_req; uint32_t rejuvenate_ack_resp; uint32_t rejuvenate_ack_err; @@ -456,6 +458,10 @@ static struct icnss_priv { struct icnss_wlan_mac_addr wlan_mac_addr; bool bypass_s1_smmu; struct mutex dev_lock; + u8 cause_for_rejuvenation; + u8 requesting_sub_system; + u16 line_number; + char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1]; } *penv; #ifdef CONFIG_ICNSS_DEBUG @@ -1691,6 +1697,60 @@ out: return ret; } +static int icnss_decode_rejuvenate_ind(void *msg, unsigned int msg_len) +{ + struct msg_desc ind_desc; + struct wlfw_rejuvenate_ind_msg_v01 ind_msg; + int ret = 0; + + if (!penv || !penv->wlfw_clnt) { + ret = -ENODEV; + goto out; + } + + memset(&ind_msg, 0, sizeof(ind_msg)); + + ind_desc.msg_id = QMI_WLFW_REJUVENATE_IND_V01; + ind_desc.max_msg_len = WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN; + ind_desc.ei_array = wlfw_rejuvenate_ind_msg_v01_ei; + + ret = 
qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len); + if (ret < 0) { + icnss_pr_err("Failed to decode rejuvenate ind message: ret %d, msg_len %u\n", + ret, msg_len); + goto out; + } + + if (ind_msg.cause_for_rejuvenation_valid) + penv->cause_for_rejuvenation = ind_msg.cause_for_rejuvenation; + else + penv->cause_for_rejuvenation = 0; + if (ind_msg.requesting_sub_system_valid) + penv->requesting_sub_system = ind_msg.requesting_sub_system; + else + penv->requesting_sub_system = 0; + if (ind_msg.line_number_valid) + penv->line_number = ind_msg.line_number; + else + penv->line_number = 0; + if (ind_msg.function_name_valid) + memcpy(penv->function_name, ind_msg.function_name, + QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1); + else + memset(penv->function_name, 0, + QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1); + + icnss_pr_info("Cause for rejuvenation: 0x%x, requesting sub-system: 0x%x, line number: %u, function name: %s\n", + penv->cause_for_rejuvenation, + penv->requesting_sub_system, + penv->line_number, + penv->function_name); + + penv->stats.rejuvenate_ind++; +out: + return ret; +} + static int wlfw_rejuvenate_ack_send_sync_msg(struct icnss_priv *priv) { int ret; @@ -1884,6 +1944,7 @@ static void icnss_qmi_wlfw_clnt_ind(struct qmi_handle *handle, msg_id, penv->state); icnss_ignore_qmi_timeout(true); + icnss_decode_rejuvenate_ind(msg, msg_len); event_data = kzalloc(sizeof(*event_data), GFP_KERNEL); if (event_data == NULL) return; @@ -3786,6 +3847,26 @@ static int icnss_stats_show_capability(struct seq_file *s, return 0; } +static int icnss_stats_show_rejuvenate_info(struct seq_file *s, + struct icnss_priv *priv) +{ + if (priv->stats.rejuvenate_ind) { + seq_puts(s, "\n<---------------- Rejuvenate Info ----------------->\n"); + seq_printf(s, "Number of Rejuvenations: %u\n", + priv->stats.rejuvenate_ind); + seq_printf(s, "Cause for Rejuvenation: 0x%x\n", + priv->cause_for_rejuvenation); + seq_printf(s, "Requesting Sub-System: 0x%x\n", + priv->requesting_sub_system); + seq_printf(s, 
"Line Number: %u\n", + priv->line_number); + seq_printf(s, "Function Name: %s\n", + priv->function_name); + } + + return 0; +} + static int icnss_stats_show_events(struct seq_file *s, struct icnss_priv *priv) { int i; @@ -3851,6 +3932,7 @@ static int icnss_stats_show(struct seq_file *s, void *data) ICNSS_STATS_DUMP(s, priv, vbatt_req); ICNSS_STATS_DUMP(s, priv, vbatt_resp); ICNSS_STATS_DUMP(s, priv, vbatt_req_err); + ICNSS_STATS_DUMP(s, priv, rejuvenate_ind); ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_req); ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_resp); ICNSS_STATS_DUMP(s, priv, rejuvenate_ack_err); @@ -3875,6 +3957,8 @@ static int icnss_stats_show(struct seq_file *s, void *data) icnss_stats_show_capability(s, priv); + icnss_stats_show_rejuvenate_info(s, priv); + icnss_stats_show_events(s, priv); icnss_stats_show_state(s, priv); @@ -4215,6 +4299,9 @@ static int icnss_probe(struct platform_device *pdev) int i; struct device *dev = &pdev->dev; struct icnss_priv *priv; + const __be32 *addrp; + u64 prop_size = 0; + struct device_node *np; if (penv) { icnss_pr_err("Driver is already initialized\n"); @@ -4286,24 +4373,53 @@ static int icnss_probe(struct platform_device *pdev) } } - ret = of_property_read_u32(dev->of_node, "qcom,wlan-msa-memory", - &priv->msa_mem_size); + np = of_parse_phandle(dev->of_node, + "qcom,wlan-msa-fixed-region", 0); + if (np) { + addrp = of_get_address(np, 0, &prop_size, NULL); + if (!addrp) { + icnss_pr_err("Failed to get assigned-addresses or property\n"); + ret = -EINVAL; + goto out; + } - if (ret || priv->msa_mem_size == 0) { - icnss_pr_err("Fail to get MSA Memory Size: %u, ret: %d\n", - priv->msa_mem_size, ret); - goto out; - } + priv->msa_pa = of_translate_address(np, addrp); + if (priv->msa_pa == OF_BAD_ADDR) { + icnss_pr_err("Failed to translate MSA PA from device-tree\n"); + ret = -EINVAL; + goto out; + } - priv->msa_va = dmam_alloc_coherent(&pdev->dev, priv->msa_mem_size, - &priv->msa_pa, GFP_KERNEL); - if (!priv->msa_va) { - 
icnss_pr_err("DMA alloc failed for MSA\n"); - ret = -ENOMEM; - goto out; + priv->msa_va = memremap(priv->msa_pa, + (unsigned long)prop_size, MEMREMAP_WT); + if (!priv->msa_va) { + icnss_pr_err("MSA PA ioremap failed: phy addr: %pa\n", + &priv->msa_pa); + ret = -EINVAL; + goto out; + } + priv->msa_mem_size = prop_size; + } else { + ret = of_property_read_u32(dev->of_node, "qcom,wlan-msa-memory", + &priv->msa_mem_size); + if (ret || priv->msa_mem_size == 0) { + icnss_pr_err("Fail to get MSA Memory Size: %u ret: %d\n", + priv->msa_mem_size, ret); + goto out; + } + + priv->msa_va = dmam_alloc_coherent(&pdev->dev, + priv->msa_mem_size, &priv->msa_pa, GFP_KERNEL); + + if (!priv->msa_va) { + icnss_pr_err("DMA alloc failed for MSA\n"); + ret = -ENOMEM; + goto out; + } } - icnss_pr_dbg("MSA pa: %pa, MSA va: 0x%p\n", &priv->msa_pa, - priv->msa_va); + + icnss_pr_dbg("MSA pa: %pa, MSA va: 0x%p MSA Memory Size: 0x%x\n", + &priv->msa_pa, (void *)priv->msa_va, priv->msa_mem_size); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smmu_iova_base"); diff --git a/drivers/soc/qcom/ipc_router_mhi_xprt.c b/drivers/soc/qcom/ipc_router_mhi_xprt.c index e5f6104bd7de..adf4078818a5 100644 --- a/drivers/soc/qcom/ipc_router_mhi_xprt.c +++ b/drivers/soc/qcom/ipc_router_mhi_xprt.c @@ -132,12 +132,11 @@ struct ipc_router_mhi_xprt { struct ipc_router_mhi_xprt_work { struct ipc_router_mhi_xprt *mhi_xprtp; enum MHI_CLIENT_CHANNEL chan_id; - struct work_struct work; }; static void mhi_xprt_read_data(struct work_struct *work); -static void mhi_xprt_enable_event(struct work_struct *work); -static void mhi_xprt_disable_event(struct work_struct *work); +static void mhi_xprt_enable_event(struct ipc_router_mhi_xprt_work *xprt_work); +static void mhi_xprt_disable_event(struct ipc_router_mhi_xprt_work *xprt_work); /** * ipc_router_mhi_xprt_config - Config. Info. 
of each MHI XPRT @@ -574,8 +573,6 @@ static int ipc_router_mhi_close(struct msm_ipc_router_xprt *xprt) mhi_xprtp->ch_hndl.in_chan_enabled = false; mutex_unlock(&mhi_xprtp->ch_hndl.state_lock); flush_workqueue(mhi_xprtp->wq); - mhi_close_channel(mhi_xprtp->ch_hndl.in_handle); - mhi_close_channel(mhi_xprtp->ch_hndl.out_handle); return 0; } @@ -600,10 +597,8 @@ static void mhi_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt) * * This work is scheduled when the MHI link to the peripheral is up. */ -static void mhi_xprt_enable_event(struct work_struct *work) +static void mhi_xprt_enable_event(struct ipc_router_mhi_xprt_work *xprt_work) { - struct ipc_router_mhi_xprt_work *xprt_work = - container_of(work, struct ipc_router_mhi_xprt_work, work); struct ipc_router_mhi_xprt *mhi_xprtp = xprt_work->mhi_xprtp; int rc; bool notify = false; @@ -613,7 +608,7 @@ static void mhi_xprt_enable_event(struct work_struct *work) if (rc) { IPC_RTR_ERR("%s Failed to open chan 0x%x, rc %d\n", __func__, mhi_xprtp->ch_hndl.out_chan_id, rc); - goto out_enable_event; + return; } mutex_lock(&mhi_xprtp->ch_hndl.state_lock); mhi_xprtp->ch_hndl.out_chan_enabled = true; @@ -625,7 +620,7 @@ static void mhi_xprt_enable_event(struct work_struct *work) if (rc) { IPC_RTR_ERR("%s Failed to open chan 0x%x, rc %d\n", __func__, mhi_xprtp->ch_hndl.in_chan_id, rc); - goto out_enable_event; + return; } mutex_lock(&mhi_xprtp->ch_hndl.state_lock); mhi_xprtp->ch_hndl.in_chan_enabled = true; @@ -643,11 +638,11 @@ static void mhi_xprt_enable_event(struct work_struct *work) } if (xprt_work->chan_id != mhi_xprtp->ch_hndl.in_chan_id) - goto out_enable_event; + return; rc = mhi_xprt_queue_in_buffers(mhi_xprtp, mhi_xprtp->ch_hndl.num_trbs); if (rc > 0) - goto out_enable_event; + return; IPC_RTR_ERR("%s: Could not queue one TRB atleast\n", __func__); mutex_lock(&mhi_xprtp->ch_hndl.state_lock); @@ -656,9 +651,6 @@ static void mhi_xprt_enable_event(struct work_struct *work) if (notify) 
msm_ipc_router_xprt_notify(&mhi_xprtp->xprt, IPC_ROUTER_XPRT_EVENT_CLOSE, NULL); - mhi_close_channel(mhi_xprtp->ch_hndl.in_handle); -out_enable_event: - kfree(xprt_work); } /** @@ -667,10 +659,8 @@ out_enable_event: * * This work is scheduled when the MHI link to the peripheral is down. */ -static void mhi_xprt_disable_event(struct work_struct *work) +static void mhi_xprt_disable_event(struct ipc_router_mhi_xprt_work *xprt_work) { - struct ipc_router_mhi_xprt_work *xprt_work = - container_of(work, struct ipc_router_mhi_xprt_work, work); struct ipc_router_mhi_xprt *mhi_xprtp = xprt_work->mhi_xprtp; bool notify = false; @@ -681,7 +671,6 @@ static void mhi_xprt_disable_event(struct work_struct *work) mhi_xprtp->ch_hndl.out_chan_enabled = false; mutex_unlock(&mhi_xprtp->ch_hndl.state_lock); wake_up(&mhi_xprtp->write_wait_q); - mhi_close_channel(mhi_xprtp->ch_hndl.out_handle); } else if (xprt_work->chan_id == mhi_xprtp->ch_hndl.in_chan_id) { mutex_lock(&mhi_xprtp->ch_hndl.state_lock); notify = mhi_xprtp->ch_hndl.out_chan_enabled && @@ -691,7 +680,6 @@ static void mhi_xprt_disable_event(struct work_struct *work) /* Queue a read work to remove any partially read packets */ queue_work(mhi_xprtp->wq, &mhi_xprtp->read_work); flush_workqueue(mhi_xprtp->wq); - mhi_close_channel(mhi_xprtp->ch_hndl.in_handle); } if (notify) { @@ -702,7 +690,6 @@ static void mhi_xprt_disable_event(struct work_struct *work) __func__, mhi_xprtp->xprt.name); wait_for_completion(&mhi_xprtp->sft_close_complete); } - kfree(xprt_work); } /** @@ -743,7 +730,7 @@ static void mhi_xprt_xfer_event(struct mhi_cb_info *cb_info) static void ipc_router_mhi_xprt_cb(struct mhi_cb_info *cb_info) { struct ipc_router_mhi_xprt *mhi_xprtp; - struct ipc_router_mhi_xprt_work *xprt_work; + struct ipc_router_mhi_xprt_work xprt_work; if (cb_info->result == NULL) { IPC_RTR_ERR("%s: Result not available in cb_info\n", __func__); @@ -751,23 +738,16 @@ static void ipc_router_mhi_xprt_cb(struct mhi_cb_info *cb_info) } mhi_xprtp 
= (struct ipc_router_mhi_xprt *)(cb_info->result->user_data); + xprt_work.mhi_xprtp = mhi_xprtp; + xprt_work.chan_id = cb_info->chan; switch (cb_info->cb_reason) { - case MHI_CB_MHI_ENABLED: + case MHI_CB_MHI_SHUTDOWN: + case MHI_CB_SYS_ERROR: case MHI_CB_MHI_DISABLED: - xprt_work = kmalloc(sizeof(*xprt_work), GFP_KERNEL); - if (!xprt_work) { - IPC_RTR_ERR("%s: Couldn't handle %d event on %s\n", - __func__, cb_info->cb_reason, - mhi_xprtp->xprt_name); - return; - } - xprt_work->mhi_xprtp = mhi_xprtp; - xprt_work->chan_id = cb_info->chan; - if (cb_info->cb_reason == MHI_CB_MHI_ENABLED) - INIT_WORK(&xprt_work->work, mhi_xprt_enable_event); - else - INIT_WORK(&xprt_work->work, mhi_xprt_disable_event); - queue_work(mhi_xprtp->wq, &xprt_work->work); + mhi_xprt_disable_event(&xprt_work); + break; + case MHI_CB_MHI_ENABLED: + mhi_xprt_enable_event(&xprt_work); break; case MHI_CB_XFER: mhi_xprt_xfer_event(cb_info); @@ -788,22 +768,37 @@ static void ipc_router_mhi_xprt_cb(struct mhi_cb_info *cb_info) * This function is called when a new XPRT is added. 
*/ static int ipc_router_mhi_driver_register( - struct ipc_router_mhi_xprt *mhi_xprtp) + struct ipc_router_mhi_xprt *mhi_xprtp, struct device *dev) { - int rc_status; - - rc_status = mhi_register_channel(&mhi_xprtp->ch_hndl.out_handle, NULL); - if (rc_status) { + int rc; + const char *node_name = "qcom,mhi"; + struct mhi_client_info_t *mhi_info; + + if (!mhi_is_device_ready(dev, node_name)) + return -EPROBE_DEFER; + + mhi_info = &mhi_xprtp->ch_hndl.out_clnt_info; + mhi_info->chan = mhi_xprtp->ch_hndl.out_chan_id; + mhi_info->dev = dev; + mhi_info->node_name = node_name; + mhi_info->user_data = mhi_xprtp; + rc = mhi_register_channel(&mhi_xprtp->ch_hndl.out_handle, mhi_info); + if (rc) { IPC_RTR_ERR("%s: Error %d registering out_chan for %s\n", - __func__, rc_status, mhi_xprtp->xprt_name); + __func__, rc, mhi_xprtp->xprt_name); return -EFAULT; } - rc_status = mhi_register_channel(&mhi_xprtp->ch_hndl.in_handle, NULL); - if (rc_status) { + mhi_info = &mhi_xprtp->ch_hndl.in_clnt_info; + mhi_info->chan = mhi_xprtp->ch_hndl.in_chan_id; + mhi_info->dev = dev; + mhi_info->node_name = node_name; + mhi_info->user_data = mhi_xprtp; + rc = mhi_register_channel(&mhi_xprtp->ch_hndl.in_handle, mhi_info); + if (rc) { mhi_deregister_channel(mhi_xprtp->ch_hndl.out_handle); IPC_RTR_ERR("%s: Error %d registering in_chan for %s\n", - __func__, rc_status, mhi_xprtp->xprt_name); + __func__, rc, mhi_xprtp->xprt_name); return -EFAULT; } return 0; @@ -820,7 +815,8 @@ static int ipc_router_mhi_driver_register( * the MHI XPRT configurations from device tree. 
*/ static int ipc_router_mhi_config_init( - struct ipc_router_mhi_xprt_config *mhi_xprt_config) + struct ipc_router_mhi_xprt_config *mhi_xprt_config, + struct device *dev) { struct ipc_router_mhi_xprt *mhi_xprtp; char wq_name[XPRT_NAME_LEN]; @@ -879,7 +875,7 @@ static int ipc_router_mhi_config_init( INIT_LIST_HEAD(&mhi_xprtp->rx_addr_map_list); spin_lock_init(&mhi_xprtp->rx_addr_map_list_lock); - rc = ipc_router_mhi_driver_register(mhi_xprtp); + rc = ipc_router_mhi_driver_register(mhi_xprtp, dev); return rc; } @@ -963,7 +959,7 @@ static int ipc_router_mhi_xprt_probe(struct platform_device *pdev) return rc; } - rc = ipc_router_mhi_config_init(&mhi_xprt_config); + rc = ipc_router_mhi_config_init(&mhi_xprt_config, &pdev->dev); if (rc) { IPC_RTR_ERR("%s: init failed\n", __func__); return rc; diff --git a/drivers/soc/qcom/scm_qcpe.c b/drivers/soc/qcom/scm_qcpe.c new file mode 100644 index 000000000000..54a978157bda --- /dev/null +++ b/drivers/soc/qcom/scm_qcpe.c @@ -0,0 +1,1137 @@ +/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/delay.h> + +#include <asm/cacheflush.h> +#include <asm/compiler.h> + +#include <soc/qcom/scm.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/scm.h> + +#include <uapi/linux/habmm.h> + +#define SCM_ENOMEM (-5) +#define SCM_EOPNOTSUPP (-4) +#define SCM_EINVAL_ADDR (-3) +#define SCM_EINVAL_ARG (-2) +#define SCM_ERROR (-1) +#define SCM_INTERRUPTED (1) +#define SCM_EBUSY (-55) +#define SCM_V2_EBUSY (-12) + +static DEFINE_MUTEX(scm_lock); + +/* + * MSM8996 V2 requires a lock to protect against + * concurrent accesses between the limits management + * driver and the clock controller + */ +DEFINE_MUTEX(scm_lmh_lock); + +#define SCM_EBUSY_WAIT_MS 30 +#define SCM_EBUSY_MAX_RETRY 67 + +#define N_EXT_SCM_ARGS 7 +#define FIRST_EXT_ARG_IDX 3 +#define SMC_ATOMIC_SYSCALL 31 +#define N_REGISTER_ARGS (MAX_SCM_ARGS - N_EXT_SCM_ARGS + 1) +#define SMC64_MASK 0x40000000 +#define SMC_ATOMIC_MASK 0x80000000 +#define IS_CALL_AVAIL_CMD 1 + +#define SCM_BUF_LEN(__cmd_size, __resp_size) ({ \ + size_t x = __cmd_size + __resp_size; \ + size_t y = sizeof(struct scm_command) + sizeof(struct scm_response); \ + size_t result; \ + if (x < __cmd_size || (x + y) < x) \ + result = 0; \ + else \ + result = x + y; \ + result; \ + }) +/** + * struct scm_command - one SCM command buffer + * @len: total available memory for command and response + * @buf_offset: start of command buffer + * @resp_hdr_offset: start of response buffer + * @id: command to be executed + * @buf: buffer returned from scm_get_command_buffer() + * + * An SCM command is laid out in memory as follows: + * + * ------------------- <--- struct scm_command + * | command header | + * ------------------- <--- scm_get_command_buffer() + * | command buffer | + * ------------------- <--- struct scm_response and + * | response header | 
scm_command_to_response() + * ------------------- <--- scm_get_response_buffer() + * | response buffer | + * ------------------- + * + * There can be arbitrary padding between the headers and buffers so + * you should always use the appropriate scm_get_*_buffer() routines + * to access the buffers in a safe manner. + */ +struct scm_command { + u32 len; + u32 buf_offset; + u32 resp_hdr_offset; + u32 id; + u32 buf[0]; +}; + +/** + * struct scm_response - one SCM response buffer + * @len: total available memory for response + * @buf_offset: start of response data relative to start of scm_response + * @is_complete: indicates if the command has finished processing + */ +struct scm_response { + u32 len; + u32 buf_offset; + u32 is_complete; +}; + +#ifdef CONFIG_ARM64 + +#define R0_STR "x0" +#define R1_STR "x1" +#define R2_STR "x2" +#define R3_STR "x3" +#define R4_STR "x4" +#define R5_STR "x5" +#define R6_STR "x6" + +/* Outer caches unsupported on ARM64 platforms */ +#define outer_inv_range(x, y) +#define outer_flush_range(x, y) + +#define __cpuc_flush_dcache_area __flush_dcache_area + +#else + +#define R0_STR "r0" +#define R1_STR "r1" +#define R2_STR "r2" +#define R3_STR "r3" +#define R4_STR "r4" +#define R5_STR "r5" +#define R6_STR "r6" + +#endif + +/** + * scm_command_to_response() - Get a pointer to a scm_response + * @cmd: command + * + * Returns a pointer to a response for a command. + */ +static inline struct scm_response *scm_command_to_response( + const struct scm_command *cmd) +{ + return (void *)cmd + cmd->resp_hdr_offset; +} + +/** + * scm_get_command_buffer() - Get a pointer to a command buffer + * @cmd: command + * + * Returns a pointer to the command buffer of a command. + */ +static inline void *scm_get_command_buffer(const struct scm_command *cmd) +{ + return (void *)cmd->buf; +} + +/** + * scm_get_response_buffer() - Get a pointer to a response buffer + * @rsp: response + * + * Returns a pointer to a response buffer of a response. 
+ */ +static inline void *scm_get_response_buffer(const struct scm_response *rsp) +{ + return (void *)rsp + rsp->buf_offset; +} + +static int scm_remap_error(int err) +{ + switch (err) { + case SCM_ERROR: + return -EIO; + case SCM_EINVAL_ADDR: + case SCM_EINVAL_ARG: + return -EINVAL; + case SCM_EOPNOTSUPP: + return -EOPNOTSUPP; + case SCM_ENOMEM: + return -ENOMEM; + case SCM_EBUSY: + return SCM_EBUSY; + case SCM_V2_EBUSY: + return SCM_V2_EBUSY; + } + return -EINVAL; +} + +static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc) +{ + static bool opened; + static u32 handle; + u32 ret; + u32 size_bytes; + + struct smc_params_s { + uint64_t x0; + uint64_t x1; + uint64_t x2; + uint64_t x3; + uint64_t x4; + uint64_t x5; + uint64_t sid; + } smc_params; + + pr_info("scm_call_qcpe: IN: 0x%x, 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx", + fn_id, desc->arginfo, desc->args[0], desc->args[1], + desc->args[2], desc->args[3], desc->args[4], + desc->args[5], desc->args[6]); + + if (!opened) { + ret = habmm_socket_open(&handle, MM_QCPE_VM1, 0, 0); + if (ret != HAB_OK) { + pr_err("scm_call2: habmm_socket_open failed with ret = %d", + ret); + return ret; + } + opened = true; + } + + smc_params.x0 = fn_id | 0x40000000; /* SMC64_MASK */ + smc_params.x1 = desc->arginfo; + smc_params.x2 = desc->args[0]; + smc_params.x3 = desc->args[1]; + smc_params.x4 = desc->args[2]; + smc_params.x5 = desc->x5; + smc_params.sid = 0; + + ret = habmm_socket_send(handle, &smc_params, sizeof(smc_params), 0); + if (ret != HAB_OK) + return ret; + + size_bytes = sizeof(smc_params); + + ret = habmm_socket_recv(handle, &smc_params, &size_bytes, 0, 0); + if (ret != HAB_OK) + return ret; + + desc->ret[0] = smc_params.x1; + desc->ret[1] = smc_params.x2; + desc->ret[2] = smc_params.x3; + + pr_info("scm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx", + desc->ret[0], desc->ret[1], desc->ret[2]); + + return 0; +} + +static u32 smc(u32 cmd_addr) +{ + int context_id; + int ret; + uint64_t x0; + struct 
scm_desc desc = {0}; + + register u32 r0 asm("r0") = 1; + register u32 r1 asm("r1") = (uintptr_t)&context_id; + register u32 r2 asm("r2") = cmd_addr; + + x0 = r0; + desc.arginfo = r1; + desc.args[0] = r2; + + ret = scm_call_qcpe(x0, &desc); + + if (ret < 0) + return scm_remap_error(ret); + + return 0; +} + +static int __scm_call(const struct scm_command *cmd) +{ + int ret; + u32 cmd_addr = virt_to_phys(cmd); + + /* + * Flush the command buffer so that the secure world sees + * the correct data. + */ + __cpuc_flush_dcache_area((void *)cmd, cmd->len); + outer_flush_range(cmd_addr, cmd_addr + cmd->len); + + ret = smc(cmd_addr); + if (ret < 0) { + if (ret != SCM_EBUSY) + pr_err("scm_call failed with error code %d\n", ret); + ret = scm_remap_error(ret); + } + return ret; +} + +#ifndef CONFIG_ARM64 +static void scm_inv_range(unsigned long start, unsigned long end) +{ + u32 cacheline_size, ctr; + + asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr)); + cacheline_size = 4 << ((ctr >> 16) & 0xf); + + start = round_down(start, cacheline_size); + end = round_up(end, cacheline_size); + outer_inv_range(start, end); + while (start < end) { + asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start) + : "memory"); + start += cacheline_size; + } + mb(); /* Make sure memory is visible to TZ */ + isb(); +} +#else + +static void scm_inv_range(unsigned long start, unsigned long end) +{ + dmac_inv_range((void *)start, (void *)end); +} +#endif + +/** + * scm_call_common() - Send an SCM command + * @svc_id: service identifier + * @cmd_id: command identifier + * @cmd_buf: command buffer + * @cmd_len: length of the command buffer + * @resp_buf: response buffer + * @resp_len: length of the response buffer + * @scm_buf: internal scm structure used for passing data + * @scm_buf_len: length of the internal scm structure + * + * Core function to scm call. Initializes the given cmd structure with + * appropriate values and makes the actual scm call. 
Validation of cmd + * pointer and length must occur in the calling function. + * + * Returns the appropriate error code from the scm call + */ + +static int scm_call_common(u32 svc_id, u32 cmd_id, const void *cmd_buf, + size_t cmd_len, void *resp_buf, size_t resp_len, + struct scm_command *scm_buf, + size_t scm_buf_length) +{ + int ret; + struct scm_response *rsp; + unsigned long start, end; + + scm_buf->len = scm_buf_length; + scm_buf->buf_offset = offsetof(struct scm_command, buf); + scm_buf->resp_hdr_offset = scm_buf->buf_offset + cmd_len; + scm_buf->id = (svc_id << 10) | cmd_id; + + if (cmd_buf) + memcpy(scm_get_command_buffer(scm_buf), cmd_buf, cmd_len); + + mutex_lock(&scm_lock); + ret = __scm_call(scm_buf); + mutex_unlock(&scm_lock); + if (ret) + return ret; + + rsp = scm_command_to_response(scm_buf); + start = (unsigned long)rsp; + + do { + scm_inv_range(start, start + sizeof(*rsp)); + } while (!rsp->is_complete); + + end = (unsigned long)scm_get_response_buffer(rsp) + resp_len; + scm_inv_range(start, end); + + if (resp_buf) + memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len); + + return ret; +} + +/* + * Sometimes the secure world may be busy waiting for a particular resource. + * In those situations, it is expected that the secure world returns a special + * error code (SCM_EBUSY). Retry any scm_call that fails with this error code, + * but with a timeout in place. Also, don't move this into scm_call_common, + * since we want the first attempt to be the "fastpath". 
+ */ +static int _scm_call_retry(u32 svc_id, u32 cmd_id, const void *cmd_buf, + size_t cmd_len, void *resp_buf, size_t resp_len, + struct scm_command *cmd, + size_t len) +{ + int ret, retry_count = 0; + + do { + ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, + resp_buf, resp_len, cmd, len); + if (ret == SCM_EBUSY) + msleep(SCM_EBUSY_WAIT_MS); + if (retry_count == 33) + pr_warn("scm: secure world has been busy for 1 second!\n"); + } while (ret == SCM_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY)); + + if (ret == SCM_EBUSY) + pr_err("scm: secure world busy (rc = SCM_EBUSY)\n"); + + return ret; +} + +/** + * scm_call_noalloc - Send an SCM command + * + * Same as scm_call except clients pass in a buffer (@scm_buf) to be used for + * scm internal structures. The buffer should be allocated with + * DEFINE_SCM_BUFFER to account for the proper alignment and size. + */ +int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf, + size_t cmd_len, void *resp_buf, size_t resp_len, + void *scm_buf, size_t scm_buf_len) +{ + int ret; + size_t len = SCM_BUF_LEN(cmd_len, resp_len); + + if (len == 0) + return -EINVAL; + + if (!IS_ALIGNED((unsigned long)scm_buf, PAGE_SIZE)) + return -EINVAL; + + memset(scm_buf, 0, scm_buf_len); + + ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, + resp_len, scm_buf, len); + return ret; + +} + +struct scm_extra_arg { + union { + u32 args32[N_EXT_SCM_ARGS]; + u64 args64[N_EXT_SCM_ARGS]; + }; +}; + +static enum scm_interface_version { + SCM_UNKNOWN, + SCM_LEGACY, + SCM_ARMV8_32, + SCM_ARMV8_64, +} scm_version = SCM_UNKNOWN; + +/* This will be set to specify SMC32 or SMC64 */ +static u32 scm_version_mask; + +bool is_scm_armv8(void) +{ + int ret; + u64 ret1, x0; + + struct scm_desc desc = {0}; + + if (likely(scm_version != SCM_UNKNOWN)) + return (scm_version == SCM_ARMV8_32) || + (scm_version == SCM_ARMV8_64); + /* + * This is a one time check that runs on the first ever + * invocation of is_scm_armv8. 
We might be called in atomic + * context so no mutexes etc. Also, we can't use the scm_call2 + * or scm_call2_APIs directly since they depend on this init. + */ + + /* First try a SMC64 call */ + scm_version = SCM_ARMV8_64; + ret1 = 0; + x0 = SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD) | SMC_ATOMIC_MASK; + + desc.arginfo = SCM_ARGS(1); + desc.args[0] = x0; + + ret = scm_call_qcpe(x0 | SMC64_MASK, &desc); + + ret1 = desc.arginfo; + + if (ret || !ret1) { + /* Try SMC32 call */ + ret1 = 0; + + desc.arginfo = SCM_ARGS(1); + desc.args[0] = x0; + + ret = scm_call_qcpe(x0, &desc); + + if (ret || !ret1) + scm_version = SCM_LEGACY; + else + scm_version = SCM_ARMV8_32; + } else + scm_version_mask = SMC64_MASK; + + pr_debug("scm_call: scm version is %x, mask is %x\n", scm_version, + scm_version_mask); + + return (scm_version == SCM_ARMV8_32) || + (scm_version == SCM_ARMV8_64); +} +EXPORT_SYMBOL(is_scm_armv8); + +/* + * If there are more than N_REGISTER_ARGS, allocate a buffer and place + * the additional arguments in it. The extra argument buffer will be + * pointed to by X5. 
+ */ +static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags) +{ + int i, j; + struct scm_extra_arg *argbuf; + int arglen = desc->arginfo & 0xf; + size_t argbuflen = PAGE_ALIGN(sizeof(struct scm_extra_arg)); + + desc->x5 = desc->args[FIRST_EXT_ARG_IDX]; + + if (likely(arglen <= N_REGISTER_ARGS)) { + desc->extra_arg_buf = NULL; + return 0; + } + + argbuf = kzalloc(argbuflen, flags); + if (!argbuf) + return -ENOMEM; + + desc->extra_arg_buf = argbuf; + + j = FIRST_EXT_ARG_IDX; + if (scm_version == SCM_ARMV8_64) + for (i = 0; i < N_EXT_SCM_ARGS; i++) + argbuf->args64[i] = desc->args[j++]; + else + for (i = 0; i < N_EXT_SCM_ARGS; i++) + argbuf->args32[i] = desc->args[j++]; + desc->x5 = virt_to_phys(argbuf); + __cpuc_flush_dcache_area(argbuf, argbuflen); + outer_flush_range(virt_to_phys(argbuf), + virt_to_phys(argbuf) + argbuflen); + + return 0; +} + +/** + * scm_call2() - Invoke a syscall in the secure world + * @fn_id: The function ID for this syscall + * @desc: Descriptor structure containing arguments and return values + * + * Sends a command to the SCM and waits for the command to finish processing. + * This should *only* be called in pre-emptible context. + * + * A note on cache maintenance: + * Note that any buffers that are expected to be accessed by the secure world + * must be flushed before invoking scm_call and invalidated in the cache + * immediately after scm_call returns. An important point that must be noted + * is that on ARMV8 architectures, invalidation actually also causes a dirty + * cache line to be cleaned (flushed + unset-dirty-bit). Therefore it is of + * paramount importance that the buffer be flushed before invoking scm_call2, + * even if you don't care about the contents of that buffer. + * + * Note that cache maintenance on the argument buffer (desc->args) is taken care + * of by scm_call2; however, callers are responsible for any other cached + * buffers passed over to the secure world. 
+*/ +int scm_call2(u32 fn_id, struct scm_desc *desc) +{ + int arglen = desc->arginfo & 0xf; + int ret; + u64 x0; + + if (unlikely(!is_scm_armv8())) + return -ENODEV; + + ret = allocate_extra_arg_buffer(desc, GFP_NOIO); + if (ret) + return ret; + + x0 = fn_id | scm_version_mask; + + mutex_lock(&scm_lock); + + if (SCM_SVC_ID(fn_id) == SCM_SVC_LMH) + mutex_lock(&scm_lmh_lock); + + desc->ret[0] = desc->ret[1] = desc->ret[2] = 0; + + trace_scm_call_start(x0, desc); + + ret = scm_call_qcpe(x0, desc); + + trace_scm_call_end(desc); + + if (SCM_SVC_ID(fn_id) == SCM_SVC_LMH) + mutex_unlock(&scm_lmh_lock); + + mutex_unlock(&scm_lock); + + if (ret < 0) + pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n", + x0, ret, desc->ret[0], desc->ret[1], desc->ret[2]); + + if (arglen > N_REGISTER_ARGS) + kfree(desc->extra_arg_buf); + if (ret < 0) + return scm_remap_error(ret); + return 0; +} +EXPORT_SYMBOL(scm_call2); + +/** + * scm_call2_atomic() - Invoke a syscall in the secure world + * + * Similar to scm_call2 except that this can be invoked in atomic context. + * There is also no retry mechanism implemented. Please ensure that the + * secure world syscall can be executed in such a context and can complete + * in a timely manner. 
+ */ +int scm_call2_atomic(u32 fn_id, struct scm_desc *desc) +{ + int arglen = desc->arginfo & 0xf; + int ret; + u64 x0; + + if (unlikely(!is_scm_armv8())) + return -ENODEV; + + ret = allocate_extra_arg_buffer(desc, GFP_ATOMIC); + if (ret) + return ret; + + x0 = fn_id | BIT(SMC_ATOMIC_SYSCALL) | scm_version_mask; + + pr_debug("scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n", + x0, desc->arginfo, desc->args[0], desc->args[1], + desc->args[2], desc->x5); + + ret = scm_call_qcpe(x0, desc); + + if (ret < 0) + pr_err("scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n", + x0, desc->arginfo, desc->args[0], desc->args[1], + desc->args[2], desc->x5, ret, desc->ret[0], + desc->ret[1], desc->ret[2]); + + if (arglen > N_REGISTER_ARGS) + kfree(desc->extra_arg_buf); + if (ret < 0) + return scm_remap_error(ret); + return ret; +} + +/** + * scm_call() - Send an SCM command + * @svc_id: service identifier + * @cmd_id: command identifier + * @cmd_buf: command buffer + * @cmd_len: length of the command buffer + * @resp_buf: response buffer + * @resp_len: length of the response buffer + * + * Sends a command to the SCM and waits for the command to finish processing. + * + * A note on cache maintenance: + * Note that any buffers that are expected to be accessed by the secure world + * must be flushed before invoking scm_call and invalidated in the cache + * immediately after scm_call returns. Cache maintenance on the command and + * response buffers is taken care of by scm_call; however, callers are + * responsible for any other cached buffers passed over to the secure world. 
+ */ +int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len, + void *resp_buf, size_t resp_len) +{ + struct scm_command *cmd; + int ret; + size_t len = SCM_BUF_LEN(cmd_len, resp_len); + + if (len == 0 || PAGE_ALIGN(len) < len) + return -EINVAL; + + cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf, + resp_len, cmd, len); + if (unlikely(ret == SCM_EBUSY)) + ret = _scm_call_retry(svc_id, cmd_id, cmd_buf, cmd_len, + resp_buf, resp_len, cmd, PAGE_ALIGN(len)); + kfree(cmd); + return ret; +} +EXPORT_SYMBOL(scm_call); + +#define SCM_CLASS_REGISTER (0x2 << 8) +#define SCM_MASK_IRQS BIT(5) +#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \ + SCM_CLASS_REGISTER | \ + SCM_MASK_IRQS | \ + (n & 0xf)) + +/** + * scm_call_atomic1() - Send an atomic SCM command with one argument + * @svc_id: service identifier + * @cmd_id: command identifier + * @arg1: first argument + * + * This shall only be used with commands that are guaranteed to be + * uninterruptable, atomic and SMP safe. + */ +s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1) +{ + int context_id; + int ret; + uint64_t x0; + struct scm_desc desc = {0}; + + register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1); + register u32 r1 asm("r1") = (uintptr_t)&context_id; + register u32 r2 asm("r2") = arg1; + + x0 = r0; + desc.arginfo = r1; + desc.args[0] = r2; + + ret = scm_call_qcpe(x0, &desc); + + if (ret < 0) + return scm_remap_error(ret); + + return 0; +} +EXPORT_SYMBOL(scm_call_atomic1); + +/** + * scm_call_atomic1_1() - SCM command with one argument and one return value + * @svc_id: service identifier + * @cmd_id: command identifier + * @arg1: first argument + * @ret1: first return value + * + * This shall only be used with commands that are guaranteed to be + * uninterruptable, atomic and SMP safe. 
+ */ +s32 scm_call_atomic1_1(u32 svc, u32 cmd, u32 arg1, u32 *ret1) +{ + int context_id; + int ret; + uint64_t x0; + struct scm_desc desc = {0}; + + register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1); + register u32 r1 asm("r1") = (uintptr_t)&context_id; + register u32 r2 asm("r2") = arg1; + + x0 = r0; + desc.arginfo = r1; + desc.args[0] = r2; + + ret = scm_call_qcpe(x0, &desc); + + if (ret < 0) + return scm_remap_error(ret); + + *ret1 = desc.arginfo; + + return 0; +} +EXPORT_SYMBOL(scm_call_atomic1_1); + +/** + * scm_call_atomic2() - Send an atomic SCM command with two arguments + * @svc_id: service identifier + * @cmd_id: command identifier + * @arg1: first argument + * @arg2: second argument + * + * This shall only be used with commands that are guaranteed to be + * uninterruptable, atomic and SMP safe. + */ +s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2) +{ + int context_id; + int ret; + uint64_t x0; + struct scm_desc desc = {0}; + + register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2); + register u32 r1 asm("r1") = (uintptr_t)&context_id; + register u32 r2 asm("r2") = arg1; + register u32 r3 asm("r3") = arg2; + + x0 = r0; + desc.arginfo = r1; + desc.args[0] = r2; + desc.args[1] = r3; + + ret = scm_call_qcpe(x0, &desc); + + if (ret < 0) + return scm_remap_error(ret); + + return 0; +} +EXPORT_SYMBOL(scm_call_atomic2); + +/** + * scm_call_atomic3() - Send an atomic SCM command with three arguments + * @svc_id: service identifier + * @cmd_id: command identifier + * @arg1: first argument + * @arg2: second argument + * @arg3: third argument + * + * This shall only be used with commands that are guaranteed to be + * uninterruptable, atomic and SMP safe. 
+ */ +s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3) +{ + int context_id; + int ret; + uint64_t x0; + struct scm_desc desc = {0}; + + register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 3); + register u32 r1 asm("r1") = (uintptr_t)&context_id; + register u32 r2 asm("r2") = arg1; + register u32 r3 asm("r3") = arg2; + register u32 r4 asm("r4") = arg3; + + x0 = r0; + desc.arginfo = r1; + desc.args[0] = r2; + desc.args[1] = r3; + desc.args[2] = r4; + + ret = scm_call_qcpe(x0, &desc); + + if (ret < 0) + return scm_remap_error(ret); + + return 0; +} +EXPORT_SYMBOL(scm_call_atomic3); + +s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, + u32 arg3, u32 arg4, u32 *ret1, u32 *ret2) +{ + int ret; + int context_id; + uint64_t x0; + struct scm_desc desc = {0}; + + register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 4); + register u32 r1 asm("r1") = (uintptr_t)&context_id; + register u32 r2 asm("r2") = arg1; + register u32 r3 asm("r3") = arg2; + register u32 r4 asm("r4") = arg3; + register u32 r5 asm("r5") = arg4; + + x0 = r0; + desc.arginfo = r1; + desc.args[0] = r2; + desc.args[1] = r3; + desc.args[2] = r4; + desc.args[3] = r5; + + ret = scm_call_qcpe(x0, &desc); + + if (ret < 0) + return scm_remap_error(ret); + + *ret1 = desc.arginfo; + *ret2 = desc.args[0]; + + return 0; +} +EXPORT_SYMBOL(scm_call_atomic4_3); + +/** + * scm_call_atomic5_3() - SCM command with five argument and three return value + * @svc_id: service identifier + * @cmd_id: command identifier + * @arg1: first argument + * @arg2: second argument + * @arg3: third argument + * @arg4: fourth argument + * @arg5: fifth argument + * @ret1: first return value + * @ret2: second return value + * @ret3: third return value + * + * This shall only be used with commands that are guaranteed to be + * uninterruptable, atomic and SMP safe. 
+ */ +s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, + u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3) +{ + int ret; + int context_id; + uint64_t x0; + struct scm_desc desc = {0}; + + register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 5); + register u32 r1 asm("r1") = (uintptr_t)&context_id; + register u32 r2 asm("r2") = arg1; + register u32 r3 asm("r3") = arg2; + register u32 r4 asm("r4") = arg3; + register u32 r5 asm("r5") = arg4; + register u32 r6 asm("r6") = arg5; + + x0 = r0; + desc.arginfo = r1; + desc.args[0] = r2; + desc.args[1] = r3; + desc.args[2] = r4; + desc.args[3] = r5; + desc.args[4] = r6; + + ret = scm_call_qcpe(x0, &desc); + + if (ret < 0) + return scm_remap_error(ret); + + *ret1 = desc.arginfo; + *ret2 = desc.args[0]; + *ret3 = desc.args[1]; + + return 0; +} +EXPORT_SYMBOL(scm_call_atomic5_3); + +u32 scm_get_version(void) +{ + int context_id; + static u32 version = -1; + int ret; + uint64_t x0; + struct scm_desc desc = {0}; + + register u32 r0 asm("r0"); + register u32 r1 asm("r1"); + + if (version != -1) + return version; + + mutex_lock(&scm_lock); + + r0 = 0x1 << 8; + r1 = (uintptr_t)&context_id; + + x0 = r0; + desc.arginfo = r1; + + ret = scm_call_qcpe(x0, &desc); + + version = desc.arginfo; + + mutex_unlock(&scm_lock); + + if (ret < 0) + return scm_remap_error(ret); + + return version; +} +EXPORT_SYMBOL(scm_get_version); + +#define SCM_IO_READ 0x1 +#define SCM_IO_WRITE 0x2 + +u32 scm_io_read(phys_addr_t address) +{ + struct scm_desc desc = { + .args[0] = address, + .arginfo = SCM_ARGS(1), + }; + + if (!is_scm_armv8()) + return scm_call_atomic1(SCM_SVC_IO, SCM_IO_READ, address); + + scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_READ), &desc); + return desc.ret[0]; +} +EXPORT_SYMBOL(scm_io_read); + +int scm_io_write(phys_addr_t address, u32 val) +{ + int ret; + + if (!is_scm_armv8()) + ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val); + else { + struct scm_desc desc = { + .args[0] = address, + .args[1] 
= val, + .arginfo = SCM_ARGS(2), + }; + ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_WRITE), + &desc); + } + return ret; +} +EXPORT_SYMBOL(scm_io_write); + +int scm_is_call_available(u32 svc_id, u32 cmd_id) +{ + int ret; + struct scm_desc desc = {0}; + + if (!is_scm_armv8()) { + u32 ret_val = 0; + u32 svc_cmd = (svc_id << 10) | cmd_id; + + ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd, + sizeof(svc_cmd), &ret_val, sizeof(ret_val)); + if (ret) + return ret; + + return ret_val; + } + desc.arginfo = SCM_ARGS(1); + desc.args[0] = SCM_SIP_FNID(svc_id, cmd_id); + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD), &desc); + if (ret) + return ret; + + return desc.ret[0]; +} +EXPORT_SYMBOL(scm_is_call_available); + +#define GET_FEAT_VERSION_CMD 3 +int scm_get_feat_version(u32 feat) +{ + struct scm_desc desc = {0}; + int ret; + + if (!is_scm_armv8()) { + if (scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) { + u32 version; + + if (!scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat, + sizeof(feat), &version, sizeof(version))) + return version; + } + return 0; + } + + ret = scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD); + if (ret <= 0) + return 0; + + desc.args[0] = feat; + desc.arginfo = SCM_ARGS(1); + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, GET_FEAT_VERSION_CMD), + &desc); + if (!ret) + return desc.ret[0]; + + return 0; +} +EXPORT_SYMBOL(scm_get_feat_version); + +#define RESTORE_SEC_CFG 2 +int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret) +{ + struct scm_desc desc = {0}; + int ret; + struct restore_sec_cfg { + u32 device_id; + u32 spare; + } cfg; + + cfg.device_id = device_id; + cfg.spare = spare; + + if (IS_ERR_OR_NULL(scm_ret)) + return -EINVAL; + + if (!is_scm_armv8()) + return scm_call(SCM_SVC_MP, RESTORE_SEC_CFG, &cfg, sizeof(cfg), + scm_ret, sizeof(*scm_ret)); + + desc.args[0] = device_id; + desc.args[1] = spare; + desc.arginfo = SCM_ARGS(2); + + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, 
RESTORE_SEC_CFG), &desc); + if (ret) + return ret; + + *scm_ret = desc.ret[0]; + return 0; +} +EXPORT_SYMBOL(scm_restore_sec_cfg); + +/* + * SCM call command ID to check secure mode + * Return zero for secure device. + * Return one for non secure device or secure + * device with debug enabled device. + */ +#define TZ_INFO_GET_SECURE_STATE 0x4 +bool scm_is_secure_device(void) +{ + struct scm_desc desc = {0}; + int ret = 0, resp; + + desc.args[0] = 0; + desc.arginfo = 0; + if (!is_scm_armv8()) { + ret = scm_call(SCM_SVC_INFO, TZ_INFO_GET_SECURE_STATE, NULL, + 0, &resp, sizeof(resp)); + } else { + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, + TZ_INFO_GET_SECURE_STATE), + &desc); + resp = desc.ret[0]; + } + + if (ret) { + pr_err("%s: SCM call failed\n", __func__); + return false; + } + + if ((resp & BIT(0)) || (resp & BIT(2))) + return true; + else + return false; +} +EXPORT_SYMBOL(scm_is_secure_device); diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c index 221ae0c1fefb..f4c67f1cd9d9 100644 --- a/drivers/soc/qcom/service-notifier.c +++ b/drivers/soc/qcom/service-notifier.c @@ -84,6 +84,7 @@ static DEFINE_MUTEX(service_list_lock); struct ind_req_resp { char service_path[SERVREG_NOTIF_NAME_LENGTH]; int transaction_id; + int curr_state; }; /* @@ -200,8 +201,30 @@ static void send_ind_ack(struct work_struct *work) struct qmi_servreg_notif_set_ack_req_msg_v01 req; struct msg_desc req_desc, resp_desc; struct qmi_servreg_notif_set_ack_resp_msg_v01 resp = { { 0, 0 } }; + struct service_notif_info *service_notif; + enum pd_subsys_state state = USER_PD_STATE_CHANGE; int rc; + service_notif = _find_service_info(data->ind_msg.service_path); + if (!service_notif) + return; + if ((int)data->ind_msg.curr_state < QMI_STATE_MIN_VAL || + (int)data->ind_msg.curr_state > QMI_STATE_MAX_VAL) + pr_err("Unexpected indication notification state %d\n", + data->ind_msg.curr_state); + else { + mutex_lock(¬if_add_lock); + mutex_lock(&service_list_lock); + rc = 
service_notif_queue_notification(service_notif, + data->ind_msg.curr_state, &state); + if (rc & NOTIFY_STOP_MASK) + pr_err("Notifier callback aborted for %s with error %d\n", + data->ind_msg.service_path, rc); + service_notif->curr_state = data->ind_msg.curr_state; + mutex_unlock(&service_list_lock); + mutex_unlock(¬if_add_lock); + } + req.transaction_id = data->ind_msg.transaction_id; snprintf(req.service_name, ARRAY_SIZE(req.service_name), "%s", data->ind_msg.service_path); @@ -236,11 +259,9 @@ static void root_service_service_ind_cb(struct qmi_handle *handle, unsigned int msg_len, void *ind_cb_priv) { struct qmi_client_info *data = (struct qmi_client_info *)ind_cb_priv; - struct service_notif_info *service_notif; struct msg_desc ind_desc; struct qmi_servreg_notif_state_updated_ind_msg_v01 ind_msg = { QMI_STATE_MIN_VAL, "", 0xFFFF }; - enum pd_subsys_state state = USER_PD_STATE_CHANGE; int rc; ind_desc.msg_id = SERVREG_NOTIF_STATE_UPDATED_IND_MSG; @@ -256,27 +277,8 @@ static void root_service_service_ind_cb(struct qmi_handle *handle, ind_msg.service_name, ind_msg.curr_state, ind_msg.transaction_id); - service_notif = _find_service_info(ind_msg.service_name); - if (!service_notif) - return; - - if ((int)ind_msg.curr_state < QMI_STATE_MIN_VAL || - (int)ind_msg.curr_state > QMI_STATE_MAX_VAL) - pr_err("Unexpected indication notification state %d\n", - ind_msg.curr_state); - else { - mutex_lock(¬if_add_lock); - mutex_lock(&service_list_lock); - rc = service_notif_queue_notification(service_notif, - ind_msg.curr_state, &state); - if (rc & NOTIFY_STOP_MASK) - pr_err("Notifier callback aborted for %s with error %d\n", - ind_msg.service_name, rc); - service_notif->curr_state = ind_msg.curr_state; - mutex_unlock(&service_list_lock); - mutex_unlock(¬if_add_lock); - } data->ind_msg.transaction_id = ind_msg.transaction_id; + data->ind_msg.curr_state = ind_msg.curr_state; snprintf(data->ind_msg.service_path, ARRAY_SIZE(data->ind_msg.service_path), "%s", ind_msg.service_name); 
@@ -373,6 +375,12 @@ static void root_service_service_arrive(struct work_struct *work) mutex_unlock(&qmi_client_release_lock); pr_info("Connection established between QMI handle and %d service\n", data->instance_id); + /* Register for indication messages about service */ + rc = qmi_register_ind_cb(data->clnt_handle, + root_service_service_ind_cb, (void *)data); + if (rc < 0) + pr_err("Indication callback register failed(instance-id: %d) rc:%d\n", + data->instance_id, rc); mutex_lock(¬if_add_lock); mutex_lock(&service_list_lock); list_for_each_entry(service_notif, &service_list, list) { @@ -395,12 +403,6 @@ static void root_service_service_arrive(struct work_struct *work) } mutex_unlock(&service_list_lock); mutex_unlock(¬if_add_lock); - /* Register for indication messages about service */ - rc = qmi_register_ind_cb(data->clnt_handle, - root_service_service_ind_cb, (void *)data); - if (rc < 0) - pr_err("Indication callback register failed(instance-id: %d) rc:%d\n", - data->instance_id, rc); } static void root_service_service_exit(struct qmi_client_info *data, diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c index 7f03aabf415e..e4afce02afc7 100644 --- a/drivers/soc/qcom/spcom.c +++ b/drivers/soc/qcom/spcom.c @@ -245,7 +245,7 @@ struct spcom_device { int channel_count; /* private */ - struct mutex lock; + struct mutex cmd_lock; /* Link state */ struct completion link_state_changed; @@ -1952,6 +1952,8 @@ static int spcom_handle_write(struct spcom_channel *ch, swap_id = htonl(cmd->cmd_id); memcpy(cmd_name, &swap_id, sizeof(int)); + mutex_lock(&spcom_dev->cmd_lock); + pr_debug("cmd_id [0x%x] cmd_name [%s].\n", cmd_id, cmd_name); switch (cmd_id) { @@ -1972,9 +1974,11 @@ static int spcom_handle_write(struct spcom_channel *ch, break; default: pr_err("Invalid Command Id [0x%x].\n", (int) cmd->cmd_id); - return -EINVAL; + ret = -EINVAL; } + mutex_unlock(&spcom_dev->cmd_lock); + return ret; } @@ -2675,7 +2679,7 @@ static int spcom_probe(struct platform_device 
*pdev) return -ENOMEM; spcom_dev = dev; - mutex_init(&dev->lock); + mutex_init(&spcom_dev->cmd_lock); init_completion(&dev->link_state_changed); spcom_dev->link_state = GLINK_LINK_STATE_DOWN; diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 7d3af3eacf57..1ddba9ae8c0f 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -651,7 +651,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = t->rx_buf; t->rx_dma = dma_map_single(&spi->dev, buf, t->len, DMA_FROM_DEVICE); - if (!t->rx_dma) { + if (dma_mapping_error(&spi->dev, !t->rx_dma)) { ret = -EFAULT; goto err_rx_map; } @@ -665,7 +665,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = (void *)t->tx_buf; t->tx_dma = dma_map_single(&spi->dev, buf, t->len, DMA_TO_DEVICE); - if (!t->tx_dma) { + if (dma_mapping_error(&spi->dev, t->tx_dma)) { ret = -EFAULT; goto err_tx_map; } diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c index aaa96c3df45b..5fbd3766b981 100644 --- a/drivers/staging/android/sync_debug.c +++ b/drivers/staging/android/sync_debug.c @@ -87,7 +87,7 @@ static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) int status = 1; struct sync_timeline *parent = sync_pt_parent(pt); - if (fence_is_signaled_locked(&pt->base)) + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &pt->base.flags)) status = pt->base.status; seq_printf(s, " %s%spt %s", diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 200d3de8bc1e..a180c000e246 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1112,6 +1112,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, */ if (dump_payload) goto after_immediate_data; + /* + * Check for underflow case where both EDTL and immediate data payload + * exceeds what is presented by CDB's TRANSFER LENGTH, and what has + * already been set 
in target_cmd_size_check() as se_cmd->data_length. + * + * For this special case, fail the command and dump the immediate data + * payload. + */ + if (cmd->first_burst_len > cmd->se_cmd.data_length) { + cmd->sense_reason = TCM_INVALID_CDB_FIELD; + goto after_immediate_data; + } immed_ret = iscsit_handle_immediate_data(cmd, hdr, cmd->first_burst_len); diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 253a91bff943..272e6f755322 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -132,7 +132,7 @@ int init_se_kmem_caches(void); void release_se_kmem_caches(void); u32 scsi_get_new_index(scsi_index_t); void transport_subsystem_check_init(void); -void transport_cmd_finish_abort(struct se_cmd *, int); +int transport_cmd_finish_abort(struct se_cmd *, int); unsigned char *transport_dump_cmd_direction(struct se_cmd *); void transport_dump_dev_state(struct se_device *, char *, int *); void transport_dump_dev_info(struct se_device *, struct se_lun *, diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 46b1991fbb50..c9be953496ec 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr) kfree(tmr); } -static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) +static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) { unsigned long flags; bool remove = true, send_tas; @@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) transport_send_task_abort(cmd); } - transport_cmd_finish_abort(cmd, remove); + return transport_cmd_finish_abort(cmd, remove); } static int target_check_cdb_and_preempt(struct list_head *list, @@ -185,8 +185,8 @@ void core_tmr_abort_task( cancel_work_sync(&se_cmd->work); transport_wait_for_tasks(se_cmd); - transport_cmd_finish_abort(se_cmd, true); - target_put_sess_cmd(se_cmd); + if 
(!transport_cmd_finish_abort(se_cmd, true)) + target_put_sess_cmd(se_cmd); printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" " ref_tag: %llu\n", ref_tag); @@ -286,8 +286,8 @@ static void core_tmr_drain_tmr_list( cancel_work_sync(&cmd->work); transport_wait_for_tasks(cmd); - transport_cmd_finish_abort(cmd, 1); - target_put_sess_cmd(cmd); + if (!transport_cmd_finish_abort(cmd, 1)) + target_put_sess_cmd(cmd); } } @@ -385,8 +385,8 @@ static void core_tmr_drain_state_list( cancel_work_sync(&cmd->work); transport_wait_for_tasks(cmd); - core_tmr_handle_tas_abort(cmd, tas); - target_put_sess_cmd(cmd); + if (!core_tmr_handle_tas_abort(cmd, tas)) + target_put_sess_cmd(cmd); } } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 60743bf27f37..37c77db6e737 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -639,9 +639,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) percpu_ref_put(&lun->lun_ref); } -void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) +int transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); + int ret = 0; if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) transport_lun_remove_cmd(cmd); @@ -653,9 +654,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) cmd->se_tfo->aborted_task(cmd); if (transport_cmd_check_stop_to_fabric(cmd)) - return; + return 1; if (remove && ack_kref) - transport_put_cmd(cmd); + ret = transport_put_cmd(cmd); + + return ret; } static void target_complete_failure_work(struct work_struct *work) diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c index 830ef92ffe80..fc9faaee3170 100644 --- a/drivers/tty/serial/msm_serial_hs.c +++ b/drivers/tty/serial/msm_serial_hs.c @@ -272,7 +272,7 @@ static struct of_device_id msm_hs_match_table[] = { #define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE #define UARTDM_RX_BUF_SIZE 512 #define 
RETRY_TIMEOUT 5 -#define UARTDM_NR 256 +#define UARTDM_NR 4 #define BAM_PIPE_MIN 0 #define BAM_PIPE_MAX 11 #define BUS_SCALING 1 diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index 03aeec2e878c..7cdc95d2b085 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ -182,7 +182,7 @@ static void *usbpd_ipc_log; #define PS_HARD_RESET_TIME 25 #define PS_SOURCE_ON 400 #define PS_SOURCE_OFF 750 -#define SWAP_SOURCE_START_TIME 20 +#define FIRST_SOURCE_CAP_TIME 200 #define VDM_BUSY_TIME 50 #define VCONN_ON_TIME 100 @@ -790,17 +790,27 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) pd->pd_phy_opened = true; } - pd->current_state = PE_SRC_SEND_CAPABILITIES; if (pd->in_pr_swap) { - kick_sm(pd, SWAP_SOURCE_START_TIME); pd->in_pr_swap = false; val.intval = 0; power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PR_SWAP, &val); - break; } - /* fall-through */ + /* + * A sink might remove its terminations (during some Type-C + * compliance tests or a sink attempting to do Try.SRC) + * at this point just after we enabled VBUS. Sending PD + * messages now would delay detecting the detach beyond the + * required timing. Instead, delay sending out the first + * source capabilities to allow for the other side to + * completely settle CC debounce and allow HW to detect detach + * sooner in the meantime. PD spec allows up to + * tFirstSourceCap (250ms). 
+ */ + pd->current_state = PE_SRC_SEND_CAPABILITIES; + kick_sm(pd, FIRST_SOURCE_CAP_TIME); + break; case PE_SRC_SEND_CAPABILITIES: kick_sm(pd, 0); diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c index de4f93afdc97..e5f38e42e165 100644 --- a/drivers/usb/phy/phy-msm-qusb-v2.c +++ b/drivers/usb/phy/phy-msm-qusb-v2.c @@ -652,13 +652,16 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend) writel_relaxed(intr_mask, qphy->base + QUSB2PHY_INTR_CTRL); - /* enable phy auto-resume */ - writel_relaxed(0x91, + if (linestate & (LINESTATE_DP | LINESTATE_DM)) { + + /* enable phy auto-resume */ + writel_relaxed(0x91, qphy->base + QUSB2PHY_TEST1); - /* flush the previous write before next write */ - wmb(); - writel_relaxed(0x90, - qphy->base + QUSB2PHY_TEST1); + /* flush the previous write before next write */ + wmb(); + writel_relaxed(0x90, + qphy->base + QUSB2PHY_TEST1); + } dev_dbg(phy->dev, "%s: intr_mask = %x\n", __func__, intr_mask); diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 7fbe19d5279e..81b2b9f808b5 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -215,14 +215,19 @@ done: static inline void hub_descriptor(struct usb_hub_descriptor *desc) { + int width; + memset(desc, 0, sizeof(*desc)); desc->bDescriptorType = USB_DT_HUB; - desc->bDescLength = 9; desc->wHubCharacteristics = cpu_to_le16( HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM); + desc->bNbrPorts = VHCI_NPORTS; - desc->u.hs.DeviceRemovable[0] = 0xff; - desc->u.hs.DeviceRemovable[1] = 0xff; + BUILD_BUG_ON(VHCI_NPORTS > USB_MAXCHILDREN); + width = desc->bNbrPorts / 8 + 1; + desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * width; + memset(&desc->u.hs.DeviceRemovable[0], 0, width); + memset(&desc->u.hs.DeviceRemovable[width], 0xff, width); } static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 
1a9f18b40be6..34e4b3ad8b92 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -1163,6 +1163,10 @@ static int tce_iommu_attach_group(void *iommu_data, /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n", iommu_group_id(iommu_group), iommu_group); */ table_group = iommu_group_get_iommudata(iommu_group); + if (!table_group) { + ret = -ENODEV; + goto unlock_exit; + } if (tce_groups_attached(container) && (!table_group->ops || !table_group->ops->take_ownership || diff --git a/drivers/video/fbdev/msm/mdp3_ppp.c b/drivers/video/fbdev/msm/mdp3_ppp.c index eaacdd875747..4f7a4a1189f4 100644 --- a/drivers/video/fbdev/msm/mdp3_ppp.c +++ b/drivers/video/fbdev/msm/mdp3_ppp.c @@ -1,4 +1,5 @@ /* Copyright (c) 2007, 2013-2014, 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2017, The Linux Foundation. All rights reserved. * Copyright (C) 2007 Google Incorporated * * This software is licensed under the terms of the GNU General Public @@ -39,6 +40,7 @@ #define MDP_PPP_MAX_BPP 4 #define MDP_PPP_DYNAMIC_FACTOR 3 #define MDP_PPP_MAX_READ_WRITE 3 +#define MDP_PPP_MAX_WIDTH 0xFFF #define ENABLE_SOLID_FILL 0x2 #define DISABLE_SOLID_FILL 0x0 #define BLEND_LATENCY 3 @@ -152,6 +154,11 @@ int mdp3_ppp_get_img(struct mdp_img *img, struct mdp_blit_req *req, return -EINVAL; } + if (img->width > MDP_PPP_MAX_WIDTH) { + pr_err("%s incorrect width %d\n", __func__, img->width); + return -EINVAL; + } + fb_data.flags = img->priv; fb_data.memory_id = img->memory_id; fb_data.offset = 0; diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h index d6e8a213c215..5548f0f09f8a 100644 --- a/drivers/video/fbdev/msm/mdss.h +++ b/drivers/video/fbdev/msm/mdss.h @@ -547,6 +547,7 @@ struct mdss_data_type { u32 sec_session_cnt; wait_queue_head_t secure_waitq; struct cx_ipeak_client *mdss_cx_ipeak; + struct mult_factor bus_throughput_factor; }; extern struct mdss_data_type *mdss_res; diff --git a/drivers/video/fbdev/msm/mdss_dp.c 
b/drivers/video/fbdev/msm/mdss_dp.c index c8afb3a244c3..2f169074261a 100644 --- a/drivers/video/fbdev/msm/mdss_dp.c +++ b/drivers/video/fbdev/msm/mdss_dp.c @@ -2176,6 +2176,10 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp) ret = mdss_dp_dpcd_cap_read(dp); if (ret || !mdss_dp_aux_is_link_rate_valid(dp->dpcd.max_link_rate) || !mdss_dp_aux_is_lane_count_valid(dp->dpcd.max_lane_count)) { + if (ret == EDP_AUX_ERR_TOUT) { + pr_err("DPCD read timedout, skip connect notification\n"); + goto end; + } /* * If there is an error in parsing DPCD or if DPCD reports * unsupported link parameters then set the default link diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c index 82f6d4a123b5..4ac10ab494e5 100644 --- a/drivers/video/fbdev/msm/mdss_dsi.c +++ b/drivers/video/fbdev/msm/mdss_dsi.c @@ -909,10 +909,15 @@ static ssize_t mdss_dsi_cmd_write(struct file *file, const char __user *p, /* Writing in batches is possible */ ret = simple_write_to_buffer(string_buf, blen, ppos, p, count); + if (ret < 0) { + pr_err("%s: Failed to copy data\n", __func__); + mutex_unlock(&pcmds->dbg_mutex); + return -EINVAL; + } - string_buf[blen] = '\0'; + string_buf[ret] = '\0'; pcmds->string_buf = string_buf; - pcmds->sblen = blen; + pcmds->sblen = count; mutex_unlock(&pcmds->dbg_mutex); return ret; } diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c index 6936c4c1f3cc..6fb32761a767 100644 --- a/drivers/video/fbdev/msm/mdss_mdp.c +++ b/drivers/video/fbdev/msm/mdss_mdp.c @@ -4527,6 +4527,15 @@ static int mdss_mdp_parse_dt_misc(struct platform_device *pdev) mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-clk-factor", &mdata->clk_factor); + /* + * Bus throughput factor will be used during high downscale cases. + * The recommended default factor is 1.1. 
+ */ + mdata->bus_throughput_factor.numer = 11; + mdata->bus_throughput_factor.denom = 10; + mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-bus-througput-factor", + &mdata->bus_throughput_factor); + rc = of_property_read_u32(pdev->dev.of_node, "qcom,max-bandwidth-low-kbps", &mdata->max_bw_low); if (rc) diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c index efd681a5d954..0165c48a0467 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c +++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c @@ -31,6 +31,7 @@ #define MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM 2 #define NUM_MIXERCFG_REGS 3 #define MDSS_MDP_WB_OUTPUT_BPP 3 +#define MIN_BUS_THROUGHPUT_SCALE_FACTOR 35 struct mdss_mdp_mixer_cfg { u32 config_masks[NUM_MIXERCFG_REGS]; bool border_enabled; @@ -622,11 +623,21 @@ static u32 __calc_qseed3_mdp_clk_rate(struct mdss_mdp_pipe *pipe, } static inline bool __is_vert_downscaling(u32 src_h, - struct mdss_rect dst){ - + struct mdss_rect dst) +{ return (src_h > dst.h); } +static inline bool __is_bus_throughput_factor_required(u32 src_h, + struct mdss_rect dst) +{ + u32 scale_factor = src_h * 10; + + do_div(scale_factor, dst.h); + return (__is_vert_downscaling(src_h, dst) && + (scale_factor >= MIN_BUS_THROUGHPUT_SCALE_FACTOR)); +} + static u32 get_pipe_mdp_clk_rate(struct mdss_mdp_pipe *pipe, struct mdss_rect src, struct mdss_rect dst, u32 fps, u32 v_total, u32 flags) @@ -673,6 +684,15 @@ static u32 get_pipe_mdp_clk_rate(struct mdss_mdp_pipe *pipe, } } + /* + * If the downscale factor is >= 3.5 for a 32 BPP surface, + * it is recommended to add a 10% bus throughput factor to + * the clock rate. 
+ */ + if ((pipe->src_fmt->bpp == 4) && + __is_bus_throughput_factor_required(src_h, dst)) + rate = apply_fudge_factor(rate, &mdata->bus_throughput_factor); + if (flags & PERF_CALC_PIPE_APPLY_CLK_FUDGE) rate = mdss_mdp_clk_fudge_factor(mixer, rate); diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c index e44edb8fbea7..3ccc09d58480 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_layer.c +++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c @@ -3131,6 +3131,14 @@ int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd, sync_pt_data = &mfd->mdp_sync_pt_data; mutex_lock(&sync_pt_data->sync_mutex); count = sync_pt_data->acq_fen_cnt; + + if (count >= MDP_MAX_FENCE_FD) { + pr_err("Reached maximum possible value for fence count\n"); + mutex_unlock(&sync_pt_data->sync_mutex); + rc = -EINVAL; + goto input_layer_err; + } + sync_pt_data->acq_fen[count] = fence; sync_pt_data->acq_fen_cnt++; mutex_unlock(&sync_pt_data->sync_mutex); diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c index 953aeb95332b..305fff6b5695 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c +++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c @@ -5023,12 +5023,15 @@ static int mdss_fb_get_metadata(struct msm_fb_data_type *mfd, break; case metadata_op_get_ion_fd: if (mfd->fb_ion_handle && mfd->fb_ion_client) { + get_dma_buf(mfd->fbmem_buf); metadata->data.fbmem_ionfd = ion_share_dma_buf_fd(mfd->fb_ion_client, mfd->fb_ion_handle); - if (metadata->data.fbmem_ionfd < 0) + if (metadata->data.fbmem_ionfd < 0) { + dma_buf_put(mfd->fbmem_buf); pr_err("fd allocation failed. 
fd = %d\n", metadata->data.fbmem_ionfd); + } } break; case metadata_op_crc: diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c index 017a2f10dfbc..a5ec7097e0f6 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c +++ b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -194,8 +194,12 @@ static int pp_hist_lut_cache_params_pipe_v1_7(struct mdp_hist_lut_data *config, return -EINVAL; } - memcpy(&hist_lut_usr_config, config->cfg_payload, - sizeof(struct mdp_hist_lut_data_v1_7)); + if (copy_from_user(&hist_lut_usr_config, + (void __user *) config->cfg_payload, + sizeof(hist_lut_usr_config))) { + pr_err("failed to copy hist lut config\n"); + return -EFAULT; + } hist_lut_cache_data = pipe->pp_res.hist_lut_cfg_payload; if (!hist_lut_cache_data) { @@ -606,8 +610,12 @@ static int pp_pcc_cache_params_pipe_v1_7(struct mdp_pcc_cfg_data *config, return -EINVAL; } - memcpy(&v17_usr_config, config->cfg_payload, - sizeof(v17_usr_config)); + if (copy_from_user(&v17_usr_config, + (void __user *) config->cfg_payload, + sizeof(v17_usr_config))) { + pr_err("failed to copy pcc config\n"); + return -EFAULT; + } if (!(config->ops & MDP_PP_OPS_WRITE)) { pr_debug("write ops not set value of flag is %d\n", @@ -861,8 +869,12 @@ static int pp_igc_lut_cache_params_pipe_v1_7(struct mdp_igc_lut_data *config, goto igc_config_exit; } - memcpy(&v17_usr_config, config->cfg_payload, - sizeof(v17_usr_config)); + if (copy_from_user(&v17_usr_config, + (void __user *) config->cfg_payload, + sizeof(v17_usr_config))) { + pr_err("failed to copy igc usr config\n"); + return -EFAULT; + } if (!(config->ops & MDP_PP_OPS_WRITE)) { 
pr_debug("op for gamut %d\n", config->ops); @@ -1272,8 +1284,12 @@ static int pp_pa_cache_params_pipe_v1_7(struct mdp_pa_v2_cfg_data *config, return -EINVAL; } - memcpy(&pa_usr_config, config->cfg_payload, - sizeof(struct mdp_pa_data_v1_7)); + if (copy_from_user(&pa_usr_config, + (void __user *) config->cfg_payload, + sizeof(pa_usr_config))) { + pr_err("failed to copy pa usr config\n"); + return -EFAULT; + } pa_cache_data = pipe->pp_res.pa_cfg_payload; if (!pa_cache_data) { diff --git a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c index d412a1c66e79..5df1ed7c89a5 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c +++ b/drivers/video/fbdev/msm/mdss_mdp_splash_logo.c @@ -175,7 +175,7 @@ static int mdss_mdp_splash_iommu_attach(struct msm_fb_data_type *mfd) ret = mdss_smmu_set_attribute(MDSS_IOMMU_DOMAIN_UNSECURE, EARLY_MAP, 1); if (ret) { - pr_debug("mdss set attribute failed for early map\n"); + pr_err("mdss set attribute failed for early map\n"); goto end; } @@ -198,6 +198,9 @@ static int mdss_mdp_splash_iommu_attach(struct msm_fb_data_type *mfd) } ret = mdss_smmu_set_attribute(MDSS_IOMMU_DOMAIN_UNSECURE, EARLY_MAP, 0); + if (ret) + pr_err("mdss reset attribute failed for early map\n"); + end: mdata->handoff_pending = true; diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c index 80dbe83972d7..bb3b4b3fa929 100644 --- a/drivers/video/fbdev/msm/msm_mdss_io_8974.c +++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c @@ -2587,7 +2587,8 @@ int mdss_dsi_post_clkon_cb(void *priv, } if (clk & MDSS_DSI_LINK_CLK) { /* toggle the resync FIFO everytime clock changes */ - if (ctrl->shared_data->phy_rev == DSI_PHY_REV_30) + if ((ctrl->shared_data->phy_rev == DSI_PHY_REV_30) && + !pdata->panel_info.cont_splash_enabled) mdss_dsi_phy_v3_toggle_resync_fifo(ctrl); if (ctrl->ulps) { diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c index 
e0c98423f2c9..11a72bc2c71b 100644 --- a/drivers/watchdog/bcm_kona_wdt.c +++ b/drivers/watchdog/bcm_kona_wdt.c @@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev) if (!wdt) return -ENOMEM; + spin_lock_init(&wdt->lock); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); wdt->base = devm_ioremap_resource(dev, res); if (IS_ERR(wdt->base)) @@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev) return ret; } - spin_lock_init(&wdt->lock); platform_set_drvdata(pdev, wdt); watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt); bcm_kona_wdt_wdd.parent = &pdev->dev; diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 7399782c0998..8a58bbc14de2 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -409,9 +409,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, if (map == SWIOTLB_MAP_ERROR) return DMA_ERROR_CODE; + dev_addr = xen_phys_to_bus(map); xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), dev_addr, map & ~PAGE_MASK, size, dir, attrs); - dev_addr = xen_phys_to_bus(map); /* * Ensure that the address returned is DMA'ble @@ -567,13 +567,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, sg_dma_len(sgl) = 0; return 0; } + dev_addr = xen_phys_to_bus(map); xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), dev_addr, map & ~PAGE_MASK, sg->length, dir, attrs); - sg->dma_address = xen_phys_to_bus(map); + sg->dma_address = dev_addr; } else { /* we are not interested in the dma_addr returned by * xen_dma_map_page, only in the potential cache flushes executed diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index ac7d921ed984..257425511d10 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -331,7 +331,7 @@ static int autofs_dev_ioctl_fail(struct file *fp, int status; token = (autofs_wqt_t) param->fail.token; - status = param->fail.status ? param->fail.status : -ENOENT; + status = param->fail.status < 0 ? 
param->fail.status : -ENOENT; return autofs4_wait_release(sbi, token, status); } diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 0c52941dd62c..6c031dd1bc4e 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -2295,6 +2295,7 @@ static int elf_core_dump(struct coredump_params *cprm) goto end_coredump; } } + dump_truncate(cprm); if (!elf_core_write_extra_data(cprm)) goto end_coredump; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 863fa0f1972b..a61926cb01c0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4397,8 +4397,19 @@ search_again: if (found_type > min_type) { del_item = 1; } else { - if (item_end < new_size) + if (item_end < new_size) { + /* + * With NO_HOLES mode, for the following mapping + * + * [0-4k][hole][8k-12k] + * + * if truncating isize down to 6k, it ends up + * isize being 8k. + */ + if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) + last_size = new_size; break; + } if (found_key.offset >= new_size) del_item = 1; else diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 87b87e091e8e..efd72e1fae74 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid, __u16 search_flags, struct cifs_search_info *srch_inf) { - return CIFSFindFirst(xid, tcon, path, cifs_sb, - &fid->netfid, search_flags, srch_inf, true); + int rc; + + rc = CIFSFindFirst(xid, tcon, path, cifs_sb, + &fid->netfid, search_flags, srch_inf, true); + if (rc) + cifs_dbg(FYI, "find first failed=%d\n", rc); + return rc; } static int diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 087918c4612a..1d125d3d0d89 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -909,7 +909,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL); kfree(utf16_path); if (rc) { - cifs_dbg(VFS, "open dir failed\n"); + cifs_dbg(FYI, "open dir failed rc=%d\n", rc); return rc; } @@ -919,7 +919,7 
@@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_query_directory(xid, tcon, fid->persistent_fid, fid->volatile_fid, 0, srch_inf); if (rc) { - cifs_dbg(VFS, "query directory failed\n"); + cifs_dbg(FYI, "query directory failed rc=%d\n", rc); SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); } return rc; diff --git a/fs/coredump.c b/fs/coredump.c index fe0a28da18a6..2ce5ef429c48 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -810,3 +810,21 @@ int dump_align(struct coredump_params *cprm, int align) return mod ? dump_skip(cprm, align - mod) : 1; } EXPORT_SYMBOL(dump_align); + +/* + * Ensures that file size is big enough to contain the current file + * postion. This prevents gdb from complaining about a truncated file + * if the last "write" to the file was dump_skip. + */ +void dump_truncate(struct coredump_params *cprm) +{ + struct file *file = cprm->file; + loff_t offset; + + if (file->f_op->llseek && file->f_op->llseek != no_llseek) { + offset = file->f_op->llseek(file, 0, SEEK_CUR); + if (i_size_read(file->f_mapping->host) < offset) + do_truncate(file->f_path.dentry, offset, 0, file); + } +} +EXPORT_SYMBOL(dump_truncate); diff --git a/fs/dcache.c b/fs/dcache.c index 3a3b0f370304..4c07a84d1317 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1128,11 +1128,12 @@ void shrink_dcache_sb(struct super_block *sb) LIST_HEAD(dispose); freed = list_lru_walk(&sb->s_dentry_lru, - dentry_lru_isolate_shrink, &dispose, UINT_MAX); + dentry_lru_isolate_shrink, &dispose, 1024); this_cpu_sub(nr_dentry_unused, freed); shrink_dentry_list(&dispose); - } while (freed > 0); + cond_resched(); + } while (list_lru_count(&sb->s_dentry_lru) > 0); } EXPORT_SYMBOL(shrink_dcache_sb); diff --git a/fs/exec.c b/fs/exec.c index b1f5ddbf7d56..073ae12b396e 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -206,8 +206,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, if (write) { unsigned long size = bprm->vma->vm_end - 
bprm->vma->vm_start; + unsigned long ptr_size; struct rlimit *rlim; + /* + * Since the stack will hold pointers to the strings, we + * must account for them as well. + * + * The size calculation is the entire vma while each arg page is + * built, so each time we get here it's calculating how far it + * is currently (rather than each call being just the newly + * added size from the arg page). As a result, we need to + * always add the entire size of the pointers, so that on the + * last call to get_arg_page() we'll actually have the entire + * correct size. + */ + ptr_size = (bprm->argc + bprm->envc) * sizeof(void *); + if (ptr_size > ULONG_MAX - size) + goto fail; + size += ptr_size; + acct_arg_size(bprm, size / PAGE_SIZE); /* @@ -225,13 +243,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, * to work from. */ rlim = current->signal->rlim; - if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) { - put_page(page); - return NULL; - } + if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) + goto fail; } return page; + +fail: + put_page(page); + return NULL; } static void put_arg_page(struct page *page) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 4e3679b25b9b..8e425f2c5ddd 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2188,8 +2188,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred, if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) return 0; - /* even though OPEN succeeded, access is denied. 
Close the file */ - nfs4_close_state(state, fmode); return -EACCES; } diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 709fbbd44c65..acebc350e98d 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -2070,13 +2070,13 @@ unlock: spin_unlock(&o2hb_live_lock); } -static ssize_t o2hb_heartbeat_group_threshold_show(struct config_item *item, +static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", o2hb_dead_threshold); } -static ssize_t o2hb_heartbeat_group_threshold_store(struct config_item *item, +static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item, const char *page, size_t count) { unsigned long tmp; @@ -2125,11 +2125,11 @@ static ssize_t o2hb_heartbeat_group_mode_store(struct config_item *item, } -CONFIGFS_ATTR(o2hb_heartbeat_group_, threshold); +CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold); CONFIGFS_ATTR(o2hb_heartbeat_group_, mode); static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = { - &o2hb_heartbeat_group_attr_threshold, + &o2hb_heartbeat_group_attr_dead_threshold, &o2hb_heartbeat_group_attr_mode, NULL, }; diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 2eb66decc5ab..b3b95e2ae2ff 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c @@ -121,11 +121,12 @@ static void squashfs_process_blocks(struct squashfs_read_request *req) if (req->data_processing == SQUASHFS_METADATA) { /* Extract the length of the metadata block */ - if (req->offset != msblk->devblksize - 1) - length = *((u16 *)(bh[0]->b_data + req->offset)); - else { - length = bh[0]->b_data[req->offset]; - length |= bh[1]->b_data[0] << 8; + if (req->offset != msblk->devblksize - 1) { + length = le16_to_cpup((__le16 *) + (bh[0]->b_data + req->offset)); + } else { + length = (unsigned char)bh[0]->b_data[req->offset]; + length |= (unsigned char)bh[1]->b_data[0] << 8; } req->compressed = SQUASHFS_COMPRESSED(length); 
req->data_processing = req->compressed ? SQUASHFS_DECOMPRESS @@ -211,8 +212,8 @@ static int bh_is_optional(struct squashfs_read_request *req, int idx) int start_idx, end_idx; struct squashfs_sb_info *msblk = req->sb->s_fs_info; - start_idx = (idx * msblk->devblksize - req->offset) / PAGE_CACHE_SIZE; - end_idx = ((idx + 1) * msblk->devblksize - req->offset + 1) / PAGE_CACHE_SIZE; + start_idx = (idx * msblk->devblksize - req->offset) >> PAGE_SHIFT; + end_idx = ((idx + 1) * msblk->devblksize - req->offset + 1) >> PAGE_SHIFT; if (start_idx >= req->output->pages) return 1; if (start_idx < 0) diff --git a/include/linux/coredump.h b/include/linux/coredump.h index d016a121a8c4..28ffa94aed6b 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -14,6 +14,7 @@ struct coredump_params; extern int dump_skip(struct coredump_params *cprm, size_t nr); extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); +extern void dump_truncate(struct coredump_params *cprm); #ifdef CONFIG_COREDUMP extern void do_coredump(const siginfo_t *siginfo); #else diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 2a6b9947aaa3..743b34f56f2b 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -44,6 +44,7 @@ struct list_lru_node { /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */ struct list_lru_memcg *memcg_lrus; #endif + long nr_items; } ____cacheline_aligned_in_smp; struct list_lru { diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 5219df44cfec..ae8d475a9385 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -23,9 +23,13 @@ enum migrate_reason { MR_SYSCALL, /* also applies to cpusets */ MR_MEMPOLICY_MBIND, MR_NUMA_MISPLACED, - MR_CMA + MR_CMA, + MR_TYPES }; +/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */ +extern char *migrate_reason_names[MR_TYPES]; + #ifdef CONFIG_MIGRATION 
extern void putback_movable_pages(struct list_head *l); diff --git a/include/linux/mm.h b/include/linux/mm.h index e599eb35c10e..7d0b5e7bcadb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -529,7 +529,6 @@ void put_page(struct page *page); void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); -int split_free_page(struct page *page); /* * Compound pages have a destructor function. Provide a diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 9d1161a8d6b7..2b1be7efde55 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -71,6 +71,9 @@ enum { */ extern int *get_migratetype_fallbacks(int mtype); +/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */ +extern char * const migratetype_names[MIGRATE_TYPES]; + #ifdef CONFIG_CMA bool is_cma_pageblock(struct page *page); # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index 17f118a82854..03f2a3e7d76d 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -3,6 +3,7 @@ #include <linux/types.h> #include <linux/stacktrace.h> +#include <linux/stackdepot.h> struct pglist_data; struct page_ext_operations { @@ -44,8 +45,8 @@ struct page_ext { #ifdef CONFIG_PAGE_OWNER unsigned int order; gfp_t gfp_mask; - unsigned int nr_entries; - unsigned long trace_entries[8]; + int last_migrate_reason; + depot_stack_handle_t handle; #endif }; diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index bf268fa92c5b..fec40271339f 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops; static inline bool page_is_young(struct page *page) { - return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return 
test_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline void set_page_young(struct page *page) { - set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + set_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline bool test_and_clear_page_young(struct page *page) { - return test_and_clear_bit(PAGE_EXT_YOUNG, - &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags); } static inline bool page_is_idle(struct page *page) { - return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return false; + + return test_bit(PAGE_EXT_IDLE, &page_ext->flags); } static inline void set_page_idle(struct page *page) { - set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + set_bit(PAGE_EXT_IDLE, &page_ext->flags); } static inline void clear_page_idle(struct page *page) { - clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + + clear_bit(PAGE_EXT_IDLE, &page_ext->flags); } #endif /* CONFIG_64BIT */ diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index cacaabea8a09..30583ab0ffb1 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -1,38 +1,52 @@ #ifndef __LINUX_PAGE_OWNER_H #define __LINUX_PAGE_OWNER_H +#include <linux/jump_label.h> + #ifdef CONFIG_PAGE_OWNER -extern bool page_owner_inited; +extern struct static_key_false page_owner_inited; extern struct page_ext_operations page_owner_ops; extern void __reset_page_owner(struct page *page, unsigned int order); extern void __set_page_owner(struct page *page, unsigned int order, 
gfp_t gfp_mask); -extern gfp_t __get_page_owner_gfp(struct page *page); +extern void __split_page_owner(struct page *page, unsigned int order); +extern void __copy_page_owner(struct page *oldpage, struct page *newpage); +extern void __set_page_owner_migrate_reason(struct page *page, int reason); +extern void __dump_page_owner(struct page *page); static inline void reset_page_owner(struct page *page, unsigned int order) { - if (likely(!page_owner_inited)) - return; - - __reset_page_owner(page, order); + if (static_branch_unlikely(&page_owner_inited)) + __reset_page_owner(page, order); } static inline void set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) { - if (likely(!page_owner_inited)) - return; - - __set_page_owner(page, order, gfp_mask); + if (static_branch_unlikely(&page_owner_inited)) + __set_page_owner(page, order, gfp_mask); } -static inline gfp_t get_page_owner_gfp(struct page *page) +static inline void split_page_owner(struct page *page, unsigned int order) { - if (likely(!page_owner_inited)) - return 0; - - return __get_page_owner_gfp(page); + if (static_branch_unlikely(&page_owner_inited)) + __split_page_owner(page, order); +} +static inline void copy_page_owner(struct page *oldpage, struct page *newpage) +{ + if (static_branch_unlikely(&page_owner_inited)) + __copy_page_owner(oldpage, newpage); +} +static inline void set_page_owner_migrate_reason(struct page *page, int reason) +{ + if (static_branch_unlikely(&page_owner_inited)) + __set_page_owner_migrate_reason(page, reason); +} +static inline void dump_page_owner(struct page *page) +{ + if (static_branch_unlikely(&page_owner_inited)) + __dump_page_owner(page); } #else static inline void reset_page_owner(struct page *page, unsigned int order) @@ -42,10 +56,18 @@ static inline void set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) { } -static inline gfp_t get_page_owner_gfp(struct page *page) +static inline void split_page_owner(struct page *page, + unsigned int 
order) +{ +} +static inline void copy_page_owner(struct page *oldpage, struct page *newpage) +{ +} +static inline void set_page_owner_migrate_reason(struct page *page, int reason) +{ +} +static inline void dump_page_owner(struct page *page) { - return 0; } - #endif /* CONFIG_PAGE_OWNER */ #endif /* __LINUX_PAGE_OWNER_H */ diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h new file mode 100644 index 000000000000..7978b3e2c1e1 --- /dev/null +++ b/include/linux/stackdepot.h @@ -0,0 +1,32 @@ +/* + * A generic stack depot implementation + * + * Author: Alexander Potapenko <glider@google.com> + * Copyright (C) 2016 Google, Inc. + * + * Based on code by Dmitry Chernenkov. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_STACKDEPOT_H +#define _LINUX_STACKDEPOT_H + +typedef u32 depot_stack_handle_t; + +struct stack_trace; + +depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags); + +void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace); + +#endif diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 25247220b4b7..f0f1793cfa49 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -29,7 +29,6 @@ */ struct tk_read_base { struct clocksource *clock; - cycle_t (*read)(struct clocksource *cs); cycle_t mask; cycle_t cycle_last; u32 mult; diff --git a/include/net/cnss2.h b/include/net/cnss2.h new file mode 100644 index 000000000000..5409e1b15a25 --- /dev/null +++ b/include/net/cnss2.h @@ -0,0 +1,192 @@ +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _NET_CNSS2_H +#define _NET_CNSS2_H + +#include <linux/pci.h> + +#define CNSS_MAX_FILE_NAME 20 +#define CNSS_MAX_TIMESTAMP_LEN 32 + +enum cnss_bus_width_type { + CNSS_BUS_WIDTH_NONE, + CNSS_BUS_WIDTH_LOW, + CNSS_BUS_WIDTH_MEDIUM, + CNSS_BUS_WIDTH_HIGH +}; + +enum cnss_platform_cap_flag { + CNSS_HAS_EXTERNAL_SWREG = 0x01, + CNSS_HAS_UART_ACCESS = 0x02, +}; + +struct cnss_platform_cap { + u32 cap_flag; +}; + +struct cnss_fw_files { + char image_file[CNSS_MAX_FILE_NAME]; + char board_data[CNSS_MAX_FILE_NAME]; + char otp_data[CNSS_MAX_FILE_NAME]; + char utf_file[CNSS_MAX_FILE_NAME]; + char utf_board_data[CNSS_MAX_FILE_NAME]; + char epping_file[CNSS_MAX_FILE_NAME]; + char evicted_data[CNSS_MAX_FILE_NAME]; +}; + +struct cnss_soc_info { + void __iomem *va; + phys_addr_t pa; + uint32_t chip_id; + uint32_t chip_family; + uint32_t board_id; + uint32_t soc_id; + uint32_t fw_version; + char fw_build_timestamp[CNSS_MAX_TIMESTAMP_LEN + 1]; +}; + +struct cnss_wlan_runtime_ops { + int (*runtime_suspend)(struct pci_dev *pdev); + int (*runtime_resume)(struct pci_dev *pdev); +}; + +struct cnss_wlan_driver { + char *name; + int (*probe)(struct pci_dev *pdev, const struct pci_device_id *id); + void (*remove)(struct pci_dev *pdev); + int (*reinit)(struct pci_dev *pdev, const struct pci_device_id *id); + void (*shutdown)(struct pci_dev *pdev); + void (*crash_shutdown)(struct pci_dev *pdev); + int (*suspend)(struct pci_dev *pdev, pm_message_t state); + int (*resume)(struct pci_dev *pdev); + int (*suspend_noirq)(struct pci_dev *pdev); + int (*resume_noirq)(struct pci_dev *pdev); + void (*modem_status)(struct pci_dev *, int state); + void (*update_status)(struct pci_dev *pdev, uint32_t status); + struct cnss_wlan_runtime_ops *runtime_ops; + const struct pci_device_id *id_table; +}; + +enum cnss_driver_status { + CNSS_UNINITIALIZED, + CNSS_INITIALIZED, + CNSS_LOAD_UNLOAD, + CNSS_RECOVERY, +}; + +struct cnss_ce_tgt_pipe_cfg { + u32 pipe_num; + u32 pipe_dir; + u32 nentries; + 
u32 nbytes_max; + u32 flags; + u32 reserved; +}; + +struct cnss_ce_svc_pipe_cfg { + u32 service_id; + u32 pipe_dir; + u32 pipe_num; +}; + +struct cnss_shadow_reg_cfg { + u16 ce_id; + u16 reg_offset; +}; + +struct cnss_shadow_reg_v2_cfg { + u32 addr; +}; + +struct cnss_wlan_enable_cfg { + u32 num_ce_tgt_cfg; + struct cnss_ce_tgt_pipe_cfg *ce_tgt_cfg; + u32 num_ce_svc_pipe_cfg; + struct cnss_ce_svc_pipe_cfg *ce_svc_cfg; + u32 num_shadow_reg_cfg; + struct cnss_shadow_reg_cfg *shadow_reg_cfg; + u32 num_shadow_reg_v2_cfg; + struct cnss_shadow_reg_v2_cfg *shadow_reg_v2_cfg; +}; + +enum cnss_driver_mode { + CNSS_MISSION, + CNSS_FTM, + CNSS_EPPING, + CNSS_WALTEST, + CNSS_OFF, + CNSS_CCPM, + CNSS_QVIT, + CNSS_CALIBRATION, +}; + +enum cnss_recovery_reason { + CNSS_REASON_DEFAULT, + CNSS_REASON_LINK_DOWN, + CNSS_REASON_RDDM, + CNSS_REASON_TIMEOUT, +}; + +extern int cnss_wlan_register_driver(struct cnss_wlan_driver *driver); +extern void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver); +extern void cnss_device_crashed(void); +extern int cnss_pci_link_down(struct device *dev); +extern void cnss_schedule_recovery(struct device *dev, + enum cnss_recovery_reason reason); +extern int cnss_self_recovery(struct device *dev, + enum cnss_recovery_reason reason); +extern int cnss_force_fw_assert(struct device *dev); +extern void *cnss_get_virt_ramdump_mem(unsigned long *size); +extern int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files, + u32 target_type, u32 target_version); +extern int cnss_get_platform_cap(struct cnss_platform_cap *cap); +extern int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info); +extern void cnss_set_driver_status(enum cnss_driver_status driver_status); +extern int cnss_request_bus_bandwidth(int bandwidth); +extern int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count); +extern int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 *ch_count, + u16 buf_len); +extern int cnss_wlan_set_dfs_nol(const void 
*info, u16 info_len); +extern int cnss_wlan_get_dfs_nol(void *info, u16 info_len); +extern int cnss_power_up(struct device *dev); +extern int cnss_power_down(struct device *dev); +extern u8 *cnss_common_get_wlan_mac_address(struct device *dev, uint32_t *num); +extern void cnss_request_pm_qos(u32 qos_val); +extern void cnss_remove_pm_qos(void); +extern void cnss_lock_pm_sem(void); +extern void cnss_release_pm_sem(void); +extern int cnss_wlan_pm_control(bool vote); +extern int cnss_auto_suspend(void); +extern int cnss_auto_resume(void); +extern int cnss_get_user_msi_assignment(struct device *dev, char *user_name, + int *num_vectors, + uint32_t *user_base_data, + uint32_t *base_vector); +extern int cnss_get_msi_irq(struct device *dev, unsigned int vector); +extern void cnss_get_msi_address(struct device *dev, uint32_t *msi_addr_low, + uint32_t *msi_addr_high); +extern int cnss_wlan_enable(struct device *dev, + struct cnss_wlan_enable_cfg *config, + enum cnss_driver_mode mode, + const char *host_version); +extern int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode); +extern unsigned int cnss_get_qmi_timeout(void); +extern int cnss_athdiag_read(struct device *dev, uint32_t offset, + uint32_t mem_type, uint32_t data_len, + uint8_t *output); +extern int cnss_athdiag_write(struct device *dev, uint32_t offset, + uint32_t mem_type, uint32_t data_len, + uint8_t *input); +extern int cnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode); + +#endif /* _NET_CNSS2_H */ diff --git a/include/net/xfrm.h b/include/net/xfrm.h index d18cbafc3455..c81d806e415f 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -947,10 +947,6 @@ struct xfrm_dst { struct flow_cache_object flo; struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; int num_pols, num_xfrms; -#ifdef CONFIG_XFRM_SUB_POLICY - struct flowi *origin; - struct xfrm_selector *partner; -#endif u32 xfrm_genid; u32 policy_genid; u32 route_mtu_cached; @@ -966,12 +962,6 @@ static inline void 
xfrm_dst_destroy(struct xfrm_dst *xdst) dst_release(xdst->route); if (likely(xdst->u.dst.xfrm)) xfrm_state_put(xdst->u.dst.xfrm); -#ifdef CONFIG_XFRM_SUB_POLICY - kfree(xdst->origin); - xdst->origin = NULL; - kfree(xdst->partner); - xdst->partner = NULL; -#endif } #endif diff --git a/include/soc/qcom/scm.h b/include/soc/qcom/scm.h index ad57eda97f9d..af389305207f 100644 --- a/include/soc/qcom/scm.h +++ b/include/soc/qcom/scm.h @@ -95,7 +95,7 @@ struct scm_desc { u64 x5; }; -#ifdef CONFIG_QCOM_SCM +#if defined(CONFIG_QCOM_SCM) || defined(CONFIG_QCOM_SCM_QCPE) extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len, void *resp_buf, size_t resp_len); @@ -230,7 +230,7 @@ static inline int scm_io_write(phys_addr_t address, u32 val) return 0; } -inline bool scm_is_secure_device(void) +static inline bool scm_is_secure_device(void) { return false; } diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h index 74995a0cdbad..2680a13721c2 100644 --- a/include/sound/apr_audio-v2.h +++ b/include/sound/apr_audio-v2.h @@ -446,6 +446,88 @@ struct adm_param_data_v5 { */ } __packed; + +struct param_data_v6 { + /* Unique ID of the module. */ + u32 module_id; + /* Unique ID of the instance. */ + u16 instance_id; + /* Reserved for future enhancements. + * This field must be set to zero. + */ + u16 reserved; + /* Unique ID of the parameter. */ + u32 param_id; + /* Data size of the param_id/module_id combination. + * This value is a + * multiple of 4 bytes. + */ + u32 param_size; +} __packed; + +/* ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1 command is used to set + * calibration data to the ADSP Matrix Mixer the payload is + * of struct adm_cmd_set_mtmx_params_v1. + * + * ADM_CMD_GET_MTMX_STRTR_DEV_PARAMS_V1 can be used to get + * the calibration data from the ADSP Matrix Mixer and + * ADM_CMDRSP_GET_MTMX_STRTR_DEV_PARAMS_V1 is the response + * ioctl to ADM_CMD_GET_MTMX_STRTR_DEV_PARAMS_V1. 
+ */ +#define ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1 0x00010367 +#define ADM_CMD_GET_MTMX_STRTR_DEV_PARAMS_V1 0x00010368 +#define ADM_CMDRSP_GET_MTMX_STRTR_DEV_PARAMS_V1 0x00010369 + +/* Payload of the #define ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1 command. + * If the data_payload_addr_lsw and data_payload_addr_msw element + * are NULL, a series of struct param_data_v6 structures immediately + * follows, whose total size is payload_size bytes. + */ +struct adm_cmd_set_mtmx_params_v1 { + struct apr_hdr hdr; + /* LSW of parameter data payload address.*/ + u32 payload_addr_lsw; + + /* MSW of parameter data payload address.*/ + u32 payload_addr_msw; + + /* Memory map handle returned by ADM_CMD_SHARED_MEM_MAP_REGIONS + * command. + * If mem_map_handle is zero it implies the message is in + * the payload + */ + u32 mem_map_handle; + + /* Size in bytes of the variable payload accompanying this + * message or in shared memory. This is used for parsing + * the parameter payload. + */ + u32 payload_size; + + /* COPP ID/Device ID */ + u16 copp_id; + + /* For alignment, must be set to 0 */ + u16 reserved; +} __packed; + +struct enable_param_v6 { + /* + * Specifies whether the Audio processing module is enabled. + * This parameter is generic/common parameter to configure or + * determine the state of any audio processing module. + */ + struct param_data_v6 param; + + /* @values 0 : Disable 1: Enable */ + uint32_t enable; +} __packed; + +/* Defined in ADSP as VOICE_MODULE_TX_STREAM_LIMITER but + * used for RX stream limiter on matrix input to ADM. + */ +#define ADM_MTMX_MODULE_STREAM_LIMITER 0x00010F15 + #define ASM_STREAM_CMD_REGISTER_PP_EVENTS 0x00013213 #define ASM_STREAM_PP_EVENT 0x00013214 #define ASM_STREAM_CMD_REGISTER_IEC_61937_FMT_UPDATE 0x13333 @@ -625,6 +707,29 @@ struct audproc_softvolume_params { */ #define AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT 0x00010913 +/* ID of the Channel Mixer module, which is used to configure + * channel-mixer related parameters. 
+ * This module supports the AUDPROC_CHMIXER_PARAM_ID_COEFF parameter ID. + */ +#define AUDPROC_MODULE_ID_CHMIXER 0x00010341 + +/* ID of the Coefficient parameter used by AUDPROC_MODULE_ID_CHMIXER to + *configure the channel mixer weighting coefficients. + */ +#define AUDPROC_CHMIXER_PARAM_ID_COEFF 0x00010342 + +/* Payload of the per-session, per-device parameter data of the + * #ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5 command or + * #ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V6 command. + * Immediately following this structure are param_size bytes of parameter + * data. The structure and size depend on the module_id/param_id pair. + */ +struct adm_pspd_param_data_t { + uint32_t module_id; + uint32_t param_id; + uint16_t param_size; + uint16_t reserved; +} __packed; struct audproc_mfc_output_media_fmt { struct adm_cmd_set_pp_params_v5 params; @@ -3902,6 +4007,14 @@ struct asm_softvolume_params { u32 rampingcurve; } __packed; +struct asm_stream_pan_ctrl_params { + uint16_t num_output_channels; + uint16_t num_input_channels; + uint16_t output_channel_map[8]; + uint16_t input_channel_map[8]; + uint16_t gain[64]; +} __packed; + #define ASM_END_POINT_DEVICE_MATRIX 0 #define PCM_CHANNEL_NULL 0 @@ -7802,6 +7915,48 @@ struct asm_volume_ctrl_lr_chan_gain { /*< Linear gain in Q13 format for the right channel.*/ } __packed; +struct audproc_chmixer_param_coeff { + uint32_t index; + uint16_t num_output_channels; + uint16_t num_input_channels; +} __packed; + + +/* ID of the Multichannel Volume Control parameters used by + * AUDPROC_MODULE_ID_VOL_CTRL. + */ +#define AUDPROC_PARAM_ID_MULTICHANNEL_GAIN 0x00010713 + +/* Payload of the AUDPROC_PARAM_ID_MULTICHANNEL_GAIN channel type/gain + * pairs used by the Volume Control module. + * This structure immediately follows the + * audproc_volume_ctrl_multichannel_gain_t structure. + */ +struct audproc_volume_ctrl_channel_type_gain_pair { + uint8_t channel_type; + /* Channel type for which the gain setting is to be applied. 
*/ + + uint8_t reserved1; + uint8_t reserved2; + uint8_t reserved3; + + uint32_t gain; + /* Gain value for this channel in Q28 format. */ +} __packed; + +/* Payload of the AUDPROC_PARAM_ID_MULTICHANNEL_MUTE parameters used by + * the Volume Control module. + */ +struct audproc_volume_ctrl_multichannel_gain { + uint32_t num_channels; + /* Number of channels for which mute configuration is provided. Any + * channels present in the data for which mute configuration is not + * provided are set to unmute. + */ + + struct audproc_volume_ctrl_channel_type_gain_pair *gain_data; + /* Array of channel type/mute setting pairs. */ +} __packed; /* Structure for the mute configuration parameter for a volume control module. */ @@ -9151,7 +9306,6 @@ struct srs_trumedia_params { } __packed; /* SRS TruMedia end */ -#define AUDPROC_PARAM_ID_ENABLE 0x00010904 #define ASM_STREAM_POSTPROC_TOPO_ID_SA_PLUS 0x1000FFFF /* DTS Eagle */ #define AUDPROC_MODULE_ID_DTS_HPX_PREMIX 0x0001077C diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h index e689e9357012..65c42ee18914 100644 --- a/include/sound/q6adm-v2.h +++ b/include/sound/q6adm-v2.h @@ -51,6 +51,13 @@ enum { ADM_CLIENT_ID_MAX, }; +/* ENUM for adm_status & route_status */ +enum adm_status_flags { + ADM_STATUS_CALIBRATION_REQUIRED = 0, + ADM_STATUS_LIMITER, + ADM_STATUS_MAX, +}; + #define MAX_COPPS_PER_PORT 0x8 #define ADM_MAX_CHANNELS 8 @@ -61,6 +68,7 @@ struct route_payload { int app_type[MAX_COPPS_PER_PORT]; int acdb_dev_id[MAX_COPPS_PER_PORT]; int sample_rate[MAX_COPPS_PER_PORT]; + unsigned long route_status[MAX_COPPS_PER_PORT]; unsigned short num_copps; unsigned int session_id; }; @@ -138,9 +146,13 @@ int adm_get_topology_for_port_copp_idx(int port_id, int copp_idx); int adm_get_indexes_from_copp_id(int copp_id, int *port_idx, int *copp_idx); -int adm_set_stereo_to_custom_stereo(int port_id, int copp_idx, - unsigned int session_id, - char *params, uint32_t params_length); +int adm_set_pspd_matrix_params(int port_id, int 
copp_idx, + unsigned int session_id, + char *params, uint32_t params_length); + +int adm_set_downmix_params(int port_id, int copp_idx, + unsigned int session_id, char *params, + uint32_t params_length); int adm_get_pp_topo_module_list(int port_id, int copp_idx, int32_t param_length, char *params); diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h index d0dffbd15923..177c2f4da32e 100644 --- a/include/sound/q6asm-v2.h +++ b/include/sound/q6asm-v2.h @@ -611,6 +611,14 @@ int q6asm_set_softvolume(struct audio_client *ac, int q6asm_set_softvolume_v2(struct audio_client *ac, struct asm_softvolume_params *param, int instance); +/* Set panning and MFC params */ +int q6asm_set_mfc_panning_params(struct audio_client *ac, + struct asm_stream_pan_ctrl_params *pan_param); + +/* Set vol gain pair */ +int q6asm_set_vol_ctrl_gain_pair(struct audio_client *ac, + struct asm_stream_pan_ctrl_params *pan_param); + /* Send left-right channel gain */ int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain); diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index a24c5b909047..b05509af0352 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -114,6 +114,11 @@ static ssize_t write_irq_affinity(int type, struct file *file, goto free_cpumask; } + if (cpumask_subset(new_value, cpu_isolated_mask)) { + err = -EINVAL; + goto free_cpumask; + } + /* * Do not allow disabling IRQs completely - it's a too easy * way to make the system unusable accidentally :-) At least diff --git a/kernel/panic.c b/kernel/panic.c index 982a52352cfc..679254405510 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -172,7 +172,7 @@ void panic(const char *fmt, ...) * Delay timeout seconds before rebooting the machine. * We can't use the "normal" timers since we just panicked. 
*/ - pr_emerg("Rebooting in %d seconds..", panic_timeout); + pr_emerg("Rebooting in %d seconds..\n", panic_timeout); for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { touch_nmi_watchdog(); diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index b0b93fd33af9..f8e8d68ed3fd 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -201,8 +201,9 @@ void calc_load_exit_idle(void) struct rq *this_rq = this_rq(); /* - * If we're still before the sample window, we're done. + * If we're still before the pending sample window, we're done. */ + this_rq->calc_load_update = calc_load_update; if (time_before(jiffies, this_rq->calc_load_update)) return; @@ -211,7 +212,6 @@ void calc_load_exit_idle(void) * accounted through the nohz accounting, so skip the entire deal and * sync up for the next window. */ - this_rq->calc_load_update = calc_load_update; if (time_before(jiffies, this_rq->calc_load_update + 10)) this_rq->calc_load_update += LOAD_FREQ; } diff --git a/kernel/signal.c b/kernel/signal.c index f3f1f7a972fd..b92a047ddc82 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -503,7 +503,8 @@ int unhandled_signal(struct task_struct *tsk, int sig) return !tsk->ptrace; } -static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) +static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, + bool *resched_timer) { struct sigqueue *q, *first = NULL; @@ -525,6 +526,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) still_pending: list_del_init(&first->list); copy_siginfo(info, &first->info); + + *resched_timer = + (first->flags & SIGQUEUE_PREALLOC) && + (info->si_code == SI_TIMER) && + (info->si_sys_private); + __sigqueue_free(first); } else { /* @@ -541,12 +548,12 @@ still_pending: } static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, - siginfo_t *info) + siginfo_t *info, bool *resched_timer) { int sig = next_signal(pending, mask); if (sig) - 
collect_signal(sig, pending, info); + collect_signal(sig, pending, info, resched_timer); return sig; } @@ -558,15 +565,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, */ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) { + bool resched_timer = false; int signr; /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ - signr = __dequeue_signal(&tsk->pending, mask, info); + signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); if (!signr) { signr = __dequeue_signal(&tsk->signal->shared_pending, - mask, info); + mask, info, &resched_timer); /* * itimer signal ? * @@ -611,7 +619,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) */ current->jobctl |= JOBCTL_STOP_DEQUEUED; } - if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { + if (resched_timer) { /* * Release the siglock to ensure proper locking order * of timer locks outside of siglocks. Note, we leave diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 5fa544f3f560..738f3467d169 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -116,6 +116,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) tk->offs_boot = ktime_add(tk->offs_boot, delta); } +/* + * tk_clock_read - atomic clocksource read() helper + * + * This helper is necessary to use in the read paths because, while the + * seqlock ensures we don't return a bad value while structures are updated, + * it doesn't protect from potential crashes. There is the possibility that + * the tkr's clocksource may change between the read reference, and the + * clock reference passed to the read function. This can cause crashes if + * the wrong clocksource is passed to the wrong read function. 
+ * This isn't necessary to use when holding the timekeeper_lock or doing + * a read of the fast-timekeeper tkrs (which is protected by its own locking + * and update logic). + */ +static inline u64 tk_clock_read(struct tk_read_base *tkr) +{ + struct clocksource *clock = READ_ONCE(tkr->clock); + + return clock->read(clock); +} + #ifdef CONFIG_DEBUG_TIMEKEEPING #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */ @@ -173,7 +193,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) */ do { seq = read_seqcount_begin(&tk_core.seq); - now = tkr->read(tkr->clock); + now = tk_clock_read(tkr); last = tkr->cycle_last; mask = tkr->mask; max = tkr->clock->max_cycles; @@ -207,7 +227,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) cycle_t cycle_now, delta; /* read clocksource */ - cycle_now = tkr->read(tkr->clock); + cycle_now = tk_clock_read(tkr); /* calculate the delta since the last update_wall_time */ delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask); @@ -235,12 +255,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) old_clock = tk->tkr_mono.clock; tk->tkr_mono.clock = clock; - tk->tkr_mono.read = clock->read; tk->tkr_mono.mask = clock->mask; - tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock); + tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono); tk->tkr_raw.clock = clock; - tk->tkr_raw.read = clock->read; tk->tkr_raw.mask = clock->mask; tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; @@ -404,7 +422,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf) now += timekeeping_delta_to_ns(tkr, clocksource_delta( - tkr->read(tkr->clock), + tk_clock_read(tkr), tkr->cycle_last, tkr->mask)); } while (read_seqcount_retry(&tkf->seq, seq)); @@ -461,6 +479,10 @@ static cycle_t dummy_clock_read(struct clocksource *cs) return cycles_at_suspend; } +static struct clocksource dummy_clock = { + .read = dummy_clock_read, +}; + /** * halt_fast_timekeeper - Prevent 
fast timekeeper from accessing clocksource. * @tk: Timekeeper to snapshot. @@ -477,13 +499,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk) struct tk_read_base *tkr = &tk->tkr_mono; memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); - cycles_at_suspend = tkr->read(tkr->clock); - tkr_dummy.read = dummy_clock_read; + cycles_at_suspend = tk_clock_read(tkr); + tkr_dummy.clock = &dummy_clock; update_fast_timekeeper(&tkr_dummy, &tk_fast_mono); tkr = &tk->tkr_raw; memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); - tkr_dummy.read = dummy_clock_read; + tkr_dummy.clock = &dummy_clock; update_fast_timekeeper(&tkr_dummy, &tk_fast_raw); } @@ -647,11 +669,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) */ static void timekeeping_forward_now(struct timekeeper *tk) { - struct clocksource *clock = tk->tkr_mono.clock; cycle_t cycle_now, delta; s64 nsec; - cycle_now = tk->tkr_mono.read(clock); + cycle_now = tk_clock_read(&tk->tkr_mono); delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); tk->tkr_mono.cycle_last = cycle_now; tk->tkr_raw.cycle_last = cycle_now; @@ -1434,7 +1455,7 @@ void timekeeping_resume(void) * The less preferred source will only be tried if there is no better * usable source. The rtc part is handled separately in rtc core code. */ - cycle_now = tk->tkr_mono.read(clock); + cycle_now = tk_clock_read(&tk->tkr_mono); if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && cycle_now > tk->tkr_mono.cycle_last) { u64 num, max = ULLONG_MAX; @@ -1829,7 +1850,7 @@ void update_wall_time(void) #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET offset = real_tk->cycle_interval; #else - offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock), + offset = clocksource_delta(tk_clock_read(&tk->tkr_mono), tk->tkr_mono.cycle_last, tk->tkr_mono.mask); #endif diff --git a/lib/Kconfig b/lib/Kconfig index 7f6feb7a4687..4bb61d2e6746 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -547,4 +547,8 @@ config QMI_ENCDEC_DEBUG library. 
This will log the information regarding the element and message being encoded & decoded. +config STACKDEPOT + bool + select STACKTRACE + endmenu diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 20b74735508d..6da96c4f98a5 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -244,6 +244,7 @@ config PAGE_OWNER depends on DEBUG_KERNEL && STACKTRACE_SUPPORT select DEBUG_FS select STACKTRACE + select STACKDEPOT select PAGE_EXTENSION help This keeps track of what call chain is the owner of a page, may diff --git a/lib/Makefile b/lib/Makefile index 03a447a234cc..3ea641eb91bc 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -167,6 +167,8 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o obj-$(CONFIG_SG_SPLIT) += sg_split.o obj-$(CONFIG_STMP_DEVICE) += stmp_device.o +obj-$(CONFIG_STACKDEPOT) += stackdepot.o + libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ fdt_empty_tree.o $(foreach file, $(libfdt_files), \ diff --git a/lib/cmdline.c b/lib/cmdline.c index 8f13cf73c2ec..79069d7938ea 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c @@ -22,14 +22,14 @@ * the values[M, M+1, ..., N] into the ints array in get_options. */ -static int get_range(char **str, int *pint) +static int get_range(char **str, int *pint, int n) { int x, inc_counter, upper_range; (*str)++; upper_range = simple_strtol((*str), NULL, 0); inc_counter = upper_range - *pint; - for (x = *pint; x < upper_range; x++) + for (x = *pint; n && x < upper_range; x++, n--) *pint++ = x; return inc_counter; } @@ -96,7 +96,7 @@ char *get_options(const char *str, int nints, int *ints) break; if (res == 3) { int range_nums; - range_nums = get_range((char **)&str, ints + i); + range_nums = get_range((char **)&str, ints + i, nints - i); if (range_nums < 0) break; /* diff --git a/lib/stackdepot.c b/lib/stackdepot.c new file mode 100644 index 000000000000..192134b225ca --- /dev/null +++ b/lib/stackdepot.c @@ -0,0 +1,284 @@ +/* + * Generic stack depot for storing stack traces. 
+ * + * Some debugging tools need to save stack traces of certain events which can + * be later presented to the user. For example, KASAN needs to safe alloc and + * free stacks for each object, but storing two stack traces per object + * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for + * that). + * + * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc + * and free stacks repeat a lot, we save about 100x space. + * Stacks are never removed from depot, so we store them contiguously one after + * another in a contiguos memory allocation. + * + * Author: Alexander Potapenko <glider@google.com> + * Copyright (C) 2016 Google, Inc. + * + * Based on code by Dmitry Chernenkov. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + */ + +#include <linux/gfp.h> +#include <linux/jhash.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/percpu.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/stacktrace.h> +#include <linux/stackdepot.h> +#include <linux/string.h> +#include <linux/types.h> + +#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8) + +#define STACK_ALLOC_NULL_PROTECTION_BITS 1 +#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */ +#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER)) +#define STACK_ALLOC_ALIGN 4 +#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \ + STACK_ALLOC_ALIGN) +#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \ + STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS) +#define STACK_ALLOC_SLABS_CAP 1024 +#define STACK_ALLOC_MAX_SLABS \ + (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \ + (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP) + +/* The compact structure to store the reference to stacks. */ +union handle_parts { + depot_stack_handle_t handle; + struct { + u32 slabindex : STACK_ALLOC_INDEX_BITS; + u32 offset : STACK_ALLOC_OFFSET_BITS; + u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS; + }; +}; + +struct stack_record { + struct stack_record *next; /* Link in the hashtable */ + u32 hash; /* Hash in the hastable */ + u32 size; /* Number of frames in the stack */ + union handle_parts handle; + unsigned long entries[1]; /* Variable-sized array of entries. */ +}; + +static void *stack_slabs[STACK_ALLOC_MAX_SLABS]; + +static int depot_index; +static int next_slab_inited; +static size_t depot_offset; +static DEFINE_SPINLOCK(depot_lock); + +static bool init_stack_slab(void **prealloc) +{ + if (!*prealloc) + return false; + /* + * This smp_load_acquire() pairs with smp_store_release() to + * |next_slab_inited| below and in depot_alloc_stack(). 
+ */ + if (smp_load_acquire(&next_slab_inited)) + return true; + if (stack_slabs[depot_index] == NULL) { + stack_slabs[depot_index] = *prealloc; + } else { + stack_slabs[depot_index + 1] = *prealloc; + /* + * This smp_store_release pairs with smp_load_acquire() from + * |next_slab_inited| above and in depot_save_stack(). + */ + smp_store_release(&next_slab_inited, 1); + } + *prealloc = NULL; + return true; +} + +/* Allocation of a new stack in raw storage */ +static struct stack_record *depot_alloc_stack(unsigned long *entries, int size, + u32 hash, void **prealloc, gfp_t alloc_flags) +{ + int required_size = offsetof(struct stack_record, entries) + + sizeof(unsigned long) * size; + struct stack_record *stack; + + required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN); + + if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) { + if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) { + WARN_ONCE(1, "Stack depot reached limit capacity"); + return NULL; + } + depot_index++; + depot_offset = 0; + /* + * smp_store_release() here pairs with smp_load_acquire() from + * |next_slab_inited| in depot_save_stack() and + * init_stack_slab(). + */ + if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) + smp_store_release(&next_slab_inited, 0); + } + init_stack_slab(prealloc); + if (stack_slabs[depot_index] == NULL) + return NULL; + + stack = stack_slabs[depot_index] + depot_offset; + + stack->hash = hash; + stack->size = size; + stack->handle.slabindex = depot_index; + stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN; + stack->handle.valid = 1; + memcpy(stack->entries, entries, size * sizeof(unsigned long)); + depot_offset += required_size; + + return stack; +} + +#define STACK_HASH_ORDER 18 +#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER) +#define STACK_HASH_MASK (STACK_HASH_SIZE - 1) +#define STACK_HASH_SEED 0x9747b28c + +static struct stack_record *stack_table[STACK_HASH_SIZE] = { + [0 ... 
STACK_HASH_SIZE - 1] = NULL +}; + +/* Calculate hash for a stack */ +static inline u32 hash_stack(unsigned long *entries, unsigned int size) +{ + return jhash2((u32 *)entries, + size * sizeof(unsigned long) / sizeof(u32), + STACK_HASH_SEED); +} + +/* Find a stack that is equal to the one stored in entries in the hash */ +static inline struct stack_record *find_stack(struct stack_record *bucket, + unsigned long *entries, int size, + u32 hash) +{ + struct stack_record *found; + + for (found = bucket; found; found = found->next) { + if (found->hash == hash && + found->size == size && + !memcmp(entries, found->entries, + size * sizeof(unsigned long))) { + return found; + } + } + return NULL; +} + +void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace) +{ + union handle_parts parts = { .handle = handle }; + void *slab = stack_slabs[parts.slabindex]; + size_t offset = parts.offset << STACK_ALLOC_ALIGN; + struct stack_record *stack = slab + offset; + + trace->nr_entries = trace->max_entries = stack->size; + trace->entries = stack->entries; + trace->skip = 0; +} + +/** + * depot_save_stack - save stack in a stack depot. + * @trace - the stacktrace to save. + * @alloc_flags - flags for allocating additional memory if required. + * + * Returns the handle of the stack struct stored in depot. + */ +depot_stack_handle_t depot_save_stack(struct stack_trace *trace, + gfp_t alloc_flags) +{ + u32 hash; + depot_stack_handle_t retval = 0; + struct stack_record *found = NULL, **bucket; + unsigned long flags; + struct page *page = NULL; + void *prealloc = NULL; + + if (unlikely(trace->nr_entries == 0)) + goto fast_exit; + + hash = hash_stack(trace->entries, trace->nr_entries); + bucket = &stack_table[hash & STACK_HASH_MASK]; + + /* + * Fast path: look the stack trace up without locking. + * The smp_load_acquire() here pairs with smp_store_release() to + * |bucket| below. 
+ */ + found = find_stack(smp_load_acquire(bucket), trace->entries, + trace->nr_entries, hash); + if (found) + goto exit; + + /* + * Check if the current or the next stack slab need to be initialized. + * If so, allocate the memory - we won't be able to do that under the + * lock. + * + * The smp_load_acquire() here pairs with smp_store_release() to + * |next_slab_inited| in depot_alloc_stack() and init_stack_slab(). + */ + if (unlikely(!smp_load_acquire(&next_slab_inited))) { + /* + * Zero out zone modifiers, as we don't have specific zone + * requirements. Keep the flags related to allocation in atomic + * contexts and I/O. + */ + alloc_flags &= ~GFP_ZONEMASK; + alloc_flags &= (GFP_ATOMIC | GFP_KERNEL); + page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER); + if (page) + prealloc = page_address(page); + } + + spin_lock_irqsave(&depot_lock, flags); + + found = find_stack(*bucket, trace->entries, trace->nr_entries, hash); + if (!found) { + struct stack_record *new = + depot_alloc_stack(trace->entries, trace->nr_entries, + hash, &prealloc, alloc_flags); + if (new) { + new->next = *bucket; + /* + * This smp_store_release() pairs with + * smp_load_acquire() from |bucket| above. + */ + smp_store_release(bucket, new); + found = new; + } + } else if (prealloc) { + /* + * We didn't need to store this stack trace, but let's keep + * the preallocated memory for the future. + */ + WARN_ON(!init_stack_slab(&prealloc)); + } + + spin_unlock_irqrestore(&depot_lock, flags); +exit: + if (prealloc) { + /* Nobody used this memory, ok to free it. 
*/ + free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER); + } + if (found) + retval = found->handle.handle; +fast_exit: + return retval; +} diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 76f29ecba8f4..771234d050c7 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -452,11 +452,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); /* - * For mappings greater than a page, we limit the stride (and - * hence alignment) to a page size. + * For mappings greater than or equal to a page, we limit the stride + * (and hence alignment) to a page size. */ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; - if (size > PAGE_SIZE) + if (size >= PAGE_SIZE) stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); else stride = 1; diff --git a/mm/compaction.c b/mm/compaction.c index 87ed50835881..f96a58e1843a 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -19,6 +19,7 @@ #include <linux/kasan.h> #include <linux/kthread.h> #include <linux/freezer.h> +#include <linux/page_owner.h> #include "internal.h" #ifdef CONFIG_COMPACTION @@ -59,13 +60,27 @@ static unsigned long release_freepages(struct list_head *freelist) static void map_pages(struct list_head *list) { - struct page *page; + unsigned int i, order, nr_pages; + struct page *page, *next; + LIST_HEAD(tmp_list); + + list_for_each_entry_safe(page, next, list, lru) { + list_del(&page->lru); - list_for_each_entry(page, list, lru) { - kasan_alloc_pages(page, 0); - arch_alloc_page(page, 0); - kernel_map_pages(page, 1, 1); + order = page_private(page); + nr_pages = 1 << order; + + post_alloc_hook(page, order, __GFP_MOVABLE); + if (order) + split_page(page, order); + + for (i = 0; i < nr_pages; i++) { + list_add(&page->lru, &tmp_list); + page++; + } } + + list_splice(&tmp_list, list); } static inline bool migrate_async_suitable(int migratetype) @@ -442,12 +457,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, unsigned long flags = 0; bool locked = false; 
unsigned long blockpfn = *start_pfn; + unsigned int order; cursor = pfn_to_page(blockpfn); /* Isolate free pages. */ for (; blockpfn < end_pfn; blockpfn++, cursor++) { - int isolated, i; + int isolated; struct page *page = cursor; /* @@ -513,17 +529,17 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, goto isolate_fail; } - /* Found a free page, break it into order-0 pages */ - isolated = split_free_page(page); + /* Found a free page, will break it into order-0 pages */ + order = page_order(page); + isolated = __isolate_free_page(page, order); if (!isolated) break; + set_page_private(page, order); total_isolated += isolated; cc->nr_freepages += isolated; - for (i = 0; i < isolated; i++) { - list_add(&page->lru, freelist); - page++; - } + list_add_tail(&page->lru, freelist); + if (!strict && cc->nr_migratepages <= cc->nr_freepages) { blockpfn += isolated; break; @@ -636,7 +652,7 @@ isolate_freepages_range(struct compact_control *cc, */ } - /* split_free_page does not map the pages */ + /* __isolate_free_page() does not map the pages */ map_pages(&freelist); if (pfn < end_pfn) { @@ -1085,7 +1101,7 @@ static void isolate_freepages(struct compact_control *cc) } } - /* split_free_page does not map the pages */ + /* __isolate_free_page() does not map the pages */ map_pages(freelist); /* diff --git a/mm/debug.c b/mm/debug.c index 5cf26f8c69a3..3621385c09ac 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -9,6 +9,18 @@ #include <linux/mm.h> #include <linux/trace_events.h> #include <linux/memcontrol.h> +#include <linux/migrate.h> +#include <linux/page_owner.h> + +char *migrate_reason_names[MR_TYPES] = { + "compaction", + "memory_failure", + "memory_hotplug", + "syscall_or_cpuset", + "mempolicy_mbind", + "numa_misplaced", + "cma", +}; static const struct trace_print_flags pageflag_names[] = { {1UL << PG_locked, "locked" }, @@ -106,6 +118,7 @@ void dump_page_badflags(struct page *page, const char *reason, void dump_page(struct page *page, const char 
*reason) { dump_page_badflags(page, reason, 0); + dump_page_owner(page); } EXPORT_SYMBOL(dump_page); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 47b469663822..6c6f5ccfcda1 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1363,8 +1363,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, */ if (unlikely(pmd_trans_migrating(*pmdp))) { page = pmd_page(*pmdp); + if (!get_page_unless_zero(page)) + goto out_unlock; spin_unlock(ptl); wait_on_page_locked(page); + put_page(page); goto out; } @@ -1396,8 +1399,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, /* Migration could have started since the pmd_trans_migrating check */ if (!page_locked) { + if (!get_page_unless_zero(page)) + goto out_unlock; spin_unlock(ptl); wait_on_page_locked(page); + put_page(page); page_nid = -1; goto out; } diff --git a/mm/internal.h b/mm/internal.h index 7b9e313d9dea..46d27f378885 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -182,6 +182,8 @@ extern void prep_compound_page(struct page *page, unsigned int order); #ifdef CONFIG_MEMORY_FAILURE extern bool is_free_buddy_page(struct page *page); #endif +extern void post_alloc_hook(struct page *page, unsigned int order, + gfp_t gfp_flags); extern int user_min_free_kbytes; #if defined CONFIG_COMPACTION || defined CONFIG_CMA diff --git a/mm/list_lru.c b/mm/list_lru.c index 5d8dffd5b57c..786176b1a0ee 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item) l = list_lru_from_kmem(nlru, item); list_add_tail(item, &l->list); l->nr_items++; + nlru->nr_items++; spin_unlock(&nlru->lock); return true; } @@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item) l = list_lru_from_kmem(nlru, item); list_del_init(item); l->nr_items--; + nlru->nr_items--; spin_unlock(&nlru->lock); return true; } @@ -183,15 +185,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one); unsigned long 
list_lru_count_node(struct list_lru *lru, int nid) { - long count = 0; - int memcg_idx; + struct list_lru_node *nlru; - count += __list_lru_count_one(lru, nid, -1); - if (list_lru_memcg_aware(lru)) { - for_each_memcg_cache_index(memcg_idx) - count += __list_lru_count_one(lru, nid, memcg_idx); - } - return count; + nlru = &lru->node[nid]; + return nlru->nr_items; } EXPORT_SYMBOL_GPL(list_lru_count_node); @@ -226,6 +223,7 @@ restart: assert_spin_locked(&nlru->lock); case LRU_REMOVED: isolated++; + nlru->nr_items--; /* * If the lru lock has been dropped, our list * traversal is now invalid and so we have to diff --git a/mm/migrate.c b/mm/migrate.c index 9a5ccfc71afc..85af2816b6d2 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -39,6 +39,7 @@ #include <linux/balloon_compaction.h> #include <linux/mmu_notifier.h> #include <linux/page_idle.h> +#include <linux/page_owner.h> #include <asm/tlbflush.h> @@ -668,6 +669,8 @@ void migrate_page_copy(struct page *newpage, struct page *page) */ if (PageWriteback(newpage)) end_page_writeback(newpage); + + copy_page_owner(page, newpage); } EXPORT_SYMBOL(migrate_page_copy); @@ -1097,6 +1100,9 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page, goto out; rc = __unmap_and_move(page, newpage, force, mode); + if (rc == MIGRATEPAGE_SUCCESS) { + set_page_owner_migrate_reason(newpage, reason); + } out: if (rc != -EAGAIN) { @@ -1179,7 +1185,7 @@ put_new: static int unmap_and_move_huge_page(new_page_t get_new_page, free_page_t put_new_page, unsigned long private, struct page *hpage, int force, - enum migrate_mode mode) + enum migrate_mode mode, int reason) { int rc = -EAGAIN; int *result = NULL; @@ -1237,6 +1243,7 @@ put_anon: if (rc == MIGRATEPAGE_SUCCESS) { hugetlb_cgroup_migrate(hpage, new_hpage); put_new_page = NULL; + set_page_owner_migrate_reason(new_hpage, reason); } unlock_page(hpage); @@ -1311,7 +1318,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, if (PageHuge(page)) rc = 
unmap_and_move_huge_page(get_new_page, put_new_page, private, page, - pass > 2, mode); + pass > 2, mode, reason); else rc = unmap_and_move(get_new_page, put_new_page, private, page, pass > 2, mode, @@ -2001,6 +2008,7 @@ fail_putback: set_page_memcg(new_page, page_memcg(page)); set_page_memcg(page, NULL); page_remove_rmap(page); + set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED); spin_unlock(ptl); mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4fbb23c1cba7..6759192e69de 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -223,6 +223,20 @@ static char * const zone_names[MAX_NR_ZONES] = { }; static void free_compound_page(struct page *page); + +char * const migratetype_names[MIGRATE_TYPES] = { + "Unmovable", + "Movable", + "Reclaimable", +#ifdef CONFIG_CMA + "CMA", +#endif + "HighAtomic", +#ifdef CONFIG_MEMORY_ISOLATION + "Isolate", +#endif +}; + compound_page_dtor * const compound_page_dtors[] = { NULL, free_compound_page, @@ -459,6 +473,7 @@ static void bad_page(struct page *page, const char *reason, printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", current->comm, page_to_pfn(page)); dump_page_badflags(page, reason, bad_flags); + dump_page_owner(page); print_modules(); dump_stack(); @@ -569,6 +584,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page, return; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return; + __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); INIT_LIST_HEAD(&page->lru); @@ -586,6 +604,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page, return; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return; + __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); set_page_private(page, 0); @@ -1422,8 +1443,21 @@ static inline bool free_pages_prezeroed(void) page_poisoning_enabled(); } +inline void post_alloc_hook(struct page *page, unsigned int order, + gfp_t gfp_flags) +{ + 
set_page_private(page, 0); + set_page_refcounted(page); + + kasan_alloc_pages(page, order); + arch_alloc_page(page, order); + kernel_map_pages(page, 1 << order, 1); + kernel_poison_pages(page, 1 << order, 1); + set_page_owner(page, order, gfp_flags); +} + static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, - int alloc_flags) + int alloc_flags) { int i; @@ -1433,13 +1467,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, return 1; } - set_page_private(page, 0); - set_page_refcounted(page); - - kasan_alloc_pages(page, order); - arch_alloc_page(page, order); - kernel_map_pages(page, 1 << order, 1); - kernel_poison_pages(page, 1 << order, 1); + post_alloc_hook(page, order, gfp_flags); if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO)) for (i = 0; i < (1 << order); i++) @@ -1448,8 +1476,6 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, if (order && (gfp_flags & __GFP_COMP)) prep_compound_page(page, order); - set_page_owner(page, order, gfp_flags); - /* * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to * allocate the page. 
The expectation is that the caller is taking @@ -2217,7 +2243,6 @@ void free_hot_cold_page_list(struct list_head *list, bool cold) void split_page(struct page *page, unsigned int order) { int i; - gfp_t gfp_mask; VM_BUG_ON_PAGE(PageCompound(page), page); VM_BUG_ON_PAGE(!page_count(page), page); @@ -2231,12 +2256,9 @@ void split_page(struct page *page, unsigned int order) split_page(virt_to_page(page[0].shadow), order); #endif - gfp_mask = get_page_owner_gfp(page); - set_page_owner(page, 0, gfp_mask); - for (i = 1; i < (1 << order); i++) { + for (i = 1; i < (1 << order); i++) set_page_refcounted(page + i); - set_page_owner(page + i, 0, gfp_mask); - } + split_page_owner(page, order); } EXPORT_SYMBOL_GPL(split_page); @@ -2266,8 +2288,6 @@ int __isolate_free_page(struct page *page, unsigned int order) zone->free_area[order].nr_free--; rmv_page_order(page); - set_page_owner(page, order, __GFP_MOVABLE); - /* Set the pageblock if the isolated page is at least a pageblock */ if (order >= pageblock_order - 1) { struct page *endpage = page + (1 << order) - 1; @@ -2285,33 +2305,6 @@ int __isolate_free_page(struct page *page, unsigned int order) } /* - * Similar to split_page except the page is already free. As this is only - * being used for migration, the migratetype of the block also changes. - * As this is called with interrupts disabled, the caller is responsible - * for calling arch_alloc_page() and kernel_map_page() after interrupts - * are enabled. - * - * Note: this is probably too low level an operation for use in drivers. - * Please consult with lkml before using this in your driver. - */ -int split_free_page(struct page *page) -{ - unsigned int order; - int nr_pages; - - order = page_order(page); - - nr_pages = __isolate_free_page(page, order); - if (!nr_pages) - return 0; - - /* Split into individual pages */ - set_page_refcounted(page); - split_page(page, order); - return nr_pages; -} - -/* * Allocate a page from the given zone. 
Use pcplists for order-0 allocations. */ static inline diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 3ecd3807c2c2..efb6c3c38c01 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -8,6 +8,7 @@ #include <linux/memory.h> #include <linux/hugetlb.h> #include <linux/kasan.h> +#include <linux/page_owner.h> #include "internal.h" static int set_migratetype_isolate(struct page *page, @@ -106,10 +107,6 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) if (pfn_valid_within(page_to_pfn(buddy)) && !is_migrate_isolate_page(buddy)) { __isolate_free_page(page, order); - kasan_alloc_pages(page, order); - arch_alloc_page(page, order); - kernel_map_pages(page, (1 << order), 1); - set_page_refcounted(page); isolated_page = page; } } @@ -128,8 +125,10 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) zone->nr_isolate_pageblock--; out: spin_unlock_irqrestore(&zone->lock, flags); - if (isolated_page) + if (isolated_page) { + post_alloc_hook(page, order, __GFP_MOVABLE); __free_pages(isolated_page, order); + } } static inline struct page * diff --git a/mm/page_owner.c b/mm/page_owner.c index 45cccaa1ce32..3a9a358e7c63 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -5,11 +5,24 @@ #include <linux/bootmem.h> #include <linux/stacktrace.h> #include <linux/page_owner.h> +#include <linux/jump_label.h> +#include <linux/migrate.h> +#include <linux/stackdepot.h> + #include "internal.h" +/* + * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack) + * to use off stack temporal storage + */ +#define PAGE_OWNER_STACK_DEPTH (16) + static bool page_owner_disabled = !IS_ENABLED(CONFIG_PAGE_OWNER_ENABLE_DEFAULT); -bool page_owner_inited __read_mostly; +DEFINE_STATIC_KEY_FALSE(page_owner_inited); + +static depot_stack_handle_t dummy_handle; +static depot_stack_handle_t failure_handle; static void init_early_allocated_pages(void); @@ -36,12 +49,42 @@ static bool need_page_owner(void) return true; } 
+static noinline void register_dummy_stack(void) +{ + unsigned long entries[4]; + struct stack_trace dummy; + + dummy.nr_entries = 0; + dummy.max_entries = ARRAY_SIZE(entries); + dummy.entries = &entries[0]; + dummy.skip = 0; + + save_stack_trace(&dummy); + dummy_handle = depot_save_stack(&dummy, GFP_KERNEL); +} + +static noinline void register_failure_stack(void) +{ + unsigned long entries[4]; + struct stack_trace failure; + + failure.nr_entries = 0; + failure.max_entries = ARRAY_SIZE(entries); + failure.entries = &entries[0]; + failure.skip = 0; + + save_stack_trace(&failure); + failure_handle = depot_save_stack(&failure, GFP_KERNEL); +} + static void init_page_owner(void) { if (page_owner_disabled) return; - page_owner_inited = true; + register_dummy_stack(); + register_failure_stack(); + static_branch_enable(&page_owner_inited); init_early_allocated_pages(); } @@ -57,46 +100,141 @@ void __reset_page_owner(struct page *page, unsigned int order) for (i = 0; i < (1 << order); i++) { page_ext = lookup_page_ext(page + i); + if (unlikely(!page_ext)) + continue; __clear_bit(PAGE_EXT_OWNER, &page_ext->flags); } } -void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) +static inline bool check_recursive_alloc(struct stack_trace *trace, + unsigned long ip) { - struct page_ext *page_ext = lookup_page_ext(page); + int i, count; + + if (!trace->nr_entries) + return false; + + for (i = 0, count = 0; i < trace->nr_entries; i++) { + if (trace->entries[i] == ip && ++count == 2) + return true; + } + + return false; +} + +static noinline depot_stack_handle_t save_stack(gfp_t flags) +{ + unsigned long entries[PAGE_OWNER_STACK_DEPTH]; struct stack_trace trace = { .nr_entries = 0, - .max_entries = ARRAY_SIZE(page_ext->trace_entries), - .entries = &page_ext->trace_entries[0], - .skip = 3, + .entries = entries, + .max_entries = PAGE_OWNER_STACK_DEPTH, + .skip = 0 }; + depot_stack_handle_t handle; save_stack_trace(&trace); + if (trace.nr_entries != 0 && + 
trace.entries[trace.nr_entries-1] == ULONG_MAX) + trace.nr_entries--; + + /* + * We need to check recursion here because our request to stackdepot + * could trigger memory allocation to save new entry. New memory + * allocation would reach here and call depot_save_stack() again + * if we don't catch it. There is still not enough memory in stackdepot + * so it would try to allocate memory again and loop forever. + */ + if (check_recursive_alloc(&trace, _RET_IP_)) + return dummy_handle; + + handle = depot_save_stack(&trace, flags); + if (!handle) + handle = failure_handle; + + return handle; +} + +noinline void __set_page_owner(struct page *page, unsigned int order, + gfp_t gfp_mask) +{ + struct page_ext *page_ext = lookup_page_ext(page); + + if (unlikely(!page_ext)) + return; + page_ext->handle = save_stack(gfp_mask); page_ext->order = order; page_ext->gfp_mask = gfp_mask; - page_ext->nr_entries = trace.nr_entries; + page_ext->last_migrate_reason = -1; __set_bit(PAGE_EXT_OWNER, &page_ext->flags); } -gfp_t __get_page_owner_gfp(struct page *page) +void __set_page_owner_migrate_reason(struct page *page, int reason) +{ + struct page_ext *page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return; + + page_ext->last_migrate_reason = reason; +} + +void __split_page_owner(struct page *page, unsigned int order) { + int i; struct page_ext *page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + /* + * The caller just returns if no valid gfp + * So return here too. 
+ */ + return; - return page_ext->gfp_mask; + page_ext->order = 0; + for (i = 1; i < (1 << order); i++) + __copy_page_owner(page, page + i); +} + +void __copy_page_owner(struct page *oldpage, struct page *newpage) +{ + struct page_ext *old_ext = lookup_page_ext(oldpage); + struct page_ext *new_ext = lookup_page_ext(newpage); + + if (unlikely(!old_ext || !new_ext)) + return; + + new_ext->order = old_ext->order; + new_ext->gfp_mask = old_ext->gfp_mask; + new_ext->last_migrate_reason = old_ext->last_migrate_reason; + new_ext->handle = old_ext->handle; + + /* + * We don't clear the bit on the oldpage as it's going to be freed + * after migration. Until then, the info can be useful in case of + * a bug, and the overal stats will be off a bit only temporarily. + * Also, migrate_misplaced_transhuge_page() can still fail the + * migration and then we want the oldpage to retain the info. But + * in that case we also don't need to explicitly clear the info from + * the new page, which will be freed. 
+ */ + __set_bit(PAGE_EXT_OWNER, &new_ext->flags); } static ssize_t print_page_owner(char __user *buf, size_t count, unsigned long pfn, - struct page *page, struct page_ext *page_ext) + struct page *page, struct page_ext *page_ext, + depot_stack_handle_t handle) { int ret; int pageblock_mt, page_mt; char *kbuf; + unsigned long entries[PAGE_OWNER_STACK_DEPTH]; struct stack_trace trace = { - .nr_entries = page_ext->nr_entries, - .entries = &page_ext->trace_entries[0], + .nr_entries = 0, + .entries = entries, + .max_entries = PAGE_OWNER_STACK_DEPTH, + .skip = 0 }; kbuf = kmalloc(count, GFP_KERNEL); @@ -104,8 +242,9 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn, return -ENOMEM; ret = snprintf(kbuf, count, - "Page allocated via order %u, mask 0x%x\n", - page_ext->order, page_ext->gfp_mask); + "Page allocated via order %u, mask %#x(%pGg)\n", + page_ext->order, page_ext->gfp_mask, + &page_ext->gfp_mask); if (ret >= count) goto err; @@ -114,31 +253,29 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn, pageblock_mt = get_pfnblock_migratetype(page, pfn); page_mt = gfpflags_to_migratetype(page_ext->gfp_mask); ret += snprintf(kbuf + ret, count - ret, - "PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n", + "PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n", pfn, + migratetype_names[page_mt], pfn >> pageblock_order, - pageblock_mt, - pageblock_mt != page_mt ? "Fallback" : " ", - PageLocked(page) ? "K" : " ", - PageError(page) ? "E" : " ", - PageReferenced(page) ? "R" : " ", - PageUptodate(page) ? "U" : " ", - PageDirty(page) ? "D" : " ", - PageLRU(page) ? "L" : " ", - PageActive(page) ? "A" : " ", - PageSlab(page) ? "S" : " ", - PageWriteback(page) ? "W" : " ", - PageCompound(page) ? "C" : " ", - PageSwapCache(page) ? "B" : " ", - PageMappedToDisk(page) ? 
"M" : " "); + migratetype_names[pageblock_mt], + page->flags, &page->flags); if (ret >= count) goto err; + depot_fetch_stack(handle, &trace); ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0); if (ret >= count) goto err; + if (page_ext->last_migrate_reason != -1) { + ret += snprintf(kbuf + ret, count - ret, + "Page has been migrated, last migrate reason: %s\n", + migrate_reason_names[page_ext->last_migrate_reason]); + if (ret >= count) + goto err; + } + ret += snprintf(kbuf + ret, count - ret, "\n"); if (ret >= count) goto err; @@ -154,14 +291,58 @@ err: return -ENOMEM; } +void __dump_page_owner(struct page *page) +{ + struct page_ext *page_ext = lookup_page_ext(page); + unsigned long entries[PAGE_OWNER_STACK_DEPTH]; + struct stack_trace trace = { + .nr_entries = 0, + .entries = entries, + .max_entries = PAGE_OWNER_STACK_DEPTH, + .skip = 0 + }; + depot_stack_handle_t handle; + gfp_t gfp_mask; + int mt; + + if (unlikely(!page_ext)) { + pr_alert("There is not page extension available.\n"); + return; + } + gfp_mask = page_ext->gfp_mask; + mt = gfpflags_to_migratetype(gfp_mask); + + if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) { + pr_alert("page_owner info is not active (free page?)\n"); + return; + } + + handle = READ_ONCE(page_ext->handle); + if (!handle) { + pr_alert("page_owner info is not active (free page?)\n"); + return; + } + + depot_fetch_stack(handle, &trace); + pr_alert("page allocated via order %u, migratetype %s, " + "gfp_mask %#x(%pGg)\n", page_ext->order, + migratetype_names[mt], gfp_mask, &gfp_mask); + print_stack_trace(&trace, 0); + + if (page_ext->last_migrate_reason != -1) + pr_alert("page has been migrated, last migrate reason: %s\n", + migrate_reason_names[page_ext->last_migrate_reason]); +} + static ssize_t read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long pfn; struct page *page; struct page_ext *page_ext; + depot_stack_handle_t handle; - if (!page_owner_inited) + if 
(!static_branch_unlikely(&page_owner_inited)) return -EINVAL; page = NULL; @@ -198,6 +379,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) } page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + continue; /* * Some pages could be missed by concurrent allocation or free, @@ -206,10 +389,19 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) continue; + /* + * Access to page_ext->handle isn't synchronous so we should + * be careful to access it. + */ + handle = READ_ONCE(page_ext->handle); + if (!handle) + continue; + /* Record the next PFN to read in the file offset */ *ppos = (pfn - min_low_pfn) + 1; - return print_page_owner(buf, count, pfn, page, page_ext); + return print_page_owner(buf, count, pfn, page, + page_ext, handle); } return 0; @@ -248,6 +440,9 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) page = pfn_to_page(pfn); + if (page_zone(page) != zone) + continue; + /* * We are safe to check buddy flag and order, because * this is init stage and only single thread runs. 
@@ -261,6 +456,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) continue; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + continue; /* Maybe overraping zone */ if (test_bit(PAGE_EXT_OWNER, &page_ext->flags)) @@ -309,7 +506,7 @@ static int __init pageowner_init(void) { struct dentry *dentry; - if (!page_owner_inited) { + if (!static_branch_unlikely(&page_owner_inited)) { pr_info("page_owner is disabled\n"); return 0; } diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index 40dd0f9b00d6..09f733b0424a 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -205,6 +205,8 @@ void swap_cgroup_swapoff(int type) struct page *page = map[i]; if (page) __free_page(page); + if (!(i % SWAP_CLUSTER_MAX)) + cond_resched(); } vfree(map); } diff --git a/mm/vmstat.c b/mm/vmstat.c index d6b4817c4416..3c0796cd3f80 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -927,19 +927,6 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat, #endif #ifdef CONFIG_PROC_FS -static char * const migratetype_names[MIGRATE_TYPES] = { - "Unmovable", - "Movable", - "Reclaimable", -#ifdef CONFIG_CMA - "CMA", -#endif - "HighAtomic", -#ifdef CONFIG_MEMORY_ISOLATION - "Isolate", -#endif -}; - static void frag_show_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) { @@ -1103,6 +1090,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m, continue; page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + continue; if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) continue; @@ -1140,7 +1129,7 @@ static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat) #ifdef CONFIG_PAGE_OWNER int mtype; - if (!page_owner_inited) + if (!static_branch_unlikely(&page_owner_inited)) return; drain_all_pages(NULL); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index ad8d6e6b87ca..e20ae2d3c498 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -278,7 +278,8 @@ static int register_vlan_device(struct net_device *real_dev, 
u16 vlan_id) return 0; out_free_newdev: - free_netdev(new_dev); + if (new_dev->reg_state == NETREG_UNINITIALIZED) + free_netdev(new_dev); return err; } diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index 59ce1fcc220c..71b6ab240dea 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) { struct sk_buff *skb; - if (likely(in_interrupt())) - skb = alloc_skb(len + pfx, GFP_ATOMIC); - else - skb = alloc_skb(len + pfx, GFP_KERNEL); - + skb = alloc_skb(len + pfx, GFP_ATOMIC); if (unlikely(skb == NULL)) return NULL; diff --git a/net/core/dev.c b/net/core/dev.c index 50e77fe096f4..9a3aaba15c5a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1248,8 +1248,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) if (!new_ifalias) return -ENOMEM; dev->ifalias = new_ifalias; + memcpy(dev->ifalias, alias, len); + dev->ifalias[len] = 0; - strlcpy(dev->ifalias, alias, len+1); return len; } @@ -4560,8 +4561,13 @@ static void net_rps_send_ipi(struct softnet_data *remsd) while (remsd) { struct softnet_data *next = remsd->rps_ipi_next; - if (cpu_online(remsd->cpu)) + if (cpu_online(remsd->cpu)) { smp_call_function_single_async(remsd->cpu, &remsd->csd); + } else { + rps_lock(remsd); + remsd->backlog.state = 0; + rps_unlock(remsd); + } remsd = next; } #endif diff --git a/net/core/dst.c b/net/core/dst.c index d7ad628bf64e..e72d706f8d0c 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -462,6 +462,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, spin_lock_bh(&dst_garbage.lock); dst = dst_garbage.list; dst_garbage.list = NULL; + /* The code in dst_ifdown places a hold on the loopback device. + * If the gc entry processing is set to expire after a lengthy + * interval, this hold can cause netdev_wait_allrefs() to hang + * out and wait for a long time -- until the the loopback + * interface is released. 
If we're really unlucky, it'll emit + * pr_emerg messages to console too. Reset the interval here, + * so dst cleanups occur in a more timely fashion. + */ + if (dst_garbage.timer_inc > DST_GC_INC) { + dst_garbage.timer_inc = DST_GC_INC; + dst_garbage.timer_expires = DST_GC_MIN; + mod_delayed_work(system_wq, &dst_gc_work, + dst_garbage.timer_expires); + } spin_unlock_bh(&dst_garbage.lock); if (last) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d43544ce7550..2ec5324a7ff7 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -897,6 +897,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(1) /* IFLA_LINKMODE */ + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ + nla_total_size(4) /* IFLA_LINK_NETNSID */ + + nla_total_size(4) /* IFLA_GROUP */ + nla_total_size(ext_filter_mask & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */ + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ @@ -1089,6 +1090,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, struct ifla_vf_mac vf_mac; struct ifla_vf_info ivi; + memset(&ivi, 0, sizeof(ivi)); + /* Not all SR-IOV capable drivers support the * spoofcheck and "RSS query enable" query. 
Preset to * -1 so the user space tool can detect that the driver @@ -1097,7 +1100,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, ivi.spoofchk = -1; ivi.rss_query_en = -1; ivi.trusted = -1; - memset(ivi.mac, 0, sizeof(ivi.mac)); /* The default value for VF link state is "auto" * IFLA_VF_LINK_STATE_AUTO which equals zero */ @@ -1370,6 +1372,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, + [IFLA_GROUP] = { .type = NLA_U32 }, }; static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index b1dc096d22f8..403593bd2b83 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt) call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); } -static inline void dnrt_drop(struct dn_route *rt) -{ - dst_release(&rt->dst); - call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); -} - static void dn_dst_check_expire(unsigned long dummy) { int i; @@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops) } *rtp = rt->dst.dn_next; rt->dst.dn_next = NULL; - dnrt_drop(rt); + dnrt_free(rt); break; } spin_unlock_bh(&dn_rt_hash_table[i].lock); @@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou dst_use(&rth->dst, now); spin_unlock_bh(&dn_rt_hash_table[hash].lock); - dnrt_drop(rt); + dst_free(&rt->dst); *rp = rth; return 0; } @@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy) for(; rt; rt = next) { next = rcu_dereference_raw(rt->dst.dn_next); RCU_INIT_POINTER(rt->dst.dn_next, NULL); - dst_free((struct dst_entry *)rt); + dnrt_free(rt); } nothing_to_declare: @@ -1187,7 +1181,7 @@ make_route: if (dev_out->flags & IFF_LOOPBACK) flags |= RTCF_LOCAL; - rt = dst_alloc(&dn_dst_ops, dev_out, 1, 
DST_OBSOLETE_NONE, DST_HOST); + rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST); if (rt == NULL) goto e_nobufs; diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 85f2fdc360c2..29246bc9a7b4 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); - if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) + if (skb->len < sizeof(*nlh) || + nlh->nlmsg_len < sizeof(*nlh) || + skb->len < nlh->nlmsg_len) return; if (!netlink_capable(skb, CAP_NET_ADMIN)) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 8dfe9fb7ad36..554c2a961ad5 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1006,10 +1006,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p, /* Use already configured phy mode */ if (p->phy_interface == PHY_INTERFACE_MODE_NA) p->phy_interface = p->phy->interface; - phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, - p->phy_interface); - - return 0; + return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, + p->phy_interface); } static int dsa_slave_phy_setup(struct dsa_slave_priv *p, diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 17adfdaf5795..3809d523d012 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1102,6 +1102,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im) pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); if (!pmc) return; + spin_lock_init(&pmc->lock); spin_lock_bh(&im->lock); pmc->interface = im->interface; in_dev_hold(in_dev); @@ -2026,21 +2027,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, static void ip_mc_clear_src(struct ip_mc_list *pmc) { - struct ip_sf_list *psf, *nextpsf; + struct ip_sf_list *psf, *nextpsf, *tomb, *sources; - for (psf = pmc->tomb; psf; psf = nextpsf) { + spin_lock_bh(&pmc->lock); + tomb = pmc->tomb; + pmc->tomb = 
NULL; + sources = pmc->sources; + pmc->sources = NULL; + pmc->sfmode = MCAST_EXCLUDE; + pmc->sfcount[MCAST_INCLUDE] = 0; + pmc->sfcount[MCAST_EXCLUDE] = 1; + spin_unlock_bh(&pmc->lock); + + for (psf = tomb; psf; psf = nextpsf) { nextpsf = psf->sf_next; kfree(psf); } - pmc->tomb = NULL; - for (psf = pmc->sources; psf; psf = nextpsf) { + for (psf = sources; psf; psf = nextpsf) { nextpsf = psf->sf_next; kfree(psf); } - pmc->sources = NULL; - pmc->sfmode = MCAST_EXCLUDE; - pmc->sfcount[MCAST_INCLUDE] = 0; - pmc->sfcount[MCAST_EXCLUDE] = 1; } /* Join a multicast group diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 4fe0359d8abf..55e928819846 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -320,9 +320,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev, static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, unsigned long delay) { - if (!delayed_work_pending(&ifp->dad_work)) - in6_ifa_hold(ifp); - mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); + in6_ifa_hold(ifp); + if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay)) + in6_ifa_put(ifp); } static int snmp6_alloc_dev(struct inet6_dev *idev) diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 27796c33ca05..d7c1ee7cf0e2 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -184,6 +184,11 @@ ipv4_connected: err = 0; if (IS_ERR(dst)) { err = PTR_ERR(dst); + /* Reset daddr and dport so that udp_v6_early_demux() + * fails to find this socket + */ + memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); + inet->inet_dport = 0; goto out; } diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index ed33abf57abd..9ac4f0cef27d 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -32,7 +32,6 @@ struct fib6_rule { struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, int flags, pol_lookup_t lookup) { - struct rt6_info *rt; struct fib_lookup_arg arg = { .lookup_ptr = lookup, .flags = FIB_LOOKUP_NOREF, @@ -41,21 +40,11 @@ struct 
dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, fib_rules_lookup(net->ipv6.fib6_rules_ops, flowi6_to_flowi(fl6), flags, &arg); - rt = arg.result; + if (arg.result) + return arg.result; - if (!rt) { - dst_hold(&net->ipv6.ip6_null_entry->dst); - return &net->ipv6.ip6_null_entry->dst; - } - - if (rt->rt6i_flags & RTF_REJECT && - rt->dst.error == -EAGAIN) { - ip6_rt_put(rt); - rt = net->ipv6.ip6_null_entry; - dst_hold(&rt->dst); - } - - return &rt->dst; + dst_hold(&net->ipv6.ip6_null_entry->dst); + return &net->ipv6.ip6_null_entry->dst; } static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, @@ -116,7 +105,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, flp6->saddr = saddr; } err = rt->dst.error; - goto out; + if (err != -EAGAIN) + goto out; } again: ip6_rt_put(rt); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 85bf86458706..1ac06723f0d7 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -290,8 +290,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, struct rt6_info *rt; rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); - if (rt->rt6i_flags & RTF_REJECT && - rt->dst.error == -EAGAIN) { + if (rt->dst.error == -EAGAIN) { ip6_rt_put(rt); rt = net->ipv6.ip6_null_entry; dst_hold(&rt->dst); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index ef745df36fc8..72b53b83a720 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1004,8 +1004,10 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, } #endif if (ipv6_addr_v4mapped(&fl6->saddr) && - !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) - return -EAFNOSUPPORT; + !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) { + err = -EAFNOSUPPORT; + goto out_err_release; + } return 0; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 8a07c63ddccd..a8cabc876348 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -45,6 +45,7 @@ #include 
<net/tcp_states.h> #include <net/ip6_checksum.h> #include <net/xfrm.h> +#include <net/inet_hashtables.h> #include <net/inet6_hashtables.h> #include <net/busy_poll.h> @@ -964,15 +965,22 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net, int dif) { struct sock *sk; + struct hlist_nulls_node *hnode; + unsigned short hnum = ntohs(loc_port); + unsigned int hash2 = udp6_portaddr_hash(net, loc_addr, hnum); + unsigned int slot2 = hash2 & udp_table.mask; + struct udp_hslot *hslot2 = &udp_table.hash2[slot2]; - rcu_read_lock(); - sk = __udp6_lib_lookup(net, rmt_addr, rmt_port, loc_addr, loc_port, - dif, &udp_table); - if (sk && !atomic_inc_not_zero(&sk->sk_refcnt)) - sk = NULL; - rcu_read_unlock(); + const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum); - return sk; + udp_portaddr_for_each_entry_rcu(sk, hnode, &hslot2->head) { + if (sk->sk_state == TCP_ESTABLISHED && + INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif)) + return sk; + /* Only check first socket in chain */ + break; + } + return NULL; } static void udp_v6_early_demux(struct sk_buff *skb) @@ -997,7 +1005,7 @@ static void udp_v6_early_demux(struct sk_buff *skb) else return; - if (!sk) + if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2)) return; skb->sk = sk; diff --git a/net/key/af_key.c b/net/key/af_key.c index f9c9ecb0cdd3..e67c28e614b9 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1135,6 +1135,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, goto out; } + err = -ENOBUFS; key = ext_hdrs[SADB_EXT_KEY_AUTH - 1]; if (sa->sadb_sa_auth) { int keysize = 0; @@ -1146,8 +1147,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, if (key) keysize = (key->sadb_key_bits + 7) / 8; x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL); - if (!x->aalg) + if (!x->aalg) { + err = -ENOMEM; goto out; + } strcpy(x->aalg->alg_name, a->name); x->aalg->alg_key_len = 0; if (key) { @@ -1166,8 +1169,10 @@ static struct xfrm_state * 
pfkey_msg2xfrm_state(struct net *net, goto out; } x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL); - if (!x->calg) + if (!x->calg) { + err = -ENOMEM; goto out; + } strcpy(x->calg->alg_name, a->name); x->props.calgo = sa->sadb_sa_encrypt; } else { @@ -1181,8 +1186,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, if (key) keysize = (key->sadb_key_bits + 7) / 8; x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL); - if (!x->ealg) + if (!x->ealg) { + err = -ENOMEM; goto out; + } strcpy(x->ealg->alg_name, a->name); x->ealg->alg_key_len = 0; if (key) { @@ -1227,8 +1234,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, struct xfrm_encap_tmpl *natt; x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL); - if (!x->encap) + if (!x->encap) { + err = -ENOMEM; goto out; + } natt = x->encap; n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 175ffcf7fb06..2ee53dc1ddf7 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -891,12 +891,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) supp_ht = supp_ht || sband->ht_cap.ht_supported; supp_vht = supp_vht || sband->vht_cap.vht_supported; - if (sband->ht_cap.ht_supported) - local->rx_chains = - max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), - local->rx_chains); + if (!sband->ht_cap.ht_supported) + continue; /* TODO: consider VHT for RX chains, hopefully it's the same */ + local->rx_chains = + max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), + local->rx_chains); + + /* no need to mask, SM_PS_DISABLED has all bits set */ + sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED << + IEEE80211_HT_CAP_SM_PS_SHIFT; } /* if low-level driver supports AP, we also support VLAN */ diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 9f5272968abb..e565b2becb14 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -45,6 +45,8 @@ #include 
<net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/nf_conntrack_timestamp.h> #include <net/netfilter/nf_conntrack_labels.h> +#include <net/netfilter/nf_conntrack_seqadj.h> +#include <net/netfilter/nf_conntrack_synproxy.h> #ifdef CONFIG_NF_NAT_NEEDED #include <net/netfilter/nf_nat_core.h> #include <net/netfilter/nf_nat_l4proto.h> @@ -1798,6 +1800,8 @@ ctnetlink_create_conntrack(struct net *net, nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); nf_ct_labels_ext_add(ct); + nfct_seqadj_ext_add(ct); + nfct_synproxy_ext_add(ct); /* we must add conntrack extensions before confirmation. */ ct->status |= IPS_CONFIRMED; diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index b7c43def0dc6..00f798b20b20 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb, tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); tcp_hdrlen = tcph->doff * 4; - if (len < tcp_hdrlen) + if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr)) return -1; if (info->mss == XT_TCPMSS_CLAMP_PMTU) { @@ -156,6 +156,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, if (len > tcp_hdrlen) return 0; + /* tcph->doff has 4 bits, do not wrap it to 0 */ + if (tcp_hdrlen >= 15 * 4) + return 0; + /* * MSS Option not found ?! add it.. 
*/ diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index da3cc09f683e..91d43ab3a961 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c @@ -215,7 +215,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned int toklen = *_toklen, n_parts, loop, tmp; + unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen; /* there must be at least one name, and at least #names+1 length * words */ @@ -245,16 +245,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, toklen -= 4; if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX) return -EINVAL; - if (tmp > toklen) + paddedlen = (tmp + 3) & ~3; + if (paddedlen > toklen) return -EINVAL; princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL); if (!princ->name_parts[loop]) return -ENOMEM; memcpy(princ->name_parts[loop], xdr, tmp); princ->name_parts[loop][tmp] = 0; - tmp = (tmp + 3) & ~3; - toklen -= tmp; - xdr += tmp >> 2; + toklen -= paddedlen; + xdr += paddedlen >> 2; } if (toklen < 4) @@ -263,16 +263,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, toklen -= 4; if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX) return -EINVAL; - if (tmp > toklen) + paddedlen = (tmp + 3) & ~3; + if (paddedlen > toklen) return -EINVAL; princ->realm = kmalloc(tmp + 1, GFP_KERNEL); if (!princ->realm) return -ENOMEM; memcpy(princ->realm, xdr, tmp); princ->realm[tmp] = 0; - tmp = (tmp + 3) & ~3; - toklen -= tmp; - xdr += tmp >> 2; + toklen -= paddedlen; + xdr += paddedlen >> 2; _debug("%s/...@%s", princ->name_parts[0], princ->realm); @@ -291,7 +291,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned int toklen = *_toklen, len; + unsigned int toklen = *_toklen, len, paddedlen; /* there must be at least one tag and one length word */ if (toklen <= 8) @@ -305,15 +305,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, 
toklen -= 8; if (len > max_data_size) return -EINVAL; + paddedlen = (len + 3) & ~3; + if (paddedlen > toklen) + return -EINVAL; td->data_len = len; if (len > 0) { td->data = kmemdup(xdr, len, GFP_KERNEL); if (!td->data) return -ENOMEM; - len = (len + 3) & ~3; - toklen -= len; - xdr += len >> 2; + toklen -= paddedlen; + xdr += paddedlen >> 2; } _debug("tag %x len %x", td->tag, td->data_len); @@ -385,7 +387,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, const __be32 **_xdr, unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned int toklen = *_toklen, len; + unsigned int toklen = *_toklen, len, paddedlen; /* there must be at least one length word */ if (toklen <= 4) @@ -397,6 +399,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, toklen -= 4; if (len > AFSTOKEN_K5_TIX_MAX) return -EINVAL; + paddedlen = (len + 3) & ~3; + if (paddedlen > toklen) + return -EINVAL; *_tktlen = len; _debug("ticket len %u", len); @@ -405,9 +410,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, *_ticket = kmemdup(xdr, len, GFP_KERNEL); if (!*_ticket) return -ENOMEM; - len = (len + 3) & ~3; - toklen -= len; - xdr += len >> 2; + toklen -= paddedlen; + xdr += paddedlen >> 2; } *_xdr = xdr; @@ -550,7 +554,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) { const __be32 *xdr = prep->data, *token; const char *cp; - unsigned int len, tmp, loop, ntoken, toklen, sec_ix; + unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix; size_t datalen = prep->datalen; int ret; @@ -576,22 +580,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) if (len < 1 || len > AFSTOKEN_CELL_MAX) goto not_xdr; datalen -= 4; - tmp = (len + 3) & ~3; - if (tmp > datalen) + paddedlen = (len + 3) & ~3; + if (paddedlen > datalen) goto not_xdr; cp = (const char *) xdr; for (loop = 0; loop < len; loop++) if (!isprint(cp[loop])) goto not_xdr; - if (len < tmp) - for (; loop < tmp; loop++) - if (cp[loop]) - goto not_xdr; + 
for (; loop < paddedlen; loop++) + if (cp[loop]) + goto not_xdr; _debug("cellname: [%u/%u] '%*.*s'", - len, tmp, len, len, (const char *) xdr); - datalen -= tmp; - xdr += tmp >> 2; + len, paddedlen, len, len, (const char *) xdr); + datalen -= paddedlen; + xdr += paddedlen >> 2; /* get the token count */ if (datalen < 12) @@ -612,10 +615,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) sec_ix = ntohl(*xdr); datalen -= 4; _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix); - if (toklen < 20 || toklen > datalen) + paddedlen = (toklen + 3) & ~3; + if (toklen < 20 || toklen > datalen || paddedlen > datalen) goto not_xdr; - datalen -= (toklen + 3) & ~3; - xdr += (toklen + 3) >> 2; + datalen -= paddedlen; + xdr += paddedlen >> 2; } while (--loop > 0); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 956141b71619..3ebf3b652d60 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, union sctp_addr *laddr = (union sctp_addr *)addr; struct sctp_transport *transport; - if (sctp_verify_addr(sk, laddr, af->sockaddr_len)) + if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len)) return NULL; addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 1f5d18d80fba..decd8b8d751f 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -997,7 +997,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct path path = { NULL, NULL }; err = -EINVAL; - if (sunaddr->sun_family != AF_UNIX) + if (addr_len < offsetofend(struct sockaddr_un, sun_family) || + sunaddr->sun_family != AF_UNIX) goto out; if (addr_len == sizeof(short)) { @@ -1108,6 +1109,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, unsigned int hash; int err; + err = -EINVAL; + if (alen < offsetofend(struct sockaddr, sa_family)) + goto out; + if (addr->sa_family != AF_UNSPEC) { err = 
unix_mkname(sunaddr, alen, &hash); if (err < 0) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index d5b4ac7bf0d8..0f45e1a3a7d1 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1773,43 +1773,6 @@ free_dst: goto out; } -#ifdef CONFIG_XFRM_SUB_POLICY -static int xfrm_dst_alloc_copy(void **target, const void *src, int size) -{ - if (!*target) { - *target = kmalloc(size, GFP_ATOMIC); - if (!*target) - return -ENOMEM; - } - - memcpy(*target, src, size); - return 0; -} -#endif - -static int xfrm_dst_update_parent(struct dst_entry *dst, - const struct xfrm_selector *sel) -{ -#ifdef CONFIG_XFRM_SUB_POLICY - struct xfrm_dst *xdst = (struct xfrm_dst *)dst; - return xfrm_dst_alloc_copy((void **)&(xdst->partner), - sel, sizeof(*sel)); -#else - return 0; -#endif -} - -static int xfrm_dst_update_origin(struct dst_entry *dst, - const struct flowi *fl) -{ -#ifdef CONFIG_XFRM_SUB_POLICY - struct xfrm_dst *xdst = (struct xfrm_dst *)dst; - return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl)); -#else - return 0; -#endif -} - static int xfrm_expand_policies(const struct flowi *fl, u16 family, struct xfrm_policy **pols, int *num_pols, int *num_xfrms) @@ -1881,16 +1844,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, xdst = (struct xfrm_dst *)dst; xdst->num_xfrms = err; - if (num_pols > 1) - err = xfrm_dst_update_parent(dst, &pols[1]->selector); - else - err = xfrm_dst_update_origin(dst, fl); - if (unlikely(err)) { - dst_free(dst); - XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); - return ERR_PTR(err); - } - xdst->num_pols = num_pols; memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); xdst->policy_genid = atomic_read(&pols[0]->genid); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 5ab9d1e3e2b8..d0221769ba52 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -740,6 +740,7 @@ static int selinux_set_mnt_opts(struct super_block *sb, sbsec->flags 
|= SE_SBPROC | SE_SBGENFS; if (!strcmp(sb->s_type->name, "debugfs") || + !strcmp(sb->s_type->name, "tracefs") || !strcmp(sb->s_type->name, "sysfs") || !strcmp(sb->s_type->name, "pstore")) sbsec->flags |= SE_SBGENFS; diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index 373fcad840ea..776dffa88aee 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h @@ -294,6 +294,8 @@ struct hda_codec { #define list_for_each_codec(c, bus) \ list_for_each_entry(c, &(bus)->core.codec_list, core.list) +#define list_for_each_codec_safe(c, n, bus) \ + list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list) /* snd_hda_codec_read/write optional flags */ #define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0) diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 5baf8b56b6e7..9c6e10fb479f 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1128,8 +1128,12 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs); /* configure each codec instance */ int azx_codec_configure(struct azx *chip) { - struct hda_codec *codec; - list_for_each_codec(codec, &chip->bus) { + struct hda_codec *codec, *next; + + /* use _safe version here since snd_hda_codec_configure() deregisters + * the device upon error and deletes itself from the bus list. 
+ */ + list_for_each_codec_safe(codec, next, &chip->bus) { snd_hda_codec_configure(codec); } return 0; diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index dc2fa576d60d..689df78f640a 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -3190,6 +3190,7 @@ static int check_dyn_adc_switch(struct hda_codec *codec) spec->input_paths[i][nums]); spec->input_paths[i][nums] = spec->input_paths[i][n]; + spec->input_paths[i][n] = 0; } } nums++; diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c index b91d13c8e010..58df020811f1 100644 --- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c +++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c @@ -1039,7 +1039,6 @@ static int msm_sdw_swrm_read(void *handle, int reg) __func__, reg); sdw_rd_addr_base = MSM_SDW_AHB_BRIDGE_RD_ADDR_0; sdw_rd_data_base = MSM_SDW_AHB_BRIDGE_RD_DATA_0; - /* * Add sleep as SWR slave access read takes time. * Allow for RD_DONE to complete for previous register if any. @@ -1054,6 +1053,8 @@ static int msm_sdw_swrm_read(void *handle, int reg) dev_err(msm_sdw->dev, "%s: RD Addr Failure\n", __func__); goto err; } + /* Add sleep for SWR register read value to get updated. */ + usleep_range(100, 105); /* Check for RD value */ ret = regmap_bulk_read(msm_sdw->regmap, sdw_rd_data_base, (u8 *)&val, 4); @@ -1079,12 +1080,12 @@ static int msm_sdw_bulk_write(struct msm_sdw_priv *msm_sdw, sdw_wr_addr_base = MSM_SDW_AHB_BRIDGE_WR_ADDR_0; sdw_wr_data_base = MSM_SDW_AHB_BRIDGE_WR_DATA_0; - /* - * Add sleep as SWR slave write takes time. - * Allow for any previous pending write to complete. - */ - usleep_range(50, 55); for (i = 0; i < len; i += 2) { + /* + * Add sleep as SWR slave write takes time. + * Allow for any previous pending write to complete. 
+ */ + usleep_range(100, 105); /* First Write the Data to register */ ret = regmap_bulk_write(msm_sdw->regmap, sdw_wr_data_base, bulk_reg[i].buf, 4); diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c index 3d131ed463c9..997a623033fb 100644 --- a/sound/soc/codecs/wcd-mbhc-v2.c +++ b/sound/soc/codecs/wcd-mbhc-v2.c @@ -668,7 +668,8 @@ static void wcd_mbhc_report_plug(struct wcd_mbhc *mbhc, int insertion, jack_type == SND_JACK_LINEOUT) && (mbhc->hph_status && mbhc->hph_status != jack_type)) { - if (mbhc->micbias_enable) { + if (mbhc->micbias_enable && + mbhc->hph_status == SND_JACK_HEADSET) { if (mbhc->mbhc_cb->mbhc_micbias_control) mbhc->mbhc_cb->mbhc_micbias_control( codec, MIC_BIAS_2, diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c index 2ab787f57b31..10883b0939d6 100644 --- a/sound/soc/codecs/wcd9335.c +++ b/sound/soc/codecs/wcd9335.c @@ -12127,8 +12127,10 @@ static int tasha_dig_core_power_collapse(struct tasha_priv *tasha, goto unlock_mutex; if (tasha->power_active_ref < 0) { - dev_dbg(tasha->dev, "%s: power_active_ref is negative\n", + dev_info(tasha->dev, + "%s: power_active_ref is negative, resetting it\n", __func__); + tasha->power_active_ref = 0; goto unlock_mutex; } diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c index fe975a7dbe4e..058f6c5fa676 100644 --- a/sound/soc/codecs/wsa881x.c +++ b/sound/soc/codecs/wsa881x.c @@ -1365,6 +1365,7 @@ static int wsa881x_swr_reset(struct swr_device *pdev) /* Retry after 1 msec delay */ usleep_range(1000, 1100); } + pdev->dev_num = devnum; regcache_mark_dirty(wsa881x->regmap); regcache_sync(wsa881x->regmap); return 0; diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c index 24dbbedf0be7..cc89408fcb39 100644 --- a/sound/soc/msm/msm-dai-fe.c +++ b/sound/soc/msm/msm-dai-fe.c @@ -2641,6 +2641,101 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { .name = "MultiMedia20", .probe = fe_dai_probe, }, + { + .playback = { + .stream_name = 
"MultiMedia21 Playback", + .aif_name = "MM_DL21", + .rates = (SNDRV_PCM_RATE_8000_384000 | + SNDRV_PCM_RATE_KNOT), + .formats = (SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), + .channels_min = 1, + .channels_max = 8, + .rate_min = 8000, + .rate_max = 384000, + }, + .ops = &msm_fe_Multimedia_dai_ops, + .name = "MultiMedia21", + .probe = fe_dai_probe, + }, + { + .playback = { + .stream_name = "MultiMedia22 Playback", + .aif_name = "MM_DL22", + .rates = (SNDRV_PCM_RATE_8000_384000 | + SNDRV_PCM_RATE_KNOT), + .formats = (SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), + .channels_min = 1, + .channels_max = 8, + .rate_min = 8000, + .rate_max = 384000, + }, + .ops = &msm_fe_Multimedia_dai_ops, + .name = "MultiMedia22", + .probe = fe_dai_probe, + }, + { + .playback = { + .stream_name = "MultiMedia23 Playback", + .aif_name = "MM_DL23", + .rates = (SNDRV_PCM_RATE_8000_384000 | + SNDRV_PCM_RATE_KNOT), + .formats = (SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), + .channels_min = 1, + .channels_max = 8, + .rate_min = 8000, + .rate_max = 384000, + }, + .ops = &msm_fe_Multimedia_dai_ops, + .name = "MultiMedia23", + .probe = fe_dai_probe, + }, + { + .playback = { + .stream_name = "MultiMedia24 Playback", + .aif_name = "MM_DL24", + .rates = (SNDRV_PCM_RATE_8000_384000 | + SNDRV_PCM_RATE_KNOT), + .formats = (SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), + .channels_min = 1, + .channels_max = 8, + .rate_min = 8000, + .rate_max = 384000, + }, + .ops = &msm_fe_Multimedia_dai_ops, + .name = "MultiMedia24", + .probe = fe_dai_probe, + }, + { + .playback = { + .stream_name = "MultiMedia25 Playback", + .aif_name = "MM_DL25", + .rates = (SNDRV_PCM_RATE_8000_384000 | + SNDRV_PCM_RATE_KNOT), + .formats = (SNDRV_PCM_FMTBIT_S16_LE | + 
SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), + .channels_min = 1, + .channels_max = 8, + .rate_min = 8000, + .rate_max = 384000, + }, + .ops = &msm_fe_Multimedia_dai_ops, + .name = "MultiMedia25", + .probe = fe_dai_probe, + }, }; static int msm_fe_dai_dev_probe(struct platform_device *pdev) diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c index 0b37e7e073aa..e715770d909a 100644 --- a/sound/soc/msm/msm8998.c +++ b/sound/soc/msm/msm8998.c @@ -397,7 +397,9 @@ static struct dev_config aux_pcm_tx_cfg[] = { }; static int msm_vi_feed_tx_ch = 2; -static const char *const slim_rx_ch_text[] = {"One", "Two"}; +static const char *const slim_rx_ch_text[] = {"One", "Two", "Three", "Four", + "Five", "Six", "Seven", + "Eight"}; static const char *const slim_tx_ch_text[] = {"One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight"}; @@ -5526,6 +5528,91 @@ static struct snd_soc_dai_link msm_common_misc_fe_dai_links[] = { .ignore_pmdown_time = 1, .be_id = MSM_FRONTEND_DAI_MULTIMEDIA18, }, + { + .name = "MultiMedia21", + .stream_name = "MultiMedia21", + .cpu_dai_name = "MultiMedia21", + .platform_name = "msm-pcm-dsp.0", + .dynamic = 1, + .async_ops = ASYNC_DPCM_SND_SOC_PREPARE, + .dpcm_playback = 1, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .ignore_suspend = 1, + /* this dainlink has playback support */ + .ignore_pmdown_time = 1, + .be_id = MSM_FRONTEND_DAI_MULTIMEDIA21, + }, + { + .name = "MultiMedia22", + .stream_name = "MultiMedia22", + .cpu_dai_name = "MultiMedia22", + .platform_name = "msm-pcm-dsp.0", + .dynamic = 1, + .async_ops = ASYNC_DPCM_SND_SOC_PREPARE, + .dpcm_playback = 1, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .ignore_suspend = 1, + /* this dainlink has playback support */ + .ignore_pmdown_time = 1, + 
.be_id = MSM_FRONTEND_DAI_MULTIMEDIA22, + }, + { + .name = "MultiMedia23", + .stream_name = "MultiMedia23", + .cpu_dai_name = "MultiMedia23", + .platform_name = "msm-pcm-dsp.0", + .dynamic = 1, + .async_ops = ASYNC_DPCM_SND_SOC_PREPARE, + .dpcm_playback = 1, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .ignore_suspend = 1, + /* this dainlink has playback support */ + .ignore_pmdown_time = 1, + .be_id = MSM_FRONTEND_DAI_MULTIMEDIA23, + }, + { + .name = "MultiMedia24", + .stream_name = "MultiMedia24", + .cpu_dai_name = "MultiMedia24", + .platform_name = "msm-pcm-dsp.0", + .dynamic = 1, + .async_ops = ASYNC_DPCM_SND_SOC_PREPARE, + .dpcm_playback = 1, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .ignore_suspend = 1, + /* this dainlink has playback support */ + .ignore_pmdown_time = 1, + .be_id = MSM_FRONTEND_DAI_MULTIMEDIA24, + }, + { + .name = "MultiMedia25", + .stream_name = "MultiMedia25", + .cpu_dai_name = "MultiMedia25", + .platform_name = "msm-pcm-dsp.0", + .dynamic = 1, + .async_ops = ASYNC_DPCM_SND_SOC_PREPARE, + .dpcm_playback = 1, + .codec_dai_name = "snd-soc-dummy-dai", + .codec_name = "snd-soc-dummy", + .trigger = {SND_SOC_DPCM_TRIGGER_POST, + SND_SOC_DPCM_TRIGGER_POST}, + .ignore_suspend = 1, + /* this dainlink has playback support */ + .ignore_pmdown_time = 1, + .be_id = MSM_FRONTEND_DAI_MULTIMEDIA25, + }, }; static struct snd_soc_dai_link msm_common_be_dai_links[] = { diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c index fa8bdddacef2..3610901addea 100644 --- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c @@ -5147,6 +5147,27 @@ static int msm_dai_tdm_q6_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "%s: Clk Rate from DT file %d\n", __func__, 
tdm_clk_set.clk_freq_in_hz); + /* initialize static tdm clk attribute to default value */ + tdm_clk_set.clk_attri = Q6AFE_LPASS_CLK_ATTRIBUTE_INVERT_COUPLE_NO; + + /* extract tdm clk attribute into static */ + if (of_find_property(pdev->dev.of_node, + "qcom,msm-cpudai-tdm-clk-attribute", NULL)) { + rc = of_property_read_u16(pdev->dev.of_node, + "qcom,msm-cpudai-tdm-clk-attribute", + &tdm_clk_set.clk_attri); + if (rc) { + dev_err(&pdev->dev, "%s: clk attribute from DT file %s\n", + __func__, "qcom,msm-cpudai-tdm-clk-attribute"); + goto rtn; + } + dev_dbg(&pdev->dev, "%s: clk attribute from DT file %d\n", + __func__, tdm_clk_set.clk_attri); + } else { + dev_dbg(&pdev->dev, "%s: No optional clk attribute found\n", + __func__); + } + /* extract tdm clk src master/slave info into static */ rc = of_property_read_u32(pdev->dev.of_node, "qcom,msm-cpudai-tdm-clk-internal", @@ -6226,6 +6247,41 @@ static int msm_dai_q6_tdm_set_tdm_slot(struct snd_soc_dai *dai, return rc; } +static int msm_dai_q6_tdm_set_sysclk(struct snd_soc_dai *dai, + int clk_id, unsigned int freq, int dir) +{ + struct msm_dai_q6_tdm_dai_data *dai_data = + dev_get_drvdata(dai->dev); + + switch (dai->id) { + case AFE_PORT_ID_PRIMARY_TDM_RX: + case AFE_PORT_ID_PRIMARY_TDM_RX_1: + case AFE_PORT_ID_PRIMARY_TDM_RX_2: + case AFE_PORT_ID_PRIMARY_TDM_RX_3: + case AFE_PORT_ID_PRIMARY_TDM_RX_4: + case AFE_PORT_ID_PRIMARY_TDM_RX_5: + case AFE_PORT_ID_PRIMARY_TDM_RX_6: + case AFE_PORT_ID_PRIMARY_TDM_RX_7: + case AFE_PORT_ID_PRIMARY_TDM_TX: + case AFE_PORT_ID_PRIMARY_TDM_TX_1: + case AFE_PORT_ID_PRIMARY_TDM_TX_2: + case AFE_PORT_ID_PRIMARY_TDM_TX_3: + case AFE_PORT_ID_PRIMARY_TDM_TX_4: + case AFE_PORT_ID_PRIMARY_TDM_TX_5: + case AFE_PORT_ID_PRIMARY_TDM_TX_6: + case AFE_PORT_ID_PRIMARY_TDM_TX_7: + dai_data->clk_set.clk_freq_in_hz = freq; + break; + default: + return 0; + } + + dev_dbg(dai->dev, "%s: dai id = 0x%x group clk_freq %d\n", + __func__, dai->id, freq); + return 0; +} + + static int 
msm_dai_q6_tdm_set_channel_map(struct snd_soc_dai *dai, unsigned int tx_num, unsigned int *tx_slot, unsigned int rx_num, unsigned int *rx_slot) @@ -6653,6 +6709,7 @@ static struct snd_soc_dai_ops msm_dai_q6_tdm_ops = { .hw_params = msm_dai_q6_tdm_hw_params, .set_tdm_slot = msm_dai_q6_tdm_set_tdm_slot, .set_channel_map = msm_dai_q6_tdm_set_channel_map, + .set_sysclk = msm_dai_q6_tdm_set_sysclk, .shutdown = msm_dai_q6_tdm_shutdown, }; diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c index 46a3324d2d6b..de769e8b806c 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c @@ -1603,6 +1603,262 @@ done: return ret; } +static int msm_pcm_playback_pan_scale_ctl_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + int ret = 0; + int len = 0; + int i = 0; + struct snd_pcm_usr *usr_info = snd_kcontrol_chip(kcontrol); + struct snd_pcm_substream *substream; + struct msm_audio *prtd; + struct asm_stream_pan_ctrl_params pan_param; + + if (!usr_info) { + pr_err("%s: usr_info is null\n", __func__); + ret = -EINVAL; + goto done; + } + + substream = usr_info->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; + if (!substream) { + pr_err("%s substream not found\n", __func__); + ret = -EINVAL; + goto done; + } + + if (!substream->runtime) { + pr_err("%s substream runtime not found\n", __func__); + ret = -EINVAL; + goto done; + } + prtd = substream->runtime->private_data; + if (!prtd) { + ret = -EINVAL; + goto done; + } + pan_param.num_output_channels = + ucontrol->value.integer.value[len++]; + if (pan_param.num_output_channels > + PCM_FORMAT_MAX_NUM_CHANNEL) { + ret = -EINVAL; + goto done; + } + pan_param.num_input_channels = + ucontrol->value.integer.value[len++]; + if (pan_param.num_input_channels > + PCM_FORMAT_MAX_NUM_CHANNEL) { + ret = -EINVAL; + goto done; + } + + if (ucontrol->value.integer.value[len++]) { + for (i = 0; i < pan_param.num_output_channels; i++) { + 
pan_param.output_channel_map[i] = + ucontrol->value.integer.value[len++]; + } + } + if (ucontrol->value.integer.value[len++]) { + for (i = 0; i < pan_param.num_input_channels; i++) { + pan_param.input_channel_map[i] = + ucontrol->value.integer.value[len++]; + } + } + if (ucontrol->value.integer.value[len++]) { + for (i = 0; i < pan_param.num_output_channels * + pan_param.num_input_channels; i++) { + pan_param.gain[i] = + !(ucontrol->value.integer.value[len++] > 0) ? + 0 : 2 << 13; + } + } + + ret = q6asm_set_mfc_panning_params(prtd->audio_client, + &pan_param); + len -= pan_param.num_output_channels * + pan_param.num_input_channels; + for (i = 0; i < pan_param.num_output_channels * + pan_param.num_input_channels; i++) { + pan_param.gain[i] = + ucontrol->value.integer.value[len++]; + } + ret = q6asm_set_vol_ctrl_gain_pair(prtd->audio_client, + &pan_param); + +done: + return ret; +} + +static int msm_pcm_playback_pan_scale_ctl_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + return 0; +} + +static int msm_add_stream_pan_scale_controls(struct snd_soc_pcm_runtime *rtd) +{ + struct snd_pcm *pcm; + struct snd_pcm_usr *pan_ctl_info; + struct snd_kcontrol *kctl; + const char *playback_mixer_ctl_name = "Audio Stream"; + const char *deviceNo = "NN"; + const char *suffix = "Pan Scale Control"; + int ctl_len, ret = 0; + + if (!rtd) { + pr_err("%s: rtd is NULL\n", __func__); + ret = -EINVAL; + goto done; + } + + pcm = rtd->pcm; + ctl_len = strlen(playback_mixer_ctl_name) + 1 + strlen(deviceNo) + 1 + + strlen(suffix) + 1; + + ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, + NULL, 1, ctl_len, rtd->dai_link->be_id, + &pan_ctl_info); + + if (ret < 0) { + pr_err("%s: failed add ctl %s. 
err = %d\n", + __func__, suffix, ret); + goto done; + } + kctl = pan_ctl_info->kctl; + snprintf(kctl->id.name, ctl_len, "%s %d %s", playback_mixer_ctl_name, + rtd->pcm->device, suffix); + kctl->put = msm_pcm_playback_pan_scale_ctl_put; + kctl->get = msm_pcm_playback_pan_scale_ctl_get; + pr_debug("%s: Registering new mixer ctl = %s\n", __func__, + kctl->id.name); +done: + return ret; + +} + +static int msm_pcm_playback_dnmix_ctl_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + return 0; +} + +static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + int ret = 0; + int len = 0; + int i = 0; + struct snd_pcm_usr *usr_info = snd_kcontrol_chip(kcontrol); + struct snd_pcm_substream *substream; + struct msm_audio *prtd; + struct asm_stream_pan_ctrl_params dnmix_param; + + int be_id = ucontrol->value.integer.value[len]; + int stream_id = 0; + + if (!usr_info) { + pr_err("%s usr_info is null\n", __func__); + ret = -EINVAL; + goto done; + } + substream = usr_info->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; + if (!substream) { + pr_err("%s substream not found\n", __func__); + ret = -EINVAL; + goto done; + } + if (!substream->runtime) { + pr_err("%s substream runtime not found\n", __func__); + ret = -EINVAL; + goto done; + } + prtd = substream->runtime->private_data; + if (!prtd) { + ret = -EINVAL; + goto done; + } + stream_id = prtd->audio_client->session; + dnmix_param.num_output_channels = + ucontrol->value.integer.value[len++]; + if (dnmix_param.num_output_channels > + PCM_FORMAT_MAX_NUM_CHANNEL) { + ret = -EINVAL; + goto done; + } + dnmix_param.num_input_channels = + ucontrol->value.integer.value[len++]; + if (dnmix_param.num_input_channels > + PCM_FORMAT_MAX_NUM_CHANNEL) { + ret = -EINVAL; + goto done; + } + + if (ucontrol->value.integer.value[len++]) { + for (i = 0; i < dnmix_param.num_output_channels; i++) { + dnmix_param.output_channel_map[i] = + 
ucontrol->value.integer.value[len++]; + } + } + if (ucontrol->value.integer.value[len++]) { + for (i = 0; i < dnmix_param.num_input_channels; i++) { + dnmix_param.input_channel_map[i] = + ucontrol->value.integer.value[len++]; + } + } + if (ucontrol->value.integer.value[len++]) { + for (i = 0; i < dnmix_param.num_output_channels * + dnmix_param.num_input_channels; i++) { + dnmix_param.gain[i] = + ucontrol->value.integer.value[len++]; + } + } + msm_routing_set_downmix_control_data(be_id, + stream_id, + &dnmix_param); + +done: + return ret; +} + +static int msm_add_device_down_mix_controls(struct snd_soc_pcm_runtime *rtd) +{ + struct snd_pcm *pcm; + struct snd_pcm_usr *usr_info; + struct snd_kcontrol *kctl; + const char *playback_mixer_ctl_name = "Audio Device"; + const char *deviceNo = "NN"; + const char *suffix = "Downmix Control"; + int ctl_len, ret = 0; + + if (!rtd) { + pr_err("%s: rtd is NULL\n", __func__); + ret = -EINVAL; + goto done; + } + + pcm = rtd->pcm; + ctl_len = strlen(playback_mixer_ctl_name) + 1 + + strlen(deviceNo) + 1 + strlen(suffix) + 1; + ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, + NULL, 1, ctl_len, rtd->dai_link->be_id, + &usr_info); + if (ret < 0) { + pr_err("%s: downmix control add failed: %d\n", + __func__, ret); + goto done; + } + + kctl = usr_info->kctl; + snprintf(kctl->id.name, ctl_len, "%s %d %s", + playback_mixer_ctl_name, rtd->pcm->device, suffix); + kctl->put = msm_pcm_playback_dnmix_ctl_put; + kctl->get = msm_pcm_playback_dnmix_ctl_get; + pr_debug("%s: downmix control name = %s\n", + __func__, playback_mixer_ctl_name); +done: + return ret; +} + static int msm_pcm_capture_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -1719,6 +1975,14 @@ static int msm_pcm_add_controls(struct snd_soc_pcm_runtime *rtd) if (ret) pr_err("%s: pcm add app type controls failed:%d\n", __func__, ret); + ret = msm_add_stream_pan_scale_controls(rtd); + if (ret) + pr_err("%s: pcm add pan scale 
controls failed:%d\n", + __func__, ret); + ret = msm_add_device_down_mix_controls(rtd); + if (ret) + pr_err("%s: pcm add dnmix controls failed:%d\n", + __func__, ret); return ret; } diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c index 974d4a582540..2bf61521ad52 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c @@ -35,6 +35,8 @@ #include <sound/audio_cal_utils.h> #include <sound/audio_effects.h> #include <sound/hwdep.h> +#include <sound/q6adm-v2.h> +#include <sound/apr_audio-v2.h> #include "msm-pcm-routing-v2.h" #include "msm-pcm-routing-devdep.h" @@ -118,17 +120,13 @@ static const char * const lsm_port_text[] = { }; struct msm_pcm_route_bdai_pp_params { - u16 port_id; /* AFE port ID */ unsigned long pp_params_config; bool mute_on; int latency; }; static struct msm_pcm_route_bdai_pp_params - msm_bedais_pp_params[MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX] = { - {HDMI_RX, 0, 0, 0}, - {DISPLAY_PORT_RX, 0, 0, 0}, -}; + msm_bedais_pp_params[MSM_BACKEND_DAI_MAX]; /* * The be_dai_name_table is passed to HAL so that it can specify the @@ -142,6 +140,7 @@ struct msm_pcm_route_bdai_name { }; static struct msm_pcm_route_bdai_name be_dai_name_table[MSM_BACKEND_DAI_MAX]; +static bool msm_pcm_routing_test_pp_param(int be_idx, long param_bit); static int msm_routing_send_device_pp_params(int port_id, int copp_idx, int fe_id); @@ -953,7 +952,7 @@ static void msm_pcm_routing_build_matrix(int fedai_id, int sess_type, uint32_t passthr_mode) { int i, port_type, j, num_copps = 0; - struct route_payload payload; + struct route_payload payload = { {0} }; port_type = ((path_type == ADM_PATH_PLAYBACK || path_type == ADM_PATH_COMPRESSED_RX) ? 
@@ -983,6 +982,11 @@ static void msm_pcm_routing_build_matrix(int fedai_id, int sess_type, fe_dai_app_type_cfg [fedai_id][sess_type][i] .sample_rate; + if (msm_pcm_routing_test_pp_param(i, + ADM_PP_PARAM_LIMITER_BIT)) + set_bit(ADM_STATUS_LIMITER, + &payload.route_status + [num_copps]); num_copps++; } } @@ -1088,7 +1092,7 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode, port_type = MSM_AFE_PORT_TYPE_RX; } else if (stream_type == SNDRV_PCM_STREAM_CAPTURE) { session_type = SESSION_TYPE_TX; - if (passthr_mode != LEGACY_PCM) + if ((passthr_mode != LEGACY_PCM) && (passthr_mode != LISTEN)) path_type = ADM_PATH_COMPRESSED_TX; else path_type = ADM_PATH_LIVE_REC; @@ -1204,6 +1208,11 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode, fe_dai_app_type_cfg [fe_id][session_type][i] .sample_rate; + if (msm_pcm_routing_test_pp_param(i, + ADM_PP_PARAM_LIMITER_BIT)) + set_bit(ADM_STATUS_LIMITER, + &payload.route_status + [num_copps]); num_copps++; } } @@ -1434,6 +1443,11 @@ int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode, fe_dai_app_type_cfg [fedai_id][session_type] [i].sample_rate; + if (msm_pcm_routing_test_pp_param(i, + ADM_PP_PARAM_LIMITER_BIT)) + set_bit(ADM_STATUS_LIMITER, + &payload.route_status + [num_copps]); num_copps++; } } @@ -4090,6 +4104,21 @@ static const struct snd_kcontrol_new slimbus_rx_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_SLIMBUS_0_RX, MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SLIMBUS_0_RX, + MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SLIMBUS_0_RX, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia23", MSM_BACKEND_DAI_SLIMBUS_0_RX, + MSM_FRONTEND_DAI_MULTIMEDIA23, 1, 0, msm_routing_get_audio_mixer, + 
msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia24", MSM_BACKEND_DAI_SLIMBUS_0_RX, + MSM_FRONTEND_DAI_MULTIMEDIA24, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia25", MSM_BACKEND_DAI_SLIMBUS_0_RX, + MSM_FRONTEND_DAI_MULTIMEDIA25, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), }; static const struct snd_kcontrol_new mi2s_rx_mixer_controls[] = { @@ -4612,6 +4641,21 @@ static const struct snd_kcontrol_new hdmi_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia19", MSM_BACKEND_DAI_HDMI_RX, MSM_FRONTEND_DAI_MULTIMEDIA19, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_HDMI_RX, + MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_HDMI_RX, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia23", MSM_BACKEND_DAI_HDMI_RX, + MSM_FRONTEND_DAI_MULTIMEDIA23, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia24", MSM_BACKEND_DAI_HDMI_RX, + MSM_FRONTEND_DAI_MULTIMEDIA24, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia25", MSM_BACKEND_DAI_HDMI_RX, + MSM_FRONTEND_DAI_MULTIMEDIA25, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), }; static const struct snd_kcontrol_new display_port_mixer_controls[] = { @@ -4760,6 +4804,21 @@ static const struct snd_kcontrol_new slimbus_6_rx_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SLIMBUS_6_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SLIMBUS_6_RX, + MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", 
MSM_BACKEND_DAI_SLIMBUS_6_RX, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia23", MSM_BACKEND_DAI_SLIMBUS_6_RX, + MSM_FRONTEND_DAI_MULTIMEDIA23, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia24", MSM_BACKEND_DAI_SLIMBUS_6_RX, + MSM_FRONTEND_DAI_MULTIMEDIA24, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia25", MSM_BACKEND_DAI_SLIMBUS_6_RX, + MSM_FRONTEND_DAI_MULTIMEDIA25, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), }; static const struct snd_kcontrol_new slimbus_7_rx_mixer_controls[] = { @@ -4811,6 +4870,21 @@ static const struct snd_kcontrol_new slimbus_7_rx_mixer_controls[] = { SOC_SINGLE_EXT("MultiMedia16", MSM_BACKEND_DAI_SLIMBUS_7_RX, MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia21", MSM_BACKEND_DAI_SLIMBUS_7_RX, + MSM_FRONTEND_DAI_MULTIMEDIA21, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia22", MSM_BACKEND_DAI_SLIMBUS_7_RX, + MSM_FRONTEND_DAI_MULTIMEDIA22, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia23", MSM_BACKEND_DAI_SLIMBUS_7_RX, + MSM_FRONTEND_DAI_MULTIMEDIA23, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia24", MSM_BACKEND_DAI_SLIMBUS_7_RX, + MSM_FRONTEND_DAI_MULTIMEDIA24, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MultiMedia25", MSM_BACKEND_DAI_SLIMBUS_7_RX, + MSM_FRONTEND_DAI_MULTIMEDIA25, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), }; static const struct snd_kcontrol_new usb_audio_rx_mixer_controls[] = { @@ -9797,6 +9871,22 @@ static const struct snd_kcontrol_new quat_tdm_rx_0_port_mixer_controls[] = { MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer, 
msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_0, + MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_0, + MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_0, + MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_0, + MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_0, MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0, msm_routing_get_port_mixer, @@ -9864,6 +9954,22 @@ static const struct snd_kcontrol_new quat_tdm_rx_1_port_mixer_controls[] = { MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_1, + MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_1, + MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_1, + MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_1, + MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_1, MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0, msm_routing_get_port_mixer, @@ -9931,6 +10037,22 @@ static const struct snd_kcontrol_new quat_tdm_rx_2_port_mixer_controls[] = { MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), + 
SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_2, + MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_2, + MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_2, + MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_2, + MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_2, MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0, msm_routing_get_port_mixer, @@ -9998,6 +10120,22 @@ static const struct snd_kcontrol_new quat_tdm_rx_3_port_mixer_controls[] = { MSM_BACKEND_DAI_SEC_AUXPCM_TX, 1, 0, msm_routing_get_port_mixer, msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_3, + MSM_BACKEND_DAI_PRI_TDM_TX_0, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_RX_3, + MSM_BACKEND_DAI_PRI_TDM_TX_1, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_QUAT_TDM_RX_3, + MSM_BACKEND_DAI_PRI_TDM_TX_2, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_RX_3, + MSM_BACKEND_DAI_PRI_TDM_TX_3, 1, 0, + msm_routing_get_port_mixer, + msm_routing_put_port_mixer), SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_RX_3, MSM_BACKEND_DAI_TERT_TDM_TX_0, 1, 0, msm_routing_get_port_mixer, @@ -11314,6 +11452,11 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = { SND_SOC_DAPM_AIF_IN("MM_DL15", "MultiMedia15 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("MM_DL16", "MultiMedia16 Playback", 0, 0, 0, 0), 
SND_SOC_DAPM_AIF_IN("MM_DL20", "MultiMedia20 Playback", 0, 0, 0, 0), + SND_SOC_DAPM_AIF_IN("MM_DL21", "MultiMedia21 Playback", 0, 0, 0, 0), + SND_SOC_DAPM_AIF_IN("MM_DL22", "MultiMedia22 Playback", 0, 0, 0, 0), + SND_SOC_DAPM_AIF_IN("MM_DL23", "MultiMedia23 Playback", 0, 0, 0, 0), + SND_SOC_DAPM_AIF_IN("MM_DL24", "MultiMedia24 Playback", 0, 0, 0, 0), + SND_SOC_DAPM_AIF_IN("MM_DL25", "MultiMedia25 Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_IN("VOIP_DL", "VoIP Playback", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL1", "MultiMedia1 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL2", "MultiMedia2 Capture", 0, 0, 0, 0), @@ -12450,6 +12593,11 @@ static const struct snd_soc_dapm_route intercon[] = { {"SLIMBUS_0_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"SLIMBUS_0_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, + {"SLIMBUS_0_RX Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"SLIMBUS_0_RX Audio Mixer", "MultiMedia22", "MM_DL22"}, + {"SLIMBUS_0_RX Audio Mixer", "MultiMedia23", "MM_DL23"}, + {"SLIMBUS_0_RX Audio Mixer", "MultiMedia24", "MM_DL24"}, + {"SLIMBUS_0_RX Audio Mixer", "MultiMedia25", "MM_DL25"}, {"SLIMBUS_0_RX", NULL, "SLIMBUS_0_RX Audio Mixer"}, {"SLIMBUS_2_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, @@ -12504,6 +12652,11 @@ static const struct snd_soc_dapm_route intercon[] = { {"HDMI Mixer", "MultiMedia14", "MM_DL14"}, {"HDMI Mixer", "MultiMedia15", "MM_DL15"}, {"HDMI Mixer", "MultiMedia16", "MM_DL16"}, + {"HDMI Mixer", "MultiMedia21", "MM_DL21"}, + {"HDMI Mixer", "MultiMedia22", "MM_DL22"}, + {"HDMI Mixer", "MultiMedia23", "MM_DL23"}, + {"HDMI Mixer", "MultiMedia24", "MM_DL24"}, + {"HDMI Mixer", "MultiMedia25", "MM_DL25"}, {"HDMI", NULL, "HDMI Mixer"}, {"DISPLAY_PORT Mixer", "MultiMedia1", "MM_DL1"}, @@ -12575,6 +12728,11 @@ static const struct snd_soc_dapm_route intercon[] = { {"SLIMBUS_6_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"SLIMBUS_6_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"SLIMBUS_6_RX Audio 
Mixer", "MultiMedia16", "MM_DL16"}, + {"SLIMBUS_6_RX Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"SLIMBUS_6_RX Audio Mixer", "MultiMedia22", "MM_DL22"}, + {"SLIMBUS_6_RX Audio Mixer", "MultiMedia23", "MM_DL23"}, + {"SLIMBUS_6_RX Audio Mixer", "MultiMedia24", "MM_DL24"}, + {"SLIMBUS_6_RX Audio Mixer", "MultiMedia25", "MM_DL25"}, {"SLIMBUS_6_RX", NULL, "SLIMBUS_6_RX Audio Mixer"}, {"SLIMBUS_7_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, @@ -12593,6 +12751,11 @@ static const struct snd_soc_dapm_route intercon[] = { {"SLIMBUS_7_RX Audio Mixer", "MultiMedia14", "MM_DL14"}, {"SLIMBUS_7_RX Audio Mixer", "MultiMedia15", "MM_DL15"}, {"SLIMBUS_7_RX Audio Mixer", "MultiMedia16", "MM_DL16"}, + {"SLIMBUS_7_RX Audio Mixer", "MultiMedia21", "MM_DL21"}, + {"SLIMBUS_7_RX Audio Mixer", "MultiMedia22", "MM_DL22"}, + {"SLIMBUS_7_RX Audio Mixer", "MultiMedia23", "MM_DL23"}, + {"SLIMBUS_7_RX Audio Mixer", "MultiMedia24", "MM_DL24"}, + {"SLIMBUS_7_RX Audio Mixer", "MultiMedia25", "MM_DL25"}, {"SLIMBUS_7_RX", NULL, "SLIMBUS_7_RX Audio Mixer"}, {"USB_AUDIO_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, @@ -14460,6 +14623,10 @@ static const struct snd_soc_dapm_route intercon[] = { {"QUAT_TDM_RX_0 Port Mixer", "AFE_PCM_TX", "PCM_TX"}, {"QUAT_TDM_RX_0 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, {"QUAT_TDM_RX_0 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"}, + {"QUAT_TDM_RX_0 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"}, + {"QUAT_TDM_RX_0 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"}, + {"QUAT_TDM_RX_0 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"}, + {"QUAT_TDM_RX_0 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"}, {"QUAT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"}, {"QUAT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"}, {"QUAT_TDM_RX_0 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"}, @@ -14478,6 +14645,10 @@ static const struct snd_soc_dapm_route intercon[] = { {"QUAT_TDM_RX_1 Port Mixer", "AFE_PCM_TX", "PCM_TX"}, {"QUAT_TDM_RX_1 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, 
{"QUAT_TDM_RX_1 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"}, + {"QUAT_TDM_RX_1 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"}, + {"QUAT_TDM_RX_1 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"}, + {"QUAT_TDM_RX_1 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"}, + {"QUAT_TDM_RX_1 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"}, {"QUAT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"}, {"QUAT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"}, {"QUAT_TDM_RX_1 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"}, @@ -14496,6 +14667,10 @@ static const struct snd_soc_dapm_route intercon[] = { {"QUAT_TDM_RX_2 Port Mixer", "AFE_PCM_TX", "PCM_TX"}, {"QUAT_TDM_RX_2 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, {"QUAT_TDM_RX_2 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"}, + {"QUAT_TDM_RX_2 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"}, + {"QUAT_TDM_RX_2 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"}, + {"QUAT_TDM_RX_2 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"}, + {"QUAT_TDM_RX_2 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"}, {"QUAT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"}, {"QUAT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"}, {"QUAT_TDM_RX_2 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"}, @@ -14514,6 +14689,10 @@ static const struct snd_soc_dapm_route intercon[] = { {"QUAT_TDM_RX_3 Port Mixer", "AFE_PCM_TX", "PCM_TX"}, {"QUAT_TDM_RX_3 Port Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, {"QUAT_TDM_RX_3 Port Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"}, + {"QUAT_TDM_RX_3 Port Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"}, + {"QUAT_TDM_RX_3 Port Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"}, + {"QUAT_TDM_RX_3 Port Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"}, + {"QUAT_TDM_RX_3 Port Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"}, {"QUAT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"}, {"QUAT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"}, {"QUAT_TDM_RX_3 Port Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"}, @@ -15168,7 +15347,7 @@ done: static int 
msm_routing_send_device_pp_params(int port_id, int copp_idx, int fe_id) { - int index, topo_id, be_idx; + int topo_id, be_idx; unsigned long pp_config = 0; bool mute_on; int latency; @@ -15192,16 +15371,6 @@ static int msm_routing_send_device_pp_params(int port_id, int copp_idx, return -EINVAL; } - for (index = 0; index < MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX; index++) { - if (msm_bedais_pp_params[index].port_id == port_id) - break; - } - if (index >= MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX) { - pr_err("%s: Invalid backend pp params index %d\n", - __func__, index); - return -EINVAL; - } - topo_id = adm_get_topology_for_port_copp_idx(port_id, copp_idx); if (topo_id != COMPRESSED_PASSTHROUGH_DEFAULT_TOPOLOGY) { pr_err("%s: Invalid passthrough topology 0x%x\n", @@ -15213,11 +15382,11 @@ static int msm_routing_send_device_pp_params(int port_id, int copp_idx, (msm_bedais[be_idx].passthr_mode[fe_id] == LISTEN)) compr_passthr_mode = false; - pp_config = msm_bedais_pp_params[index].pp_params_config; + pp_config = msm_bedais_pp_params[be_idx].pp_params_config; if (test_bit(ADM_PP_PARAM_MUTE_BIT, &pp_config)) { pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__); clear_bit(ADM_PP_PARAM_MUTE_BIT, &pp_config); - mute_on = msm_bedais_pp_params[index].mute_on; + mute_on = msm_bedais_pp_params[be_idx].mute_on; if ((msm_bedais[be_idx].active) && compr_passthr_mode) adm_send_compressed_device_mute(port_id, copp_idx, @@ -15227,7 +15396,7 @@ static int msm_routing_send_device_pp_params(int port_id, int copp_idx, pr_debug("%s: ADM_PP_PARAM_LATENCY\n", __func__); clear_bit(ADM_PP_PARAM_LATENCY_BIT, &pp_config); - latency = msm_bedais_pp_params[index].latency; + latency = msm_bedais_pp_params[be_idx].latency; if ((msm_bedais[be_idx].active) && compr_passthr_mode) adm_send_compressed_device_latency(port_id, copp_idx, @@ -15236,18 +15405,47 @@ static int msm_routing_send_device_pp_params(int port_id, int copp_idx, return 0; } +static bool msm_pcm_routing_test_pp_param(int be_idx, long param_bit) +{ + 
return test_bit(param_bit, + &msm_bedais_pp_params[be_idx].pp_params_config); +} + +static void msm_routing_set_pp_param(long param_bit, int value) +{ + int be_idx; + + if (value) { + for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) + set_bit(param_bit, + &msm_bedais_pp_params[be_idx]. + pp_params_config); + } else { + for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) + clear_bit(param_bit, + &msm_bedais_pp_params[be_idx]. + pp_params_config); + } +} + static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int pp_id = ucontrol->value.integer.value[0]; + int value = ucontrol->value.integer.value[1]; int port_id = 0; - int index, be_idx, i, topo_id, idx; + int be_idx, i, topo_id, idx; bool mute; int latency; bool compr_passthr_mode = true; pr_debug("%s: pp_id: 0x%x\n", __func__, pp_id); + if (pp_id == ADM_PP_PARAM_LIMITER_ID) { + msm_routing_set_pp_param(ADM_PP_PARAM_LIMITER_BIT, value); + goto done; + } + for (be_idx = 0; be_idx < MSM_BACKEND_DAI_MAX; be_idx++) { port_id = msm_bedais[be_idx].port_id; if (port_id == HDMI_RX || port_id == DISPLAY_PORT_RX) @@ -15259,16 +15457,6 @@ static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol, return -EINVAL; } - for (index = 0; index < MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX; index++) { - if (msm_bedais_pp_params[index].port_id == port_id) - break; - } - if (index >= MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX) { - pr_err("%s: Invalid pp params backend index %d\n", - __func__, index); - return -EINVAL; - } - for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions[0], MSM_FRONTEND_DAI_MM_SIZE) { if ((msm_bedais[be_idx].passthr_mode[i] == LEGACY_PCM) || @@ -15291,22 +15479,20 @@ static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol, switch (pp_id) { case ADM_PP_PARAM_MUTE_ID: pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__); - mute = ucontrol->value.integer.value[1] ? 
true : false; - msm_bedais_pp_params[index].mute_on = mute; + mute = value ? true : false; + msm_bedais_pp_params[be_idx].mute_on = mute; set_bit(ADM_PP_PARAM_MUTE_BIT, - &msm_bedais_pp_params[index].pp_params_config); + &msm_bedais_pp_params[be_idx].pp_params_config); if ((msm_bedais[be_idx].active) && compr_passthr_mode) adm_send_compressed_device_mute(port_id, idx, mute); break; case ADM_PP_PARAM_LATENCY_ID: pr_debug("%s: ADM_PP_PARAM_LATENCY\n", __func__); - msm_bedais_pp_params[index].latency = - ucontrol->value.integer.value[1]; + msm_bedais_pp_params[be_idx].latency = value; set_bit(ADM_PP_PARAM_LATENCY_BIT, - &msm_bedais_pp_params[index].pp_params_config); - latency = msm_bedais_pp_params[index].latency = - ucontrol->value.integer.value[1]; + &msm_bedais_pp_params[be_idx].pp_params_config); + latency = msm_bedais_pp_params[be_idx].latency = value; if ((msm_bedais[be_idx].active) && compr_passthr_mode) adm_send_compressed_device_latency(port_id, idx, latency); @@ -15318,6 +15504,7 @@ static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol, } } } +done: return 0; } @@ -15486,6 +15673,93 @@ static struct snd_pcm_ops msm_routing_pcm_ops = { .prepare = msm_pcm_routing_prepare, }; +int msm_routing_set_downmix_control_data(int be_id, int session_id, + struct asm_stream_pan_ctrl_params *dnmix_param) +{ + int i, rc = 0; + struct adm_pspd_param_data_t data; + struct audproc_chmixer_param_coeff dnmix_cfg; + uint16_t variable_payload = 0; + char *adm_params = NULL; + int port_id, copp_idx = 0; + uint32_t params_length = 0; + + if (be_id >= MSM_BACKEND_DAI_MAX) { + rc = -EINVAL; + return rc; + } + port_id = msm_bedais[be_id].port_id; + copp_idx = adm_get_default_copp_idx(port_id); + pr_debug("%s: port_id - %d, copp_idx %d session id - %d\n", + __func__, port_id, copp_idx, session_id); + + variable_payload = dnmix_param->num_output_channels * sizeof(uint16_t)+ + dnmix_param->num_input_channels * sizeof(uint16_t) + + 
dnmix_param->num_output_channels * sizeof(uint16_t) * + dnmix_param->num_input_channels * sizeof(uint16_t); + i = (variable_payload % sizeof(uint32_t)); + variable_payload += (i == 0) ? 0 : sizeof(uint32_t) - i; + + params_length = variable_payload + + sizeof(struct adm_pspd_param_data_t) + + sizeof(struct audproc_chmixer_param_coeff); + adm_params = kzalloc(params_length, GFP_KERNEL); + if (!adm_params) { + rc = -ENOMEM; + goto end; + } + + data.module_id = AUDPROC_MODULE_ID_CHMIXER; + data.param_id = AUDPROC_CHMIXER_PARAM_ID_COEFF; + data.param_size = sizeof(struct audproc_chmixer_param_coeff) + + variable_payload; + data.reserved = 0; + memcpy((u8 *)adm_params, &data, sizeof(struct adm_pspd_param_data_t)); + + dnmix_cfg.index = 0; + dnmix_cfg.num_output_channels = dnmix_param->num_output_channels; + dnmix_cfg.num_input_channels = dnmix_param->num_input_channels; + memcpy(((u8 *)adm_params + + sizeof(struct adm_pspd_param_data_t)), + &dnmix_cfg, sizeof(struct audproc_chmixer_param_coeff)); + + memcpy(((u8 *)adm_params + + sizeof(struct adm_pspd_param_data_t) + + sizeof(struct audproc_chmixer_param_coeff)), + dnmix_param->output_channel_map, + dnmix_param->num_output_channels * sizeof(uint16_t)); + memcpy(((u8 *)adm_params + + sizeof(struct adm_pspd_param_data_t) + + sizeof(struct audproc_chmixer_param_coeff) + + dnmix_param->num_output_channels * sizeof(uint16_t)), + dnmix_param->input_channel_map, + dnmix_param->num_input_channels * sizeof(uint16_t)); + memcpy(((u8 *)adm_params + + sizeof(struct adm_pspd_param_data_t) + + sizeof(struct audproc_chmixer_param_coeff) + + (dnmix_param->num_output_channels * sizeof(uint16_t)) + + (dnmix_param->num_input_channels * sizeof(uint16_t))), + dnmix_param->gain, + (dnmix_param->num_output_channels * sizeof(uint16_t)) * + (dnmix_param->num_input_channels * sizeof(uint16_t))); + + if (params_length) { + rc = adm_set_pspd_matrix_params(port_id, + copp_idx, + session_id, + adm_params, + params_length); + if (rc) { + pr_err("%s: 
send params failed rc=%d\n", __func__, rc); + rc = -EINVAL; + } + } +end: + kfree(adm_params); + return rc; +} + + /* Not used but frame seems to require it */ static int msm_routing_probe(struct snd_soc_platform *platform) { diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h index 19e726001d25..a8daff09db58 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h +++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h @@ -193,6 +193,11 @@ enum { MSM_FRONTEND_DAI_MULTIMEDIA18, MSM_FRONTEND_DAI_MULTIMEDIA19, MSM_FRONTEND_DAI_MULTIMEDIA20, + MSM_FRONTEND_DAI_MULTIMEDIA21, + MSM_FRONTEND_DAI_MULTIMEDIA22, + MSM_FRONTEND_DAI_MULTIMEDIA23, + MSM_FRONTEND_DAI_MULTIMEDIA24, + MSM_FRONTEND_DAI_MULTIMEDIA25, MSM_FRONTEND_DAI_CS_VOICE, MSM_FRONTEND_DAI_VOIP, MSM_FRONTEND_DAI_AFE_RX, @@ -218,8 +223,8 @@ enum { MSM_FRONTEND_DAI_MAX, }; -#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA20 + 1) -#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA20 +#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA25 + 1) +#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA25 enum { MSM_BACKEND_DAI_PRI_I2S_RX = 0, @@ -392,12 +397,20 @@ enum { #define RELEASE_LOCK 0 #define ACQUIRE_LOCK 1 -#define MSM_BACKEND_DAI_PP_PARAMS_REQ_MAX 2 #define HDMI_RX_ID 0x8001 -#define ADM_PP_PARAM_MUTE_ID 0 -#define ADM_PP_PARAM_MUTE_BIT 1 -#define ADM_PP_PARAM_LATENCY_ID 1 -#define ADM_PP_PARAM_LATENCY_BIT 2 + +enum { + ADM_PP_PARAM_MUTE_ID, + ADM_PP_PARAM_LATENCY_ID, + ADM_PP_PARAM_LIMITER_ID +}; + +enum { + ADM_PP_PARAM_MUTE_BIT = 0x1, + ADM_PP_PARAM_LATENCY_BIT = 0x2, + ADM_PP_PARAM_LIMITER_BIT = 0x4 +}; + #define BE_DAI_PORT_SESSIONS_IDX_MAX 4 #define BE_DAI_FE_SESSIONS_IDX_MAX 2 @@ -483,4 +496,6 @@ int msm_pcm_routing_reg_stream_app_type_cfg( int msm_pcm_routing_get_stream_app_type_cfg( int fedai_id, int session_type, int *be_id, struct msm_pcm_stream_app_type_cfg *cfg_data); +int msm_routing_set_downmix_control_data(int 
be_id, int session_id, + struct asm_stream_pan_ctrl_params *pan_param); #endif /*_MSM_PCM_H*/ diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c index cac28f43e5ae..65f5167d9dee 100644 --- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c +++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c @@ -298,11 +298,11 @@ int msm_qti_pp_send_stereo_to_custom_stereo_cmd(int port_id, int copp_idx, *update_params_value16++ = op_FR_ip_FR_weight; avail_length = avail_length - (4 * sizeof(uint16_t)); if (params_length) { - rc = adm_set_stereo_to_custom_stereo(port_id, - copp_idx, - session_id, - params_value, - params_length); + rc = adm_set_pspd_matrix_params(port_id, + copp_idx, + session_id, + params_value, + params_length); if (rc) { pr_err("%s: send params failed rc=%d\n", __func__, rc); kfree(params_value); diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c index 28aaf2172221..e1bfc950d0e3 100644 --- a/sound/soc/msm/qdsp6v2/q6adm.c +++ b/sound/soc/msm/qdsp6v2/q6adm.c @@ -48,12 +48,6 @@ #define DS2_ADM_COPP_TOPOLOGY_ID 0xFFFFFFFF #endif -/* ENUM for adm_status */ -enum adm_cal_status { - ADM_STATUS_CALIBRATION_REQUIRED = 0, - ADM_STATUS_MAX, -}; - struct adm_copp { atomic_t id[AFE_MAX_PORTS][MAX_COPPS_PER_PORT]; @@ -781,9 +775,9 @@ fail_cmd: return ret; } -int adm_set_stereo_to_custom_stereo(int port_id, int copp_idx, - unsigned int session_id, char *params, - uint32_t params_length) +int adm_set_pspd_matrix_params(int port_id, int copp_idx, + unsigned int session_id, char *params, + uint32_t params_length) { struct adm_cmd_set_pspd_mtmx_strtr_params_v5 *adm_params = NULL; int sz, rc = 0, port_idx; @@ -1413,6 +1407,7 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv) case ADM_CMD_DEVICE_OPEN_V5: case ADM_CMD_DEVICE_CLOSE_V5: case ADM_CMD_DEVICE_OPEN_V6: + case ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1: pr_debug("%s: Basic callback received, wake up.\n", __func__); 
atomic_set(&this_adm.copp.stat[port_idx] @@ -2695,6 +2690,97 @@ fail_cmd: return; } + +static int adm_set_mtmx_params_v1(int port_idx, int copp_idx, + int params_length, void *params) +{ + struct adm_cmd_set_mtmx_params_v1 *adm_params = NULL; + int rc = 0; + int sz; + + sz = sizeof(*adm_params) + params_length; + adm_params = kzalloc(sz, GFP_KERNEL); + if (!adm_params) + return -ENOMEM; + + memcpy(((u8 *)adm_params + sizeof(*adm_params)), + params, params_length); + adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + adm_params->hdr.pkt_size = sz; + adm_params->hdr.src_svc = APR_SVC_ADM; + adm_params->hdr.src_domain = APR_DOMAIN_APPS; + adm_params->hdr.src_port = 0; + adm_params->hdr.dest_svc = APR_SVC_ADM; + adm_params->hdr.dest_domain = APR_DOMAIN_ADSP; + adm_params->hdr.dest_port = + atomic_read(&this_adm.copp.id[port_idx][copp_idx]); + adm_params->hdr.token = port_idx << 16 | copp_idx; + adm_params->hdr.opcode = ADM_CMD_SET_MTMX_STRTR_DEV_PARAMS_V1; + adm_params->payload_addr_lsw = 0; + adm_params->payload_addr_msw = 0; + adm_params->mem_map_handle = 0; + adm_params->payload_size = params_length; + adm_params->copp_id = atomic_read(&this_adm.copp. 
+ id[port_idx][copp_idx]); + + atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1); + rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params); + if (rc < 0) { + pr_err("%s: Set params failed port_idx = 0x%x rc %d\n", + __func__, port_idx, rc); + rc = -EINVAL; + goto send_param_return; + } + /* Wait for the callback */ + rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx], + atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0, + msecs_to_jiffies(TIMEOUT_MS)); + if (!rc) { + pr_err("%s: Set params timed out port_idx = 0x%x\n", + __func__, port_idx); + rc = -EINVAL; + goto send_param_return; + } else if (atomic_read(&this_adm.copp.stat + [port_idx][copp_idx]) > 0) { + pr_err("%s: DSP returned error[%s]\n", + __func__, adsp_err_get_err_str( + atomic_read(&this_adm.copp.stat + [port_idx][copp_idx]))); + rc = adsp_err_get_lnx_err_code( + atomic_read(&this_adm.copp.stat + [port_idx][copp_idx])); + goto send_param_return; + } + rc = 0; +send_param_return: + kfree(adm_params); + return rc; +} + +static void adm_enable_mtmx_limiter(int port_idx, int copp_idx) +{ + int rc; + struct enable_param_v6 adm_param = { {0} }; + + adm_param.param.module_id = ADM_MTMX_MODULE_STREAM_LIMITER; + adm_param.param.param_id = AUDPROC_PARAM_ID_ENABLE; + adm_param.param.param_size = sizeof(adm_param.enable); + adm_param.enable = 1; + + rc = adm_set_mtmx_params_v1(port_idx, copp_idx, + sizeof(adm_param), &adm_param); + if (rc < 0) { + pr_err("%s: adm_set_mtmx_params_v1 failed port_idx = 0x%x rc %d\n", + __func__, port_idx, rc); + goto done; + } + set_bit(ADM_STATUS_LIMITER, + (void *)&this_adm.copp.adm_status[port_idx][copp_idx]); +done: + return; +} + static void route_set_opcode_matrix_id( struct adm_cmd_matrix_map_routings_v5 **route_addr, int path, uint32_t passthr_mode) @@ -2791,6 +2877,11 @@ int adm_matrix_map(int path, struct route_payload payload_map, int perf_mode, copp_idx = payload_map.copp_idx[i]; copps_list[i] = atomic_read(&this_adm.copp.id[port_idx] 
[copp_idx]); + if (test_bit(ADM_STATUS_LIMITER, + (void *)&payload_map.route_status) && + ((path == ADM_PATH_PLAYBACK) || + (path == ADM_PATH_COMPRESSED_RX))) + adm_enable_mtmx_limiter(port_idx, copp_idx); } atomic_set(&this_adm.matrix_map_stat, -1); @@ -2991,6 +3082,8 @@ int adm_close(int port_id, int perf_mode, int copp_idx) clear_bit(ADM_STATUS_CALIBRATION_REQUIRED, (void *)&this_adm.copp.adm_status[port_idx][copp_idx]); + clear_bit(ADM_STATUS_LIMITER, + (void *)&this_adm.copp.adm_status[port_idx][copp_idx]); ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close); if (ret < 0) { @@ -4807,8 +4900,7 @@ static int __init adm_init(void) &this_adm.copp.adm_delay_wait[i][j]); atomic_set(&this_adm.copp.topology[i][j], 0); this_adm.copp.adm_delay[i][j] = 0; - this_adm.copp.adm_status[i][j] = - ADM_STATUS_CALIBRATION_REQUIRED; + this_adm.copp.adm_status[i][j] = 0; } } diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c index 44e5b01b1ccb..d3cff9fccf59 100644 --- a/sound/soc/msm/qdsp6v2/q6asm.c +++ b/sound/soc/msm/qdsp6v2/q6asm.c @@ -44,7 +44,7 @@ #define TRUE 0x01 #define FALSE 0x00 #define SESSION_MAX 8 - +#define ASM_MAX_CHANNELS 8 enum { ASM_TOPOLOGY_CAL = 0, ASM_CUSTOM_TOP_CAL, @@ -7419,6 +7419,296 @@ int q6asm_set_softvolume_v2(struct audio_client *ac, return __q6asm_set_softvolume(ac, softvol_param, instance); } +int q6asm_set_vol_ctrl_gain_pair(struct audio_client *ac, + struct asm_stream_pan_ctrl_params *pan_param) +{ + int sz = 0; + int rc = 0; + int i = 0; + int32_t ch = 0; + struct apr_hdr hdr; + struct audproc_volume_ctrl_channel_type_gain_pair + gain_data[ASM_MAX_CHANNELS]; + struct asm_stream_cmd_set_pp_params_v2 payload_params; + struct asm_stream_param_data_v2 data; + uint16_t *asm_params = NULL; + + if (ac == NULL) { + pr_err("%s: ac is NULL\n", __func__); + rc = -EINVAL; + goto fail; + } + if (ac->apr == NULL) { + dev_err(ac->dev, "%s: ac apr handle NULL\n", __func__); + rc = -EINVAL; + goto fail; + } + + sz = sizeof(struct apr_hdr) 
+ + sizeof(struct asm_stream_cmd_set_pp_params_v2) + + sizeof(struct asm_stream_param_data_v2) + + sizeof(uint32_t) + + (sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) * + ASM_MAX_CHANNELS); + asm_params = kzalloc(sz, GFP_KERNEL); + if (!asm_params) { + rc = -ENOMEM; + goto fail; + } + + q6asm_add_hdr_async(ac, &hdr, sz, TRUE); + atomic_set(&ac->cmd_state_pp, -1); + + hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; + memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr)); + + payload_params.data_payload_addr_lsw = 0; + payload_params.data_payload_addr_msw = 0; + payload_params.mem_map_handle = 0; + payload_params.data_payload_size = + sizeof(struct asm_stream_param_data_v2) + + sizeof(uint32_t) + + (sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) * + ASM_MAX_CHANNELS); + memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)), + &payload_params, + sizeof(struct asm_stream_cmd_set_pp_params_v2)); + + data.module_id = AUDPROC_MODULE_ID_VOL_CTRL; + data.param_id = AUDPROC_PARAM_ID_MULTICHANNEL_GAIN; + data.param_size = sizeof(uint32_t) + + (sizeof(struct audproc_volume_ctrl_channel_type_gain_pair) * + ASM_MAX_CHANNELS); + data.reserved = 0; + memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + + sizeof(struct asm_stream_cmd_set_pp_params_v2)), + &data, sizeof(struct asm_stream_param_data_v2)); + + ch = pan_param->num_output_channels; + memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + + sizeof(struct asm_stream_cmd_set_pp_params_v2) + + sizeof(struct asm_stream_param_data_v2)), + &ch, + sizeof(uint32_t)); + + memset(gain_data, 0, + ASM_MAX_CHANNELS * + sizeof(struct audproc_volume_ctrl_channel_type_gain_pair)); + for (i = 0; i < pan_param->num_output_channels; i++) { + gain_data[i].channel_type = + pan_param->output_channel_map[i]; + gain_data[i].gain = pan_param->gain[i]; + } + memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + + sizeof(struct asm_stream_cmd_set_pp_params_v2) + + sizeof(struct asm_stream_param_data_v2) + + sizeof(uint32_t)), + 
gain_data, + ASM_MAX_CHANNELS * + sizeof(struct audproc_volume_ctrl_channel_type_gain_pair)); + + rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params); + if (rc < 0) { + pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", + __func__, data.param_id, rc); + goto done; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state_pp) >= 0), 5 * HZ); + if (!rc) { + pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, + data.param_id); + rc = -EINVAL; + goto done; + } + if (atomic_read(&ac->cmd_state_pp) > 0) { + pr_err("%s: DSP returned error[%d], set-params paramid[0x%x]\n", + __func__, atomic_read(&ac->cmd_state_pp), + data.param_id); + rc = -EINVAL; + goto done; + } + rc = 0; +done: + kfree(asm_params); +fail: + return rc; +} + +int q6asm_set_mfc_panning_params(struct audio_client *ac, + struct asm_stream_pan_ctrl_params *pan_param) +{ + int sz, rc, i; + struct audproc_mfc_output_media_fmt mfc_cfg; + struct apr_hdr hdr; + struct asm_stream_cmd_set_pp_params_v2 payload_params; + struct asm_stream_param_data_v2 data; + struct audproc_chmixer_param_coeff pan_cfg; + uint16_t variable_payload = 0; + uint16_t *asm_params = NULL; + + sz = rc = i = 0; + if (ac == NULL) { + pr_err("%s: ac handle NULL\n", __func__); + rc = -EINVAL; + goto fail_cmd1; + } + if (ac->apr == NULL) { + pr_err("%s: ac apr handle NULL\n", __func__); + rc = -EINVAL; + goto fail_cmd1; + } + + sz = sizeof(struct audproc_mfc_output_media_fmt); + q6asm_add_hdr_async(ac, &mfc_cfg.params.hdr, sz, TRUE); + atomic_set(&ac->cmd_state_pp, -1); + mfc_cfg.params.hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; + mfc_cfg.params.payload_addr_lsw = 0; + mfc_cfg.params.payload_addr_msw = 0; + mfc_cfg.params.mem_map_handle = 0; + mfc_cfg.params.payload_size = sizeof(mfc_cfg) - sizeof(mfc_cfg.params); + mfc_cfg.data.module_id = AUDPROC_MODULE_ID_MFC; + mfc_cfg.data.param_id = AUDPROC_PARAM_ID_MFC_OUTPUT_MEDIA_FORMAT; + mfc_cfg.data.param_size = mfc_cfg.params.payload_size - + 
sizeof(mfc_cfg.data); + mfc_cfg.data.reserved = 0; + mfc_cfg.sampling_rate = 0; + mfc_cfg.bits_per_sample = 0; + mfc_cfg.num_channels = pan_param->num_output_channels; + for (i = 0; i < mfc_cfg.num_channels; i++) + mfc_cfg.channel_type[i] = pan_param->output_channel_map[i]; + + rc = apr_send_pkt(ac->apr, (uint32_t *) &mfc_cfg); + if (rc < 0) { + pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", + __func__, mfc_cfg.data.param_id, rc); + rc = -EINVAL; + goto fail_cmd1; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ); + if (!rc) { + pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, + mfc_cfg.data.param_id); + rc = -ETIMEDOUT; + goto fail_cmd1; + } + if (atomic_read(&ac->cmd_state_pp) > 0) { + pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n", + __func__, adsp_err_get_err_str( + atomic_read(&ac->cmd_state_pp)), + mfc_cfg.data.param_id); + rc = adsp_err_get_lnx_err_code( + atomic_read(&ac->cmd_state_pp)); + goto fail_cmd1; + } + + variable_payload = pan_param->num_output_channels * sizeof(uint16_t)+ + pan_param->num_input_channels * sizeof(uint16_t) + + pan_param->num_output_channels * sizeof(uint16_t) * + pan_param->num_input_channels * sizeof(uint16_t); + i = (variable_payload % sizeof(uint32_t)); + variable_payload += (i == 0) ? 
0 : sizeof(uint32_t) - i; + sz = sizeof(struct apr_hdr) + + sizeof(struct asm_stream_cmd_set_pp_params_v2) + + sizeof(struct asm_stream_param_data_v2) + + sizeof(struct audproc_chmixer_param_coeff) + + variable_payload; + + asm_params = kzalloc(sz, GFP_KERNEL); + if (!asm_params) { + rc = -ENOMEM; + goto fail_cmd1; + } + + q6asm_add_hdr_async(ac, &hdr, sz, TRUE); + atomic_set(&ac->cmd_state_pp, -1); + hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS_V2; + memcpy(((u8 *)asm_params), &hdr, sizeof(struct apr_hdr)); + + payload_params.data_payload_addr_lsw = 0; + payload_params.data_payload_addr_msw = 0; + payload_params.mem_map_handle = 0; + payload_params.data_payload_size = + sizeof(struct audproc_chmixer_param_coeff) + + variable_payload + sizeof(struct asm_stream_param_data_v2); + memcpy(((u8 *)asm_params + sizeof(struct apr_hdr)), + &payload_params, + sizeof(struct asm_stream_cmd_set_pp_params_v2)); + + data.module_id = AUDPROC_MODULE_ID_MFC; + data.param_id = AUDPROC_CHMIXER_PARAM_ID_COEFF; + data.param_size = sizeof(struct audproc_chmixer_param_coeff) + + variable_payload; + data.reserved = 0; + memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + + sizeof(struct asm_stream_cmd_set_pp_params_v2)), + &data, sizeof(struct asm_stream_param_data_v2)); + + pan_cfg.index = 0; + pan_cfg.num_output_channels = pan_param->num_output_channels; + pan_cfg.num_input_channels = pan_param->num_input_channels; + memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + + sizeof(struct asm_stream_cmd_set_pp_params_v2) + + sizeof(struct asm_stream_param_data_v2)), + &pan_cfg, sizeof(struct audproc_chmixer_param_coeff)); + memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + + sizeof(struct asm_stream_cmd_set_pp_params_v2) + + sizeof(struct asm_stream_param_data_v2) + + sizeof(struct audproc_chmixer_param_coeff)), + pan_param->output_channel_map, + pan_param->num_output_channels * sizeof(uint16_t)); + memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + + sizeof(struct 
asm_stream_cmd_set_pp_params_v2) + + sizeof(struct asm_stream_param_data_v2) + + sizeof(struct audproc_chmixer_param_coeff) + + pan_param->num_output_channels * sizeof(uint16_t)), + pan_param->input_channel_map, + pan_param->num_input_channels * sizeof(uint16_t)); + memcpy(((u8 *)asm_params + sizeof(struct apr_hdr) + + sizeof(struct asm_stream_cmd_set_pp_params_v2) + + sizeof(struct asm_stream_param_data_v2) + + sizeof(struct audproc_chmixer_param_coeff) + + (pan_param->num_output_channels * sizeof(uint16_t)) + + (pan_param->num_input_channels * sizeof(uint16_t))), + pan_param->gain, + (pan_param->num_output_channels * sizeof(uint16_t)) * + (pan_param->num_input_channels * sizeof(uint16_t))); + + rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params); + if (rc < 0) { + pr_err("%s: set-params send failed paramid[0x%x] rc %d\n", + __func__, data.param_id, rc); + rc = -EINVAL; + goto fail_cmd2; + } + + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state_pp) >= 0), 5*HZ); + if (!rc) { + pr_err("%s: timeout, set-params paramid[0x%x]\n", __func__, + data.param_id); + rc = -ETIMEDOUT; + goto fail_cmd2; + } + if (atomic_read(&ac->cmd_state_pp) > 0) { + pr_err("%s: DSP returned error[%s] set-params paramid[0x%x]\n", + __func__, adsp_err_get_err_str( + atomic_read(&ac->cmd_state_pp)), + data.param_id); + rc = adsp_err_get_lnx_err_code( + atomic_read(&ac->cmd_state_pp)); + goto fail_cmd2; + } + rc = 0; +fail_cmd2: + kfree(asm_params); +fail_cmd1: + return rc; +} + int q6asm_equalizer(struct audio_client *ac, void *eq_p) { struct asm_eq_params eq; diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 05012bb178d7..fdd87c7e3e91 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1460,16 +1460,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, Dwarf_Addr _addr = 0, baseaddr = 0; const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp; int baseline = 0, lineno = 0, ret 
= 0; - bool reloc = false; -retry: + /* We always need to relocate the address for aranges */ + if (debuginfo__get_text_offset(dbg, &baseaddr) == 0) + addr += baseaddr; /* Find cu die */ if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { - if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) { - addr += baseaddr; - reloc = true; - goto retry; - } pr_warning("Failed to find debug information for address %lx\n", addr); ret = -EINVAL; diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c index 77147b42d598..f1c055f3c243 100644 --- a/tools/vm/page_owner_sort.c +++ b/tools/vm/page_owner_sort.c @@ -79,12 +79,12 @@ static void add_list(char *buf, int len) } } -#define BUF_SIZE 1024 +#define BUF_SIZE (128 * 1024) int main(int argc, char **argv) { FILE *fin, *fout; - char buf[BUF_SIZE]; + char *buf; int ret, i, count; struct block_list *list2; struct stat st; @@ -107,6 +107,11 @@ int main(int argc, char **argv) max_size = st.st_size / 100; /* hack ... */ list = malloc(max_size * sizeof(*list)); + buf = malloc(BUF_SIZE); + if (!list || !buf) { + printf("Out of memory\n"); + exit(1); + } for ( ; ; ) { ret = read_block(buf, BUF_SIZE, fin); |
