278 files changed, 8847 insertions, 3254 deletions
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index d8c3a7c35465..32de5ce3da7e 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -141,6 +141,9 @@ Optional Properties:
 		rendering thread is running on masked CPUs.
 		Bit 0 is for CPU-0, bit 1 is for CPU-1...
 
+- qcom,l2pc-update-queue:
+		When true, also disables L2PC on masked CPUs at queue time.
+
 - qcom,snapshot-size:
 		Specify the size of snapshot in bytes. This will override
 		snapshot size defined in the driver code.
diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt
index fc019bda50a7..bf3ad8a71c26 100644
--- a/Documentation/devicetree/bindings/pci/msm_pcie.txt
+++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt
@@ -97,6 +97,9 @@ Optional Properties:
 	and assign for each endpoint.
  - qcom,ep-latency: The time (unit: ms) to wait for the PCIe endpoint to become
    stable after power on, before de-asserting PERST to the endpoint.
+ - qcom,switch-latency: The time (unit: ms) to wait for the PCIe endpoint's link
+   training with the switch downstream port, after the link between the switch
+   upstream port and the RC is up.
  - qcom,wr-halt-size: With base 2, this exponent determines the size of the
    data that the PCIe core will halt on for each write transaction.
  - qcom,cpl-timeout: Completion timeout value. This value specifies the time range
@@ -276,6 +279,7 @@ Example:
 		qcom,smmu-exist;
 		qcom,smmu-sid-base = <0x1480>;
 		qcom,ep-latency = <100>;
+		qcom,switch-latency = <100>;
 		qcom,wr-halt-size = <0xa>; /* 1KB */
 		qcom,cpl-timeout = <0x2>;
 
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
index 3527779ef93c..f01eae10bf4f 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb2.txt
@@ -178,6 +178,10 @@ Charger specific properties:
   Definition: WD bark-timeout in seconds. The possible values are
 	      16, 32, 64, 128. If not defined it defaults to 64.
 
+- qcom,sw-jeita-enable
+  Usage:      optional
+  Value type: bool
+  Definition: Boolean flag which, when present, enables SW compensation for JEITA.
 
 =============================================
 Second Level Nodes - SMB2 Charger Peripherals
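The qcom,sw-jeita-enable binding above, like the other boolean flags in these documents, is a presence-only property, so a driver reads it with of_property_read_bool() rather than parsing a value. A minimal sketch of how a charger driver might latch the flag at probe time (the struct and function names here are illustrative, not taken from this patch):

    #include <linux/of.h>
    #include <linux/types.h>

    struct smb_jeita_cfg {
        bool sw_jeita_enabled;  /* illustrative field name */
    };

    static void parse_jeita_cfg(struct device_node *node,
                                struct smb_jeita_cfg *cfg)
    {
        /* true iff the property is present; boolean DT props carry no value */
        cfg->sw_jeita_enabled =
            of_property_read_bool(node, "qcom,sw-jeita-enable");
    }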
diff --git a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
index 061c1d16ad24..77d6bf06ee26 100644
--- a/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
+++ b/Documentation/devicetree/bindings/wcnss/wcnss-wlan.txt
@@ -12,7 +12,7 @@ Required properties:
     "riva_ccu_base", "pronto_a2xb_base", "pronto_ccpu_base", "pronto_saw2_base",
     "wlan_tx_phy_aborts", "wlan_brdg_err_source", "wlan_tx_status",
     "alarms_txctl", "alarms_tactl",
-    "pronto_mcu_base".
+    "pronto_mcu_base", "pronto_qfuse".
 - interrupts: Pronto to Apps interrupts for tx done and rx pending.
 - qcom,pronto-vddmx-supply: regulator to supply pronto pll.
 - qcom,pronto-vddcx-supply: voltage corner regulator to supply WLAN/BT/FM
@@ -29,7 +29,7 @@ Required properties:
 - qcom,wcnss-vadc: VADC handle for battery voltage notification APIs.
 - pinctrl-<n>: Pinctrl states as described in bindings/pinctrl/pinctrl-bindings.txt
 - pinctrl-names: Names corresponding to the numbered pinctrl states
-- clocks: from common clock binding: handle to xo and rf_clk clocks.
+- clocks: from common clock binding: handle to xo, rf_clk and wcnss snoc clocks.
 - clock-names: Names of all the clocks that are accessed by the subsystem
 - qcom,vdd-voltage-level: This property represents (nominal, min, max) voltage
 for iris and pronto regulators in milli-volts.
@@ -39,11 +39,16 @@ iris and pronto regulators in micro-amps.
 Optional properties:
 - qcom,has-autodetect-xo: boolean flag to determine whether Iris XO auto detect
 should be performed during boot up.
+- qcom,snoc-wcnss-clock-freq: indicates the wcnss snoc clock frequency in Hz.
+If the wcnss_snoc clock is specified in the list of clocks, this property needs
+to be set to make it functional.
 - qcom,wlan-rx-buff-count: WLAN RX buffer count is a configurable value;
 using a smaller count for this buffer will reduce the memory usage.
 - qcom,is-pronto-v3: boolean flag to determine the pronto hardware version
 in use. Subsequently, the correct workqueue will be used by the DXE engine to
 push frames in the TX data path.
+- qcom,is-dual-band-disable: boolean flag to determine the WLAN dual band
+  capability.
 - qcom,is-pronto-vadc: boolean flag to determine Battery voltage feature
 support for pronto hardware.
 - qcom,wcnss-pm : <Core rail LDO#, PA rail LDO#, XO settling time,
@@ -59,6 +64,8 @@ support for pronto hardware.
 to use for VBATT feature.
 - qcom,has-a2xb-split-reg: boolean flag to determine whether the A2xb split
 timeout limit register is available.
+- qcom,wcn-external-gpio-support: boolean flag to determine 3.3v gpio support
+for pronto hardware for a target.
 
 Example:
 
@@ -80,6 +87,7 @@ Example:
 		gpios = <&msmgpio 36 0>, <&msmgpio 37 0>, <&msmgpio 38 0>,
 			<&msmgpio 39 0>, <&msmgpio 40 0>;
+		qcom,wcn-external-gpio-support;
 		qcom,has-48mhz-xo;
 		qcom,is-pronto-vt;
 		qcom,wlan-rx-buff-count = <512>;
@@ -94,7 +102,12 @@ Example:
 		clocks = <&clock_rpm clk_xo_wlan_clk>,
 			<&clock_rpm clk_rf_clk2>,
 			<&clock_debug clk_gcc_debug_mux>,
-			<&clock_gcc clk_wcnss_m_clk>;
-		clock-names = "xo", "rf_clk", "measure", "wcnss_debug";
+			<&clock_gcc clk_wcnss_m_clk>,
+			<&clock_gcc clk_snoc_wcnss_a_clk>;
+
+		clock-names = "xo", "rf_clk", "measure", "wcnss_debug",
+				"snoc_wcnss";
+
+		qcom,snoc-wcnss-clock-freq = <200000000>;
 		qcom,wcnss-pm = <11 21 1200 1 1 6>;
 	};
diff --git a/Makefile b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 76
+SUBLEVEL = 78
 EXTRAVERSION =
 NAME = Blurry Fish Butt
diff --git a/android/configs/android-base.cfg b/android/configs/android-base.cfg
index 96edb3c84425..d1e3b0891a4e 100644
--- a/android/configs/android-base.cfg
+++ b/android/configs/android-base.cfg
@@ -17,7 +17,6 @@ CONFIG_AUDIT=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CGROUPS=y
 CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_DEFAULT_SECURITY_SELINUX=y
"msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770", + "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867", + "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871", + "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866", + "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870", "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883", "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887", "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898", diff --git a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi index 51a225b82f47..ff3b7b80c449 100644 --- a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi +++ b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -54,6 +54,8 @@ qcom,ulps-enabled; qcom,dcs-cmd-by-left; qcom,mdss-dsi-tx-eot-append; + qcom,mdss-pan-physical-width-dimension = <68>; + qcom,mdss-pan-physical-height-dimension = <121>; qcom,adjust-timer-wakeup-ms = <1>; qcom,mdss-dsi-on-command = [ diff --git a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi index 02c87067f212..933746b8abe7 100644 --- a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi +++ b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -46,6 +46,8 @@ qcom,mdss-dsi-mdp-trigger = "none"; qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>; qcom,mdss-dsi-tx-eot-append; + qcom,mdss-pan-physical-width-dimension = <68>; + qcom,mdss-pan-physical-height-dimension = <121>; qcom,adjust-timer-wakeup-ms = <1>; qcom,mdss-dsi-on-command = [ diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi index 9dbec1e87fda..2dc5c919190c 100644 --- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi @@ -1016,6 +1016,10 @@ <&afe_proxy_rx>, <&afe_proxy_tx>, <&incall_record_rx>, <&incall_record_tx>, <&incall_music_rx>, <&incall_music2_rx>, + <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>, + <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>, + <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>, + <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>, <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>, <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>, <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>, @@ -1034,6 +1038,10 @@ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770", + "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867", + "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871", + "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866", + "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870", "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883", "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887", "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898", diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi index ad14bfd00cd1..1d5e3035afd0 100644 --- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi +++ 
diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
index 9dbec1e87fda..2dc5c919190c 100644
--- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
@@ -1016,6 +1016,10 @@
 				<&afe_proxy_rx>, <&afe_proxy_tx>,
 				<&incall_record_rx>, <&incall_record_tx>,
 				<&incall_music_rx>, <&incall_music2_rx>,
+				<&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>,
+				<&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
+				<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
+				<&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
 				<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
 				<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
 				<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
@@ -1034,6 +1038,10 @@
 				"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
 				"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
 				"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+				"msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867",
+				"msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
+				"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
+				"msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
 				"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
 				"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
 				"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
index ad14bfd00cd1..1d5e3035afd0 100644
--- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
@@ -838,6 +838,10 @@
 				<&afe_proxy_rx>, <&afe_proxy_tx>,
 				<&incall_record_rx>, <&incall_record_tx>,
 				<&incall_music_rx>, <&incall_music2_rx>,
+				<&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>,
+				<&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
+				<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
+				<&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
 				<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
 				<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
 				<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
@@ -856,6 +860,10 @@
 				"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
 				"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
 				"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+				"msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867",
+				"msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
+				"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
+				"msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
 				"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
 				"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
 				"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
diff --git a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
index d3ea51268590..c8898ec01992 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
@@ -538,6 +538,10 @@
 				<&afe_proxy_rx>, <&afe_proxy_tx>,
 				<&incall_record_rx>, <&incall_record_tx>,
 				<&incall_music_rx>, <&incall_music2_rx>,
+				<&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>,
+				<&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>,
+				<&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>,
+				<&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>,
 				<&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>,
 				<&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>,
 				<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>,
@@ -556,6 +560,10 @@
 				"msm-dai-q6-dev.241", "msm-dai-q6-dev.240",
 				"msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772",
 				"msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770",
+				"msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867",
+				"msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871",
+				"msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866",
+				"msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870",
 				"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883",
 				"msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887",
 				"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898",
diff --git a/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi
index ff128acb376a..316859a65801 100644
--- a/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-pinctrl.dtsi
@@ -1449,7 +1449,7 @@
 	};
 
 	cnss_pins {
-		cnss_default: cnss_default {
+		cnss_bootstrap_active: cnss_bootstrap_active {
 			mux {
 				pins = "gpio46";
 				function = "gpio";
@@ -1458,6 +1458,20 @@
 			config {
 				pins = "gpio46";
 				drive-strength = <16>;
+				output-high;
+				bias-pull-up;
+			};
+		};
+		cnss_bootstrap_sleep: cnss_bootstrap_sleep {
+			mux {
+				pins = "gpio46";
+				function = "gpio";
+			};
+
+			config {
+				pins = "gpio46";
+				drive-strength = <2>;
+				output-low;
 				bias-pull-down;
 			};
 		};
"default"; - pinctrl-0 = <&cnss_default>; + pinctrl-names = "bootstrap_active", "bootstrap_sleep"; + pinctrl-0 = <&cnss_bootstrap_active>; + pinctrl-1 = <&cnss_bootstrap_sleep>; qcom,wlan-rc-num = <0>; qcom,wlan-ramdump-dynamic = <0x200000>; @@ -3355,6 +3357,82 @@ }; }; + qcom,msm-dai-tdm-pri-rx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37120>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36864 36866 36868 36870>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <0>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <1>; + qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>; + dai_pri_tdm_rx_0: qcom,msm-dai-q6-tdm-pri-rx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36864>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_rx_1: qcom,msm-dai-q6-tdm-pri-rx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36866>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_rx_2: qcom,msm-dai-q6-tdm-pri-rx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36868>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_rx_3: qcom,msm-dai-q6-tdm-pri-rx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36870>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + + qcom,msm-dai-tdm-pri-tx { + compatible = "qcom,msm-dai-tdm"; + qcom,msm-cpudai-tdm-group-id = <37121>; + qcom,msm-cpudai-tdm-group-num-ports = <4>; + qcom,msm-cpudai-tdm-group-port-id = <36865 36867 36869 36871>; + qcom,msm-cpudai-tdm-clk-rate = <12288000>; + qcom,msm-cpudai-tdm-clk-internal = <1>; + qcom,msm-cpudai-tdm-sync-mode = <0>; + qcom,msm-cpudai-tdm-sync-src = <1>; + qcom,msm-cpudai-tdm-data-out = <0>; + qcom,msm-cpudai-tdm-invert-sync = <0>; + qcom,msm-cpudai-tdm-data-delay = <1>; + qcom,msm-cpudai-tdm-clk-attribute = /bits/ 16 <1>; + dai_pri_tdm_tx_0: qcom,msm-dai-q6-tdm-pri-tx-0 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36865>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_tx_1: qcom,msm-dai-q6-tdm-pri-tx-1 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36867>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_tx_2: qcom,msm-dai-q6-tdm-pri-tx-2 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36869>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + + dai_pri_tdm_tx_3: qcom,msm-dai-q6-tdm-pri-tx-3 { + compatible = "qcom,msm-dai-q6-tdm"; + qcom,msm-cpudai-tdm-dev-id = <36871>; + qcom,msm-cpudai-tdm-data-align = <0>; + }; + }; + qcom,msm-dai-tdm-sec-tx { compatible = "qcom,msm-dai-tdm"; qcom,msm-cpudai-tdm-group-id = <37137>; diff --git a/arch/arm/boot/dts/qcom/sdm630-pm.dtsi b/arch/arm/boot/dts/qcom/sdm630-pm.dtsi index f10477bab8cf..f39f8b880690 100644 --- a/arch/arm/boot/dts/qcom/sdm630-pm.dtsi +++ b/arch/arm/boot/dts/qcom/sdm630-pm.dtsi @@ -24,7 +24,7 @@ qcom,vctl-timeout-us = <500>; qcom,vctl-port = <0x0>; qcom,phase-port = <0x1>; - qcom,saw2-avs-ctl = <0x101c031>; + qcom,saw2-avs-ctl = <0x1010031>; qcom,saw2-avs-limit = <0x4580458>; qcom,pfm-port = <0x2>; }; @@ -40,7 +40,7 @@ qcom,vctl-timeout-us = <500>; qcom,vctl-port = <0x0>; qcom,phase-port = <0x1>; - qcom,saw2-avs-ctl = <0x101c031>; + qcom,saw2-avs-ctl = <0x1010031>; qcom,saw2-avs-limit = <0x4580458>; qcom,pfm-port = 
<0x2>; }; diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi index 6d73c0eff6c8..5618f02e34f2 100644 --- a/arch/arm/boot/dts/qcom/sdm630.dtsi +++ b/arch/arm/boot/dts/qcom/sdm630.dtsi @@ -1306,18 +1306,9 @@ compatible = "qcom,clk-cpu-osm-sdm630"; reg = <0x179c0000 0x4000>, <0x17916000 0x1000>, <0x17816000 0x1000>, <0x179d1000 0x1000>, - <0x00784130 0x8>, <0x17914800 0x800>, - <0x17814800 0x800>; + <0x00784130 0x8>; reg-names = "osm", "pwrcl_pll", "perfcl_pll", - "apcs_common", "perfcl_efuse", - "pwrcl_acd", "perfcl_acd"; - - qcom,acdtd-val = <0x0000a111 0x0000a111>; - qcom,acdcr-val = <0x002c5ffd 0x002c5ffd>; - qcom,acdsscr-val = <0x00000901 0x00000901>; - qcom,acdextint0-val = <0x2cf9ae8 0x2cf9ae8>; - qcom,acdextint1-val = <0x2cf9afe 0x2cf9afe>; - qcom,acdautoxfer-val = <0x00000015 0x00000015>; + "apcs_common", "perfcl_efuse"; vdd-pwrcl-supply = <&apc0_pwrcl_vreg>; vdd-perfcl-supply = <&apc1_perfcl_vreg>; @@ -2041,6 +2032,7 @@ qcom,qsee-ce-hw-instance = <0>; qcom,disk-encrypt-pipe-pair = <2>; qcom,support-fde; + qcom,fde-key-size; qcom,no-clock-support; qcom,msm-bus,name = "qseecom-noc"; qcom,msm-bus,num-cases = <4>; diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts index b108f7d56e57..30b36c0ac541 100644 --- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts +++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts @@ -85,6 +85,10 @@ <&afe_proxy_rx>, <&afe_proxy_tx>, <&incall_record_rx>, <&incall_record_tx>, <&incall_music_rx>, <&incall_music2_rx>, + <&dai_pri_tdm_tx_0>, <&dai_pri_tdm_tx_1>, + <&dai_pri_tdm_tx_2>, <&dai_pri_tdm_tx_3>, + <&dai_pri_tdm_rx_0>, <&dai_pri_tdm_rx_1>, + <&dai_pri_tdm_rx_2>, <&dai_pri_tdm_rx_3>, <&dai_sec_tdm_tx_0>, <&dai_sec_tdm_tx_1>, <&dai_sec_tdm_tx_2>, <&dai_sec_tdm_tx_3>, <&dai_tert_tdm_rx_0>, <&dai_tert_tdm_rx_1>, @@ -103,6 +107,10 @@ "msm-dai-q6-dev.241", "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771", "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773", "msm-dai-q6-dev.32770", + "msm-dai-q6-tdm.36865", "msm-dai-q6-tdm.36867", + "msm-dai-q6-tdm.36869", "msm-dai-q6-tdm.36871", + "msm-dai-q6-tdm.36864", "msm-dai-q6-tdm.36866", + "msm-dai-q6-tdm.36868", "msm-dai-q6-tdm.36870", "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36883", "msm-dai-q6-tdm.36885", "msm-dai-q6-tdm.36887", "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36898", diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index d2315ffd8f12..f13ae153fb24 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +/* This is the base location for PIE (ET_DYN with INTERP) loads. */ +#define ELF_ET_DYN_BASE 0x400000UL /* When the program starts, a1 contains a pointer to a function to be registered with atexit, as per the SVR4 ABI. 
   have no such handler.  */
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index d8d46abef1e0..ac919d751145 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -320,6 +320,7 @@ CONFIG_SERIAL_MSM_SMD=y
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
 CONFIG_MSM_ADSPRPC=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 7ea2442c9bc5..fe6cf23caeab 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -324,6 +324,7 @@ CONFIG_SERIAL_MSM_SMD=y
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_MSM_LEGACY=y
+# CONFIG_DEVPORT is not set
 CONFIG_MSM_ADSPRPC=y
 CONFIG_MSM_RDBG=m
 CONFIG_I2C_CHARDEV=y
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index a383c288ef49..b98332269462 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -114,12 +114,11 @@
 #define ELF_EXEC_PAGESIZE	PAGE_SIZE
 
 /*
- * This is the location that an ET_DYN program is loaded if exec'ed.  Typical
- * use of this is to invoke "./ld.so someprog" to test out a new version of
- * the loader.  We need to make sure that it is out of the way of the program
- * that it will "exec", and that there is sufficient room for the brk.
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
  */
-#define ELF_ET_DYN_BASE	(2 * TASK_SIZE_64 / 3)
+#define ELF_ET_DYN_BASE		0x100000000UL
 
 #ifndef __ASSEMBLY__
 
@@ -170,7 +169,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 
 #ifdef CONFIG_COMPAT
 
-#define COMPAT_ELF_ET_DYN_BASE		(2 * TASK_SIZE_32 / 3)
+/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
+#define COMPAT_ELF_ET_DYN_BASE		0x000400000UL
 
 /* AArch32 registers. */
 #define COMPAT_ELF_NGREG		18
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index d8d60a57183f..f53725202955 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -39,6 +39,8 @@ struct hppa_dma_ops {
 ** flush/purge and allocate "regular" cacheable pages for everything.
 */
 
+#define DMA_ERROR_CODE	(~(dma_addr_t)0)
+
 #ifdef CONFIG_PA11
 extern struct hppa_dma_ops pcxl_dma_ops;
 extern struct hppa_dma_ops pcx_dma_ops;
@@ -209,12 +211,13 @@ parisc_walk_tree(struct device *dev)
 			break;
 		}
 	}
-	BUG_ON(!dev->platform_data);
 	return dev->platform_data;
 }
 
-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
-
+#define GET_IOC(dev) ({					\
+	void *__pdata = parisc_walk_tree(dev);		\
+	__pdata ? HBA_DATA(__pdata)->iommu : NULL;	\
+})
 
 #ifdef CONFIG_IOMMU_CCIO
 struct parisc_device;
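With the BUG_ON() removed from parisc_walk_tree(), GET_IOC() can now evaluate to NULL, so its callers have to fail gracefully instead of oopsing; the newly defined DMA_ERROR_CODE is the sentinel they return. A sketch of the caller-side pattern this hunk enables (an illustrative function, not one of the actual call sites in this series):

    static dma_addr_t example_map_single(struct device *dev,
                                         void *addr, size_t size)
    {
        struct ioc *ioc = GET_IOC(dev);

        if (!ioc)
            return DMA_ERROR_CODE;  /* sentinel defined in the hunk above */
        /* ... perform the real IOMMU mapping here ... */
        return 0;
    }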
HBA_DATA(__pdata)->iommu : NULL; \ +}) #ifdef CONFIG_IOMMU_CCIO struct parisc_device; diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h index 59be25764433..a81226257878 100644 --- a/arch/parisc/include/asm/mmu_context.h +++ b/arch/parisc/include/asm/mmu_context.h @@ -49,15 +49,26 @@ static inline void load_context(mm_context_t context) mtctl(__space_to_prot(context), 8); } -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) +static inline void switch_mm_irqs_off(struct mm_struct *prev, + struct mm_struct *next, struct task_struct *tsk) { - if (prev != next) { mtctl(__pa(next->pgd), 25); load_context(next->context); } } +static inline void switch_mm(struct mm_struct *prev, + struct mm_struct *next, struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + switch_mm_irqs_off(prev, next, tsk); + local_irq_restore(flags); +} +#define switch_mm_irqs_off switch_mm_irqs_off + #define deactivate_mm(tsk,mm) do { } while (0) static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index d4ffcfbc9885..041e1f9ec129 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -361,7 +361,7 @@ ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */ ENTRY_SAME(add_key) ENTRY_SAME(request_key) /* 265 */ - ENTRY_SAME(keyctl) + ENTRY_COMP(keyctl) ENTRY_SAME(ioprio_set) ENTRY_SAME(ioprio_get) ENTRY_SAME(inotify_init) diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 16dbe81c97c9..2f33a67bc531 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -298,7 +298,7 @@ bad_area: case 15: /* Data TLB miss fault/Data page fault */ /* send SIGSEGV when outside of vma */ if (!vma || - address < vma->vm_start || address > vma->vm_end) { + address < vma->vm_start || address >= vma->vm_end) { si.si_signo = SIGSEGV; si.si_code = SEGV_MAPERR; break; diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index ee46ffef608e..743ad7a400d6 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -23,12 +23,13 @@ #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE PAGE_SIZE -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE 0x20000000 +/* + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. + */ +#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \ + 0x100000000UL) #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0) diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index bab6739a1154..b9eb7b1a49d2 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -154,14 +154,13 @@ extern unsigned int vdso_enabled; #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. 
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index bab6739a1154..b9eb7b1a49d2 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -154,14 +154,13 @@ extern unsigned int vdso_enabled;
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
 
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk.  64-bit
-   tasks are aligned to 4GB. */
-#define ELF_ET_DYN_BASE (is_32bit_task() ? \
-				(STACK_TOP / 3 * 2) : \
-				(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE		(is_compat_task() ? 0x000400000UL : \
+					    0x100000000UL)
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. */
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index dd14616b7739..7de207a11014 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -201,7 +201,7 @@ asmlinkage void sha1_transform_avx2(u32 *digest, const char *data,
 
 static bool avx2_usable(void)
 {
-	if (avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
+	if (false && avx_usable() && boot_cpu_has(X86_FEATURE_AVX2)
 		&& boot_cpu_has(X86_FEATURE_BMI1)
 		&& boot_cpu_has(X86_FEATURE_BMI2))
 		return true;
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index d262f985bbc8..07cf288b692e 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -245,12 +245,13 @@ extern int force_personality32;
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE	4096
 
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader. We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
+					  0x100000000UL)
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports. This could be done in user space,
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 690b4027e17c..37db36fddc88 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -405,6 +405,8 @@
 #define MSR_IA32_TSC_ADJUST             0x0000003b
 #define MSR_IA32_BNDCFGS		0x00000d90
 
+#define MSR_IA32_BNDCFGS_RSVD		0x00000ffc
+
 #define MSR_IA32_XSS			0x00000da0
 
 #define FEATURE_CONTROL_LOCKED				(1<<0)
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 0b1ff4c1c14e..fffb2794dd89 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -7,6 +7,7 @@
 bool pat_enabled(void);
 void pat_disable(const char *reason);
 extern void pat_init(void);
+extern void init_cache_modes(void);
 extern int reserve_memtype(u64 start, u64 end,
 		enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d2bbe343fda7..e67b834279b2 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1048,6 +1048,13 @@ void __init setup_arch(char **cmdline_p)
 	if (mtrr_trim_uncached_memory(max_pfn))
 		max_pfn = e820_end_of_ram_pfn();
 
+	/*
+	 * This call is required when the CPU does not support PAT. If
+	 * mtrr_bp_init() invoked it already via pat_init() the call has no
+	 * effect.
+	 */
+	init_cache_modes();
+
 #ifdef CONFIG_X86_32
 	/* max_low_pfn get updated here */
 	find_low_pfn_range();
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 9357b29de9bc..83d6369c45f5 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -46,11 +46,18 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
 	return ret;
 }
 
+bool kvm_mpx_supported(void)
+{
+	return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
+		 && kvm_x86_ops->mpx_supported());
+}
+EXPORT_SYMBOL_GPL(kvm_mpx_supported);
+
 u64 kvm_supported_xcr0(void)
 {
 	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
 
-	if (!kvm_x86_ops->mpx_supported())
+	if (!kvm_mpx_supported())
 		xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
 
 	return xcr0;
@@ -97,7 +104,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
-	vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
+	vcpu->arch.eager_fpu = use_eager_fpu();
 	if (vcpu->arch.eager_fpu)
 		kvm_x86_ops->fpu_activate(vcpu);
 
@@ -295,7 +302,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 #endif
 	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
 	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
-	unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
+	unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
 	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
 
 	/* cpuid 1.edx */
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 3f5c48ddba45..d1534feefcfe 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -4,6 +4,7 @@
 #include "x86.h"
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
+bool kvm_mpx_supported(void);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function, u32 index);
 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
@@ -134,20 +135,20 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
 	return best && (best->ebx & bit(X86_FEATURE_RTM));
 }
 
-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_MPX));
+	return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
 }
 
-static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
-	return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
+	return best && (best->ebx & bit(X86_FEATURE_MPX));
 }
 
 static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bbaa11f4e74b..b12391119ce8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -863,7 +863,6 @@
 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
-static bool vmx_mpx_supported(void);
 static bool vmx_xsaves_supported(void);
 static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
@@ -2541,7 +2540,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
 		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
 
-	if (vmx_mpx_supported())
+	if (kvm_mpx_supported())
 		vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
 
 	/* We support free control of debug control saving. */
@@ -2562,7 +2561,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 		VM_ENTRY_LOAD_IA32_PAT;
 	vmx->nested.nested_vmx_entry_ctls_high |=
 		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
-	if (vmx_mpx_supported())
+	if (kvm_mpx_supported())
 		vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
 
 	/* We support free control of debug control loading. */
@@ -2813,7 +2812,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	case MSR_IA32_BNDCFGS:
-		if (!vmx_mpx_supported())
+		if (!kvm_mpx_supported() ||
+		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
 			return 1;
 		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
 		break;
@@ -2890,7 +2890,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_BNDCFGS:
-		if (!vmx_mpx_supported())
+		if (!kvm_mpx_supported() ||
+		    (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+			return 1;
+		if (is_noncanonical_address(data & PAGE_MASK) ||
+		    (data & MSR_IA32_BNDCFGS_RSVD))
 			return 1;
 		vmcs_write64(GUEST_BNDCFGS, data);
 		break;
@@ -3363,7 +3367,7 @@ static void init_vmcs_shadow_fields(void)
 	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
 		switch (shadow_read_write_fields[i]) {
 		case GUEST_BNDCFGS:
-			if (!vmx_mpx_supported())
+			if (!kvm_mpx_supported())
 				continue;
 			break;
 		default:
@@ -6253,7 +6257,6 @@ static __init int hardware_setup(void)
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-	vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
 
 	memcpy(vmx_msr_bitmap_legacy_x2apic,
 			vmx_msr_bitmap_legacy, PAGE_SIZE);
@@ -10265,7 +10268,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
 	vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
 	vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
-	if (vmx_mpx_supported())
+	if (kvm_mpx_supported())
 		vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
 	if (nested_cpu_has_xsaves(vmcs12))
 		vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
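The new vmx_set_msr() check rejects guest BNDCFGS values whose bound-directory base is non-canonical or which set reserved bits; per the Intel SDM layout, bit 0 is EN, bit 1 is BNDPRESERVE, bits 2-11 are reserved (hence MSR_IA32_BNDCFGS_RSVD = 0xffc), and bits 12 and up hold the base address. A standalone restatement of the validation (a sketch, with 48 virtual-address bits assumed):

    #include <stdbool.h>
    #include <stdint.h>

    #define BNDCFGS_RSVD 0x0000000000000ffcULL  /* bits 2-11 */

    static bool bndcfgs_write_ok(uint64_t data)
    {
        uint64_t base = data & ~0xfffULL;  /* PAGE_MASK equivalent */
        /* canonical <=> bits 47..63 are a sign-extension of bit 47 */
        int64_t sext = ((int64_t)(base << 16)) >> 16;
        bool canonical = ((uint64_t)sext == base);

        return canonical && !(data & BNDCFGS_RSVD);
    }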
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 27f89c79a44b..423644c230e7 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -80,7 +80,7 @@ ENTRY(copy_user_generic_unrolled)
 	movl %edx,%ecx
 	andl $63,%edx
 	shrl $6,%ecx
-	jz 17f
+	jz .L_copy_short_string
 1:	movq (%rsi),%r8
 2:	movq 1*8(%rsi),%r9
 3:	movq 2*8(%rsi),%r10
@@ -101,7 +101,8 @@ ENTRY(copy_user_generic_unrolled)
 	leaq 64(%rdi),%rdi
 	decl %ecx
 	jnz 1b
-17:	movl %edx,%ecx
+.L_copy_short_string:
+	movl %edx,%ecx
 	andl $7,%edx
 	shrl $3,%ecx
 	jz 20f
@@ -215,6 +216,8 @@ ENDPROC(copy_user_generic_string)
  */
 ENTRY(copy_user_enhanced_fast_string)
 	ASM_STAC
+	cmpl $64,%edx
+	jb .L_copy_short_string	/* less than 64 bytes, avoid the costly 'rep' */
 	movl %edx,%ecx
 1:	rep
 	movsb
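The copy_user_enhanced_fast_string change above reuses the unrolled routine's short-string tail for copies under 64 bytes, because `rep movsb` (ERMS) has a startup cost that only amortizes on larger copies. A C analogue of the dispatch (the 64-byte threshold mirrors the `cmpl $64,%edx` above):

    #include <stddef.h>

    #define ERMS_THRESHOLD 64

    static void *copy_bytes(void *dst, const void *src, size_t n)
    {
        unsigned char *d = dst;
        const unsigned char *s = src;

        if (n < ERMS_THRESHOLD) {       /* short-string path */
            while (n--)
                *d++ = *s++;
            return dst;
        }
        return __builtin_memcpy(dst, src, n); /* stands in for 'rep movsb' */
    }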
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 6ad687d104ca..3f1bb4f93a5a 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -36,14 +36,14 @@
 #undef pr_fmt
 #define pr_fmt(fmt) "" fmt
 
-static bool boot_cpu_done;
-
-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
-static void init_cache_modes(void);
+static bool __read_mostly boot_cpu_done;
+static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __read_mostly pat_initialized;
+static bool __read_mostly init_cm_done;
 
 void pat_disable(const char *reason)
 {
-	if (!__pat_enabled)
+	if (pat_disabled)
 		return;
 
 	if (boot_cpu_done) {
@@ -51,10 +51,8 @@ void pat_disable(const char *reason)
 		return;
 	}
 
-	__pat_enabled = 0;
+	pat_disabled = true;
 	pr_info("x86/PAT: %s\n", reason);
-
-	init_cache_modes();
 }
 
 static int __init nopat(char *str)
@@ -66,7 +64,7 @@ early_param("nopat", nopat);
 
 bool pat_enabled(void)
 {
-	return !!__pat_enabled;
+	return pat_initialized;
 }
 EXPORT_SYMBOL_GPL(pat_enabled);
 
@@ -204,6 +202,8 @@ static void __init_cache_modes(u64 pat)
 		update_cache_mode_entry(i, cache);
 	}
 	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
+
+	init_cm_done = true;
 }
 
 #define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))
@@ -224,6 +224,7 @@ static void pat_bsp_init(u64 pat)
 	}
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
+	pat_initialized = true;
 
 	__init_cache_modes(pat);
 }
@@ -241,10 +242,9 @@ static void pat_ap_init(u64 pat)
 	wrmsrl(MSR_IA32_CR_PAT, pat);
 }
 
-static void init_cache_modes(void)
+void init_cache_modes(void)
 {
 	u64 pat = 0;
-	static int init_cm_done;
 
 	if (init_cm_done)
 		return;
@@ -286,8 +286,6 @@ static void init_cache_modes(void)
 	}
 
 	__init_cache_modes(pat);
-
-	init_cm_done = 1;
 }
 
 /**
@@ -305,10 +303,8 @@ void pat_init(void)
 	u64 pat;
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 
-	if (!pat_enabled()) {
-		init_cache_modes();
+	if (pat_disabled)
 		return;
-	}
 
 	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
 	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 0c2fae8d929d..73eb7fd4aec4 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -992,11 +992,12 @@ static void emit_relocs(int as_text, int use_real_mode)
 		die("Segment relocations found but --realmode not specified\n");
 
 	/* Order the relocations for more efficient processing */
-	sort_relocs(&relocs16);
 	sort_relocs(&relocs32);
 #if ELF_BITS == 64
 	sort_relocs(&relocs32neg);
 	sort_relocs(&relocs64);
+#else
+	sort_relocs(&relocs16);
 #endif
 
 	/* Print the relocations */
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 3b7e4b072c58..4b7c726bb560 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,3 @@
 ccflags-y += -I$(src)			# needed for trace events
 
-obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o
+obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o binder_alloc.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 0c3cf182e351..7b30dcfabd48 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -15,17 +15,49 @@
  *
  */
 
+/*
+ * Locking overview
+ *
+ * There are 3 main spinlocks which must be acquired in the
+ * order shown:
+ *
+ * 1) proc->outer_lock : protects binder_ref
+ *    binder_proc_lock() and binder_proc_unlock() are
+ *    used to acq/rel.
+ * 2) node->lock : protects most fields of binder_node.
+ *    binder_node_lock() and binder_node_unlock() are
+ *    used to acq/rel
+ * 3) proc->inner_lock : protects the thread and node lists
+ *    (proc->threads, proc->waiting_threads, proc->nodes)
+ *    and all todo lists associated with the binder_proc
+ *    (proc->todo, thread->todo, proc->delivered_death and
+ *    node->async_todo), as well as thread->transaction_stack
+ *    binder_inner_proc_lock() and binder_inner_proc_unlock()
+ *    are used to acq/rel
+ *
+ * Any lock under procA must never be nested under any lock at the same
+ * level or below on procB.
+ *
+ * Functions that require a lock held on entry indicate which lock
+ * in the suffix of the function name:
+ *
+ * foo_olocked() : requires node->outer_lock
+ * foo_nlocked() : requires node->lock
+ * foo_ilocked() : requires proc->inner_lock
+ * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
+ * foo_nilocked(): requires node->lock and proc->inner_lock
+ * ...
+ */
+
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <asm/cacheflush.h>
-#include <linux/atomic.h>
 #include <linux/fdtable.h>
 #include <linux/file.h>
 #include <linux/freezer.h>
 #include <linux/fs.h>
 #include <linux/list.h>
 #include <linux/miscdevice.h>
-#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/nsproxy.h>
@@ -35,23 +67,32 @@
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
 #include <linux/pid_namespace.h>
 #include <linux/security.h>
+#include <linux/spinlock.h>
 
 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
 #define BINDER_IPC_32BIT 1
 #endif
 
 #include <uapi/linux/android/binder.h>
+#include "binder_alloc.h"
 #include "binder_trace.h"
 
+static HLIST_HEAD(binder_deferred_list);
+static DEFINE_MUTEX(binder_deferred_lock);
+
 static HLIST_HEAD(binder_devices);
+static HLIST_HEAD(binder_procs);
+static DEFINE_MUTEX(binder_procs_lock);
+
+static HLIST_HEAD(binder_dead_nodes);
+static DEFINE_SPINLOCK(binder_dead_nodes_lock);
 
 static struct dentry *binder_debugfs_dir_entry_root;
 static struct dentry *binder_debugfs_dir_entry_proc;
-atomic_t binder_last_id;
+static atomic_t binder_last_id;
+static struct workqueue_struct *binder_deferred_workqueue;
 
 #define BINDER_DEBUG_ENTRY(name) \
 static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -97,17 +138,13 @@ enum {
 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
-	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
-	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
-	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
+	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
+	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
 };
 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
 
-static bool binder_debug_no_lock;
-module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
-
 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
 module_param_named(devices, binder_devices_param, charp, S_IRUGO);
 
@@ -164,30 +201,27 @@ enum binder_stat_types {
 };
 
 struct binder_stats {
-	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
-	int bc[_IOC_NR(BC_REPLY_SG) + 1];
-};
-
-/* These are still global, since it's not always easy to get the context */
-struct binder_obj_stats {
+	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
 	atomic_t obj_created[BINDER_STAT_COUNT];
 	atomic_t obj_deleted[BINDER_STAT_COUNT];
 };
 
-static struct binder_obj_stats binder_obj_stats;
+static struct binder_stats binder_stats;
 
 static inline void binder_stats_deleted(enum binder_stat_types type)
 {
-	atomic_inc(&binder_obj_stats.obj_deleted[type]);
+	atomic_inc(&binder_stats.obj_deleted[type]);
 }
 
 static inline void binder_stats_created(enum binder_stat_types type)
 {
-	atomic_inc(&binder_obj_stats.obj_created[type]);
+	atomic_inc(&binder_stats.obj_created[type]);
 }
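The locking overview near the top of this file fixes a strict order (1: proc->outer_lock, 2: node->lock, 3: proc->inner_lock), with the _olocked/_nlocked/_ilocked suffixes naming each function's precondition. A sketch of a compliant nested acquisition using the helpers this patch introduces further down (and assuming node->proc is non-NULL, as binder_node_inner_lock() itself checks):

    static void example_nilocked_section(struct binder_node *node)
    {
        binder_node_lock(node);               /* level 2 */
        binder_inner_proc_lock(node->proc);   /* level 3 nests below 2 */
        /* ... code that a foo_nilocked() helper could assume ... */
        binder_inner_proc_unlock(node->proc);
        binder_node_unlock(node);
    }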
 
 struct binder_transaction_log_entry {
 	int debug_id;
+	int debug_id_done;
 	int call_type;
 	int from_proc;
 	int from_thread;
@@ -197,48 +231,45 @@ struct binder_transaction_log_entry {
 	int to_node;
 	int data_size;
 	int offsets_size;
+	int return_error_line;
+	uint32_t return_error;
+	uint32_t return_error_param;
 	const char *context_name;
 };
 
 struct binder_transaction_log {
-	int next;
-	int full;
+	atomic_t cur;
+	bool full;
 	struct binder_transaction_log_entry entry[32];
 };
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
 
 static struct binder_transaction_log_entry *binder_transaction_log_add(
 	struct binder_transaction_log *log)
 {
 	struct binder_transaction_log_entry *e;
+	unsigned int cur = atomic_inc_return(&log->cur);
 
-	e = &log->entry[log->next];
-	memset(e, 0, sizeof(*e));
-	log->next++;
-	if (log->next == ARRAY_SIZE(log->entry)) {
-		log->next = 0;
+	if (cur >= ARRAY_SIZE(log->entry))
 		log->full = 1;
-	}
+	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
+	WRITE_ONCE(e->debug_id_done, 0);
+	/*
+	 * write-barrier to synchronize access to e->debug_id_done.
+	 * We make sure the initialized 0 value is seen before
+	 * the other fields are zeroed by memset().
+	 */
+	smp_wmb();
+	memset(e, 0, sizeof(*e));
 	return e;
 }
 
 struct binder_context {
 	struct binder_node *binder_context_mgr_node;
+	struct mutex context_mgr_node_lock;
+
 	kuid_t binder_context_mgr_uid;
 	const char *name;
-
-	struct mutex binder_main_lock;
-	struct mutex binder_deferred_lock;
-	struct mutex binder_mmap_lock;
-
-	struct hlist_head binder_procs;
-	struct hlist_head binder_dead_nodes;
-	struct hlist_head binder_deferred_list;
-
-	struct work_struct deferred_work;
-	struct workqueue_struct *binder_deferred_workqueue;
-	struct binder_transaction_log transaction_log;
-	struct binder_transaction_log transaction_log_failed;
-
-	struct binder_stats binder_stats;
 };
 
 struct binder_device {
@@ -247,11 +278,20 @@ struct binder_device {
 	struct binder_context context;
 };
 
+/**
+ * struct binder_work - work enqueued on a worklist
+ * @entry:             node enqueued on list
+ * @type:              type of work to be performed
+ *
+ * There are separate work lists for proc, thread, and node (async).
+ */
 struct binder_work {
 	struct list_head entry;
+
 	enum {
 		BINDER_WORK_TRANSACTION = 1,
 		BINDER_WORK_TRANSACTION_COMPLETE,
+		BINDER_WORK_RETURN_ERROR,
 		BINDER_WORK_NODE,
 		BINDER_WORK_DEAD_BINDER,
 		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
@@ -259,8 +299,76 @@ struct binder_work {
 	} type;
 };
 
+struct binder_error {
+	struct binder_work work;
+	uint32_t cmd;
+};
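binder_transaction_log_add() above turns the log into a lock-free ring: atomic_inc_return() claims a slot, `cur % ARRAY_SIZE()` indexes it, and the WRITE_ONCE(debug_id_done, 0) plus smp_wmb() pair lets readers detect a slot that is mid-rewrite. A standalone, userspace-flavored sketch of the same claim protocol, with C11 atomics standing in for atomic_t and smp_wmb():

    #include <stdatomic.h>

    #define RING_SIZE 32

    struct entry {
        _Atomic int done;   /* 0 while the slot is being rewritten */
        int payload;
    };

    static struct entry ring[RING_SIZE];
    static _Atomic unsigned int cur;

    static struct entry *ring_claim(void)
    {
        unsigned int slot = atomic_fetch_add(&cur, 1) % RING_SIZE;
        struct entry *e = &ring[slot];

        atomic_store_explicit(&e->done, 0, memory_order_relaxed);
        /* store-store barrier, the common analogue of smp_wmb() */
        atomic_thread_fence(memory_order_release);
        e->payload = 0;     /* now scribble; readers first see done == 0 */
        return e;
    }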
+/**
+ * struct binder_node - binder node bookkeeping
+ * @debug_id:             unique ID for debugging
+ *                        (invariant after initialized)
+ * @lock:                 lock for node fields
+ * @work:                 worklist element for node work
+ *                        (protected by @proc->inner_lock)
+ * @rb_node:              element for proc->nodes tree
+ *                        (protected by @proc->inner_lock)
+ * @dead_node:            element for binder_dead_nodes list
+ *                        (protected by binder_dead_nodes_lock)
+ * @proc:                 binder_proc that owns this node
+ *                        (invariant after initialized)
+ * @refs:                 list of references on this node
+ *                        (protected by @lock)
+ * @internal_strong_refs: used to take strong references when
+ *                        initiating a transaction
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @local_weak_refs:      weak user refs from local process
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @local_strong_refs:    strong user refs from local process
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @tmp_refs:             temporary kernel refs
+ *                        (protected by @proc->inner_lock while @proc
+ *                        is valid, and by binder_dead_nodes_lock
+ *                        if @proc is NULL. During inc/dec and node release
+ *                        it is also protected by @lock to provide safety
+ *                        as the node dies and @proc becomes NULL)
+ * @ptr:                  userspace pointer for node
+ *                        (invariant, no lock needed)
+ * @cookie:               userspace cookie for node
+ *                        (invariant, no lock needed)
+ * @has_strong_ref:       userspace notified of strong ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @pending_strong_ref:   userspace has acked notification of strong ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @has_weak_ref:         userspace notified of weak ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @pending_weak_ref:     userspace has acked notification of weak ref
+ *                        (protected by @proc->inner_lock if @proc
+ *                        and by @lock)
+ * @has_async_transaction: async transaction to node in progress
+ *                        (protected by @lock)
+ * @sched_policy:         minimum scheduling policy for node
+ *                        (invariant after initialized)
+ * @accept_fds:           file descriptor operations supported for node
+ *                        (invariant after initialized)
+ * @min_priority:         minimum scheduling priority
+ *                        (invariant after initialized)
+ * @inherit_rt:           inherit RT scheduling policy from caller
+ *                        (invariant after initialized)
+ * @async_todo:           list of async work items
+ *                        (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder nodes.
+ */
 struct binder_node {
 	int debug_id;
+	spinlock_t lock;
 	struct binder_work work;
 	union {
 		struct rb_node rb_node;
 	int internal_strong_refs;
 	int local_weak_refs;
 	int local_strong_refs;
+	int tmp_refs;
 	binder_uintptr_t ptr;
 	binder_uintptr_t cookie;
-	unsigned has_strong_ref:1;
-	unsigned pending_strong_ref:1;
-	unsigned has_weak_ref:1;
-	unsigned pending_weak_ref:1;
-	unsigned has_async_transaction:1;
-	unsigned accept_fds:1;
-	unsigned min_priority:8;
+	struct {
+		/*
+		 * bitfield elements protected by
+		 * proc inner_lock
+		 */
+		u8 has_strong_ref:1;
+		u8 pending_strong_ref:1;
+		u8 has_weak_ref:1;
+		u8 pending_weak_ref:1;
+	};
+	struct {
+		/*
+		 * invariant after initialization
+		 */
+		u8 sched_policy:2;
+		u8 inherit_rt:1;
+		u8 accept_fds:1;
+		u8 min_priority;
+	};
+	bool has_async_transaction;
 	struct list_head async_todo;
 };
 
 struct binder_ref_death {
+	/**
+	 * @work: worklist element for death notifications
+	 *        (protected by inner_lock of the proc that
+	 *        this ref belongs to)
+	 */
 	struct binder_work work;
 	binder_uintptr_t cookie;
 };
 
+/**
+ * struct binder_ref_data - binder_ref counts and id
+ * @debug_id:        unique ID for the ref
+ * @desc:            unique userspace handle for ref
+ * @strong:          strong ref count (debugging only if not locked)
+ * @weak:            weak ref count (debugging only if not locked)
+ *
+ * Structure to hold ref count and ref id information. Since
+ * the actual ref can only be accessed with a lock, this structure
+ * is used to return information about the ref to callers of
+ * ref inc/dec functions.
+ */
+struct binder_ref_data {
+	int debug_id;
+	uint32_t desc;
+	int strong;
+	int weak;
+};
+
+/**
+ * struct binder_ref - struct to track references on nodes
+ * @data:        binder_ref_data containing id, handle, and current refcounts
+ * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
+ * @rb_node_node: node for lookup by @node in proc's rb_tree
+ * @node_entry:  list entry for node->refs list in target node
+ *               (protected by @node->lock)
+ * @proc:        binder_proc containing ref
+ * @node:        binder_node of target node. When cleaning up a
+ *               ref for deletion in binder_cleanup_ref, a non-NULL
+ *               @node indicates the node must be freed
+ * @death:       pointer to death notification (ref_death) if requested
+ *               (protected by @node->lock)
+ *
+ * Structure to track references from procA to target node (on procB). This
+ * structure is unsafe to access without holding @proc->outer_lock.
+ */
 struct binder_ref {
 	/* Lookups needed: */
 	/*   node + proc => ref (transaction) */
 	/*   desc + proc => ref (transaction, inc/dec ref) */
 	/*   node => refs + procs (proc exit) */
-	int debug_id;
+	struct binder_ref_data data;
 	struct rb_node rb_node_desc;
 	struct rb_node rb_node_node;
 	struct hlist_node node_entry;
 	struct binder_proc *proc;
 	struct binder_node *node;
-	uint32_t desc;
-	int strong;
-	int weak;
 	struct binder_ref_death *death;
 };
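struct binder_ref_data exists because the ref itself may only be touched under proc->outer_lock: the inc/dec helpers copy the counts into this plain struct and hand the copy back, which the caller can then read without any lock. A sketch of that snapshot-under-lock pattern (binder_get_ref_data() is an illustrative name, not a function from this patch):

    static struct binder_ref_data
    binder_get_ref_data(struct binder_proc *proc, struct binder_ref *ref)
    {
        struct binder_ref_data copy;

        binder_proc_lock(proc); /* @proc->outer_lock protects the ref */
        copy = ref->data;       /* struct copy while the counts are stable */
        binder_proc_unlock(proc);
        return copy;            /* safe to inspect lock-free */
    }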
-struct binder_buffer {
-	struct list_head entry; /* free and allocated entries by address */
-	struct rb_node rb_node; /* free entry by size or allocated entry */
-				/* by address */
-	unsigned free:1;
-	unsigned allow_user_free:1;
-	unsigned async_transaction:1;
-	unsigned debug_id:29;
-
-	struct binder_transaction *transaction;
-
-	struct binder_node *target_node;
-	size_t data_size;
-	size_t offsets_size;
-	size_t extra_buffers_size;
-	uint8_t data[0];
-};
-
 enum binder_deferred_state {
 	BINDER_DEFERRED_PUT_FILES    = 0x01,
 	BINDER_DEFERRED_FLUSH        = 0x02,
 	BINDER_DEFERRED_RELEASE      = 0x04,
 };
 
+/**
+ * struct binder_priority - scheduler policy and priority
+ * @sched_policy:      scheduler policy
+ * @prio:              [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
+ *
+ * The binder driver supports inheriting the following scheduler policies:
+ * SCHED_NORMAL
+ * SCHED_BATCH
+ * SCHED_FIFO
+ * SCHED_RR
+ */
+struct binder_priority {
+	unsigned int sched_policy;
+	int prio;
+};
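The 100..139 range documented for SCHED_NORMAL is the kernel's internal priority scale, in which MAX_RT_PRIO (100 in mainline) marks the end of the RT range and a nice value maps in as MAX_RT_PRIO + nice + 20 (so nice 0 is prio 120). The conversions, restated:

    #define MAX_RT_PRIO 100  /* matches mainline */

    static int nice_to_prio(int nice)  /* -20..19 -> 100..139 */
    {
        return MAX_RT_PRIO + nice + 20;
    }

    static int prio_to_nice(int prio)  /* 100..139 -> -20..19 */
    {
        return prio - MAX_RT_PRIO - 20;
    }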
+/**
+ * struct binder_proc - binder process bookkeeping
+ * @proc_node:            element for binder_procs list
+ * @threads:              rbtree of binder_threads in this proc
+ *                        (protected by @inner_lock)
+ * @nodes:                rbtree of binder nodes associated with
+ *                        this proc ordered by node->ptr
+ *                        (protected by @inner_lock)
+ * @refs_by_desc:         rbtree of refs ordered by ref->desc
+ *                        (protected by @outer_lock)
+ * @refs_by_node:         rbtree of refs ordered by ref->node
+ *                        (protected by @outer_lock)
+ * @waiting_threads:      threads currently waiting for proc work
+ *                        (protected by @inner_lock)
+ * @pid:                  PID of group_leader of process
+ *                        (invariant after initialized)
+ * @tsk:                  task_struct for group_leader of process
+ *                        (invariant after initialized)
+ * @files:                files_struct for process
+ *                        (invariant after initialized)
+ * @deferred_work_node:   element for binder_deferred_list
+ *                        (protected by binder_deferred_lock)
+ * @deferred_work:        bitmap of deferred work to perform
+ *                        (protected by binder_deferred_lock)
+ * @is_dead:              process is dead and awaiting free
+ *                        when outstanding transactions are cleaned up
+ *                        (protected by @inner_lock)
+ * @todo:                 list of work for this process
+ *                        (protected by @inner_lock)
+ * @wait:                 wait queue head to wait for proc work
+ *                        (invariant after initialized)
+ * @stats:                per-process binder statistics
+ *                        (atomics, no lock needed)
+ * @delivered_death:      list of delivered death notifications
+ *                        (protected by @inner_lock)
+ * @max_threads:          cap on number of binder threads
+ *                        (protected by @inner_lock)
+ * @requested_threads:    number of binder threads requested but not
+ *                        yet started. In current implementation, can
+ *                        only be 0 or 1.
+ *                        (protected by @inner_lock)
+ * @requested_threads_started: number of binder threads started
+ *                        (protected by @inner_lock)
+ * @tmp_ref:              temporary reference to indicate proc is in use
+ *                        (protected by @inner_lock)
+ * @default_priority:     default scheduler priority
+ *                        (invariant after initialized)
+ * @debugfs_entry:        debugfs node
+ * @alloc:                binder allocator bookkeeping
+ * @context:              binder_context for this proc
+ *                        (invariant after initialized)
+ * @inner_lock:           can nest under outer_lock and/or node lock
+ * @outer_lock:           no nesting under inner or node lock
+ *                        Lock order: 1) outer, 2) node, 3) inner
+ *
+ * Bookkeeping structure for binder processes
+ */
 struct binder_proc {
 	struct hlist_node proc_node;
 	struct rb_root threads;
 	struct rb_root nodes;
 	struct rb_root refs_by_desc;
 	struct rb_root refs_by_node;
+	struct list_head waiting_threads;
 	int pid;
-	struct vm_area_struct *vma;
-	struct mm_struct *vma_vm_mm;
 	struct task_struct *tsk;
 	struct files_struct *files;
 	struct hlist_node deferred_work_node;
 	int deferred_work;
-	void *buffer;
-	ptrdiff_t user_buffer_offset;
-
-	struct list_head buffers;
-	struct rb_root free_buffers;
-	struct rb_root allocated_buffers;
-	size_t free_async_space;
+	bool is_dead;
 
-	struct page **pages;
-	size_t buffer_size;
-	uint32_t buffer_free;
 	struct list_head todo;
 	wait_queue_head_t wait;
 	struct binder_stats stats;
@@ -360,10 +565,13 @@ struct binder_proc {
 	int max_threads;
 	int requested_threads;
 	int requested_threads_started;
-	int ready_threads;
-	long default_priority;
+	int tmp_ref;
+	struct binder_priority default_priority;
 	struct dentry *debugfs_entry;
+	struct binder_alloc alloc;
 	struct binder_context *context;
+	spinlock_t inner_lock;
+	spinlock_t outer_lock;
 };
 
 enum {
@@ -372,22 +580,60 @@ enum {
 	BINDER_LOOPER_STATE_EXITED      = 0x04,
 	BINDER_LOOPER_STATE_INVALID     = 0x08,
 	BINDER_LOOPER_STATE_WAITING     = 0x10,
-	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+	BINDER_LOOPER_STATE_POLL        = 0x20,
 };
 
+/**
+ * struct binder_thread - binder thread bookkeeping
+ * @proc:                 binder process for this thread
+ *                        (invariant after initialization)
+ * @rb_node:              element for proc->threads rbtree
+ *                        (protected by @proc->inner_lock)
+ * @waiting_thread_node:  element for @proc->waiting_threads list
+ *                        (protected by @proc->inner_lock)
+ * @pid:                  PID for this thread
+ *                        (invariant after initialization)
+ * @looper:               bitmap of looping state
+ *                        (only accessed by this thread)
+ * @looper_need_return:   looping thread needs to exit driver
+ *                        (no lock needed)
+ * @transaction_stack:    stack of in-progress transactions for this thread
+ *                        (protected by @proc->inner_lock)
+ * @todo:                 list of work to do for this thread
+ *                        (protected by @proc->inner_lock)
+ * @return_error:         transaction errors reported by this thread
+ *                        (only accessed by this thread)
+ * @reply_error:          transaction errors reported by target thread
+ *                        (protected by @proc->inner_lock)
+ * @wait:                 wait queue for thread work
+ * @stats:                per-thread statistics
+ *                        (atomics, no lock needed)
+ * @tmp_ref:              temporary reference to indicate thread is in use
+ *                        (atomic since @proc->inner_lock cannot
+ *                        always be acquired)
+ * @is_dead:              thread is dead and awaiting free
+ *                        when outstanding transactions are cleaned up
+ *                        (protected by @proc->inner_lock)
+ * @task:                 struct task_struct for this thread
+ *
+ * Bookkeeping structure for binder threads.
+ */
+ */ struct binder_thread { struct binder_proc *proc; struct rb_node rb_node; + struct list_head waiting_thread_node; int pid; - int looper; + int looper; /* only modified by this thread */ + bool looper_need_return; /* can be written by other thread */ struct binder_transaction *transaction_stack; struct list_head todo; - uint32_t return_error; /* Write failed, return error code in read buf */ - uint32_t return_error2; /* Write failed, return error code in read */ - /* buffer. Used when sending a reply to a dead process that */ - /* we are also waiting on */ + struct binder_error return_error; + struct binder_error reply_error; wait_queue_head_t wait; struct binder_stats stats; + atomic_t tmp_ref; + bool is_dead; + struct task_struct *task; }; struct binder_transaction { @@ -404,20 +650,263 @@ struct binder_transaction { struct binder_buffer *buffer; unsigned int code; unsigned int flags; - long priority; - long saved_priority; + struct binder_priority priority; + struct binder_priority saved_priority; + bool set_priority_called; kuid_t sender_euid; + /** + * @lock: protects @from, @to_proc, and @to_thread + * + * @from, @to_proc, and @to_thread can be set to NULL + * during thread teardown + */ + spinlock_t lock; }; +/** + * binder_proc_lock() - Acquire outer lock for given binder_proc + * @proc: struct binder_proc to acquire + * + * Acquires proc->outer_lock. Used to protect binder_ref + * structures associated with the given proc. + */ +#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__) +static void +_binder_proc_lock(struct binder_proc *proc, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_lock(&proc->outer_lock); +} + +/** + * binder_proc_unlock() - Release spinlock for given binder_proc + * @proc: struct binder_proc to acquire + * + * Release lock acquired via binder_proc_lock() + */ +#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__) +static void +_binder_proc_unlock(struct binder_proc *proc, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_unlock(&proc->outer_lock); +} + +/** + * binder_inner_proc_lock() - Acquire inner lock for given binder_proc + * @proc: struct binder_proc to acquire + * + * Acquires proc->inner_lock. Used to protect todo lists + */ +#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__) +static void +_binder_inner_proc_lock(struct binder_proc *proc, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_lock(&proc->inner_lock); +} + +/** + * binder_inner_proc_unlock() - Release inner lock for given binder_proc + * @proc: struct binder_proc to acquire + * + * Release lock acquired via binder_inner_proc_lock() + */ +#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__) +static void +_binder_inner_proc_unlock(struct binder_proc *proc, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_unlock(&proc->inner_lock); +} + +/** + * binder_node_lock() - Acquire spinlock for given binder_node + * @node: struct binder_node to acquire + * + * Acquires node->lock. 
Used to protect binder_node fields + */ +#define binder_node_lock(node) _binder_node_lock(node, __LINE__) +static void +_binder_node_lock(struct binder_node *node, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_lock(&node->lock); +} + +/** + * binder_node_unlock() - Release spinlock for given binder_node + * @node: struct binder_node to acquire + * + * Release lock acquired via binder_node_lock() + */ +#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__) +static void +_binder_node_unlock(struct binder_node *node, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_unlock(&node->lock); +} + +/** + * binder_node_inner_lock() - Acquire node and inner locks + * @node: struct binder_node to acquire + * + * Acquires node->lock. If node->proc, also acquires + * proc->inner_lock. Used to protect binder_node fields + */ +#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__) +static void +_binder_node_inner_lock(struct binder_node *node, int line) +{ + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + spin_lock(&node->lock); + if (node->proc) + binder_inner_proc_lock(node->proc); +} + +/** + * binder_node_inner_unlock() - Release node and inner locks + * @node: struct binder_node to acquire + * + * Release locks acquired via binder_node_inner_lock() + */ +#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__) +static void +_binder_node_inner_unlock(struct binder_node *node, int line) +{ + struct binder_proc *proc = node->proc; + + binder_debug(BINDER_DEBUG_SPINLOCKS, + "%s: line=%d\n", __func__, line); + if (proc) + binder_inner_proc_unlock(proc); + spin_unlock(&node->lock); +} + +static bool binder_worklist_empty_ilocked(struct list_head *list) +{ + return list_empty(list); +} + +/** + * binder_worklist_empty() - Check if no items on the work list + * @proc: binder_proc associated with list + * @list: list to check + * + * Return: true if there are no items on list, else false + */ +static bool binder_worklist_empty(struct binder_proc *proc, + struct list_head *list) +{ + bool ret; + + binder_inner_proc_lock(proc); + ret = binder_worklist_empty_ilocked(list); + binder_inner_proc_unlock(proc); + return ret; +} + +static void +binder_enqueue_work_ilocked(struct binder_work *work, + struct list_head *target_list) +{ + BUG_ON(target_list == NULL); + BUG_ON(work->entry.next && !list_empty(&work->entry)); + list_add_tail(&work->entry, target_list); +} + +/** + * binder_enqueue_work() - Add an item to the work list + * @proc: binder_proc associated with list + * @work: struct binder_work to add to list + * @target_list: list to add work to + * + * Adds the work to the specified list. Asserts that work + * is not already on a list. + */ +static void +binder_enqueue_work(struct binder_proc *proc, + struct binder_work *work, + struct list_head *target_list) +{ + binder_inner_proc_lock(proc); + binder_enqueue_work_ilocked(work, target_list); + binder_inner_proc_unlock(proc); +} + +static void +binder_dequeue_work_ilocked(struct binder_work *work) +{ + list_del_init(&work->entry); +} + +/** + * binder_dequeue_work() - Removes an item from the work list + * @proc: binder_proc associated with list + * @work: struct binder_work to remove from list + * + * Removes the specified work item from whatever list it is on. + * Can safely be called if work is not on any list.
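Taken together, these helpers encode the lock order documented on struct binder_proc above (1) outer, 2) node, 3) inner). A minimal illustrative sketch, not part of the patch itself, of a caller that needs all three locks:

    binder_proc_lock(proc);        /* 1) outer_lock: protects the ref rb-trees */
    binder_node_lock(node);        /* 2) node->lock: protects binder_node fields */
    binder_inner_proc_lock(proc);  /* 3) inner_lock: protects todo lists and threads */
    /* ... critical section ... */
    binder_inner_proc_unlock(proc);
    binder_node_unlock(node);
    binder_proc_unlock(proc);

binder_node_inner_lock()/binder_node_inner_unlock() package steps 2) and 3) for the common case where both are needed.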
+ */ +static void +binder_dequeue_work(struct binder_proc *proc, struct binder_work *work) +{ + binder_inner_proc_lock(proc); + binder_dequeue_work_ilocked(work); + binder_inner_proc_unlock(proc); +} + +static struct binder_work *binder_dequeue_work_head_ilocked( + struct list_head *list) +{ + struct binder_work *w; + + w = list_first_entry_or_null(list, struct binder_work, entry); + if (w) + list_del_init(&w->entry); + return w; +} + +/** + * binder_dequeue_work_head() - Dequeues the item at head of list + * @proc: binder_proc associated with list + * @list: list to dequeue head + * + * Removes the head of the list if there are items on the list + * + * Return: pointer to dequeued binder_work, NULL if list was empty + */ +static struct binder_work *binder_dequeue_work_head( + struct binder_proc *proc, + struct list_head *list) +{ + struct binder_work *w; + + binder_inner_proc_lock(proc); + w = binder_dequeue_work_head_ilocked(list); + binder_inner_proc_unlock(proc); + return w; +} + static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); +static void binder_free_thread(struct binder_thread *thread); +static void binder_free_proc(struct binder_proc *proc); +static void binder_inc_node_tmpref_ilocked(struct binder_node *node); static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) { struct files_struct *files = proc->files; unsigned long rlim_cur; unsigned long irqs; - int ret; if (files == NULL) return -ESRCH; @@ -428,11 +917,7 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); unlock_task_sighand(proc->tsk, &irqs); - preempt_enable_no_resched(); - ret = __alloc_fd(files, 0, rlim_cur, flags); - preempt_disable(); - - return ret; + return __alloc_fd(files, 0, rlim_cur, flags); } /* @@ -441,11 +926,8 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) static void task_fd_install( struct binder_proc *proc, unsigned int fd, struct file *file) { - if (proc->files) { - preempt_enable_no_resched(); + if (proc->files) __fd_install(proc->files, fd, file); - preempt_disable(); - } } /* @@ -469,526 +951,281 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd) return retval; } -static inline void binder_lock(struct binder_context *context, const char *tag) -{ - trace_binder_lock(tag); - mutex_lock(&context->binder_main_lock); - preempt_disable(); - trace_binder_locked(tag); -} - -static inline void binder_unlock(struct binder_context *context, - const char *tag) +static bool binder_has_work_ilocked(struct binder_thread *thread, + bool do_proc_work) { - trace_binder_unlock(tag); - mutex_unlock(&context->binder_main_lock); - preempt_enable(); + return !binder_worklist_empty_ilocked(&thread->todo) || + thread->looper_need_return || + (do_proc_work && + !binder_worklist_empty_ilocked(&thread->proc->todo)); } -static inline void *kzalloc_preempt_disabled(size_t size) +static bool binder_has_work(struct binder_thread *thread, bool do_proc_work) { - void *ptr; + bool has_work; - ptr = kzalloc(size, GFP_NOWAIT); - if (ptr) - return ptr; + binder_inner_proc_lock(thread->proc); + has_work = binder_has_work_ilocked(thread, do_proc_work); + binder_inner_proc_unlock(thread->proc); - preempt_enable_no_resched(); - ptr = kzalloc(size, GFP_KERNEL); - preempt_disable(); - - return ptr; -} - -static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n) -{ - long ret; - - preempt_enable_no_resched(); - ret = copy_to_user(to,
from, n); - preempt_disable(); - return ret; + return has_work; } -static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n) +static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread) { - long ret; - - preempt_enable_no_resched(); - ret = copy_from_user(to, from, n); - preempt_disable(); - return ret; + return !thread->transaction_stack && + binder_worklist_empty_ilocked(&thread->todo) && + (thread->looper & (BINDER_LOOPER_STATE_ENTERED | + BINDER_LOOPER_STATE_REGISTERED)); } -#define get_user_preempt_disabled(x, ptr) \ -({ \ - int __ret; \ - preempt_enable_no_resched(); \ - __ret = get_user(x, ptr); \ - preempt_disable(); \ - __ret; \ -}) - -#define put_user_preempt_disabled(x, ptr) \ -({ \ - int __ret; \ - preempt_enable_no_resched(); \ - __ret = put_user(x, ptr); \ - preempt_disable(); \ - __ret; \ -}) - -static void binder_set_nice(long nice) +static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc, + bool sync) { - long min_nice; + struct rb_node *n; + struct binder_thread *thread; - if (can_nice(current, nice)) { - set_user_nice(current, nice); - return; + for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { + thread = rb_entry(n, struct binder_thread, rb_node); + if (thread->looper & BINDER_LOOPER_STATE_POLL && + binder_available_for_proc_work_ilocked(thread)) { + if (sync) + wake_up_interruptible_sync(&thread->wait); + else + wake_up_interruptible(&thread->wait); + } } - min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur); - binder_debug(BINDER_DEBUG_PRIORITY_CAP, - "%d: nice value %ld not allowed use %ld instead\n", - current->pid, nice, min_nice); - set_user_nice(current, min_nice); - if (min_nice <= MAX_NICE) - return; - binder_user_error("%d RLIMIT_NICE not set\n", current->pid); } -static size_t binder_buffer_size(struct binder_proc *proc, - struct binder_buffer *buffer) -{ - if (list_is_last(&buffer->entry, &proc->buffers)) - return proc->buffer + proc->buffer_size - (void *)buffer->data; - return (size_t)list_entry(buffer->entry.next, - struct binder_buffer, entry) - (size_t)buffer->data; -} - -static void binder_insert_free_buffer(struct binder_proc *proc, - struct binder_buffer *new_buffer) +/** + * binder_select_thread_ilocked() - selects a thread for doing proc work. + * @proc: process to select a thread from + * + * Note that calling this function moves the thread off the waiting_threads + * list, so it can only be woken up by the caller of this function, or a + * signal. Therefore, callers *should* always wake up the thread this function + * returns. + * + * Return: If there's a thread currently waiting for process work, + * returns that thread. Otherwise returns NULL. 
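In other words, selecting a thread transfers responsibility for the wakeup to the caller. A sketch of the intended pairing under a single hold of the inner lock (binder_wakeup_proc_ilocked() below is this pattern packaged for callers that already hold the lock):

    binder_inner_proc_lock(proc);
    thread = binder_select_thread_ilocked(proc); /* now off waiting_threads */
    binder_wakeup_thread_ilocked(proc, thread, /* sync = */ false);
    binder_inner_proc_unlock(proc);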
+ */ +static struct binder_thread * +binder_select_thread_ilocked(struct binder_proc *proc) { - struct rb_node **p = &proc->free_buffers.rb_node; - struct rb_node *parent = NULL; - struct binder_buffer *buffer; - size_t buffer_size; - size_t new_buffer_size; - - BUG_ON(!new_buffer->free); - - new_buffer_size = binder_buffer_size(proc, new_buffer); - - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: add free buffer, size %zd, at %p\n", - proc->pid, new_buffer_size, new_buffer); + struct binder_thread *thread; - while (*p) { - parent = *p; - buffer = rb_entry(parent, struct binder_buffer, rb_node); - BUG_ON(!buffer->free); + BUG_ON(!spin_is_locked(&proc->inner_lock)); + thread = list_first_entry_or_null(&proc->waiting_threads, + struct binder_thread, + waiting_thread_node); - buffer_size = binder_buffer_size(proc, buffer); + if (thread) + list_del_init(&thread->waiting_thread_node); - if (new_buffer_size < buffer_size) - p = &parent->rb_left; - else - p = &parent->rb_right; - } - rb_link_node(&new_buffer->rb_node, parent, p); - rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); + return thread; } -static void binder_insert_allocated_buffer(struct binder_proc *proc, - struct binder_buffer *new_buffer) +/** + * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work. + * @proc: process to wake up a thread in + * @thread: specific thread to wake-up (may be NULL) + * @sync: whether to do a synchronous wake-up + * + * This function wakes up a thread in the @proc process. + * The caller may provide a specific thread to wake-up in + * the @thread parameter. If @thread is NULL, this function + * will wake up threads that have called poll(). + * + * Note that for this function to work as expected, callers + * should first call binder_select_thread() to find a thread + * to handle the work (if they don't have a thread already), + * and pass the result into the @thread parameter. + */ +static void binder_wakeup_thread_ilocked(struct binder_proc *proc, + struct binder_thread *thread, + bool sync) { - struct rb_node **p = &proc->allocated_buffers.rb_node; - struct rb_node *parent = NULL; - struct binder_buffer *buffer; - - BUG_ON(new_buffer->free); - - while (*p) { - parent = *p; - buffer = rb_entry(parent, struct binder_buffer, rb_node); - BUG_ON(buffer->free); + BUG_ON(!spin_is_locked(&proc->inner_lock)); - if (new_buffer < buffer) - p = &parent->rb_left; - else if (new_buffer > buffer) - p = &parent->rb_right; + if (thread) { + if (sync) + wake_up_interruptible_sync(&thread->wait); else - BUG(); + wake_up_interruptible(&thread->wait); + return; } - rb_link_node(&new_buffer->rb_node, parent, p); - rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); + + /* Didn't find a thread waiting for proc work; this can happen + * in two scenarios: + * 1. All threads are busy handling transactions + * In that case, one of those threads should call back into + * the kernel driver soon and pick up this work. + * 2. Threads are using the (e)poll interface, in which case + * they may be blocked on the waitqueue without having been + * added to waiting_threads. For this case, we just iterate + * over all threads not handling transaction work, and + * wake them all up. We wake all because we don't know whether + * a thread that called into (e)poll is handling non-binder + * work currently. 
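+ * (Waking every poll thread is deliberately conservative: a spurious + * wakeup costs only an extra poll() return, while a missed wakeup could + * leave queued work with no thread to service it.)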
+ */ + binder_wakeup_poll_threads_ilocked(proc, sync); } -static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, - uintptr_t user_ptr) +static void binder_wakeup_proc_ilocked(struct binder_proc *proc) { - struct rb_node *n = proc->allocated_buffers.rb_node; - struct binder_buffer *buffer; - struct binder_buffer *kern_ptr; - - kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset - - offsetof(struct binder_buffer, data)); + struct binder_thread *thread = binder_select_thread_ilocked(proc); - while (n) { - buffer = rb_entry(n, struct binder_buffer, rb_node); - BUG_ON(buffer->free); - - if (kern_ptr < buffer) - n = n->rb_left; - else if (kern_ptr > buffer) - n = n->rb_right; - else - return buffer; - } - return NULL; + binder_wakeup_thread_ilocked(proc, thread, /* sync = */false); } -static int binder_update_page_range(struct binder_proc *proc, int allocate, - void *start, void *end, - struct vm_area_struct *vma) +static bool is_rt_policy(int policy) { - void *page_addr; - unsigned long user_page_addr; - struct page **page; - struct mm_struct *mm; - - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: %s pages %p-%p\n", proc->pid, - allocate ? "allocate" : "free", start, end); + return policy == SCHED_FIFO || policy == SCHED_RR; +} - if (end <= start) - return 0; +static bool is_fair_policy(int policy) +{ + return policy == SCHED_NORMAL || policy == SCHED_BATCH; +} - trace_binder_update_page_range(proc, allocate, start, end); +static bool binder_supported_policy(int policy) +{ + return is_fair_policy(policy) || is_rt_policy(policy); +} - if (vma) - mm = NULL; +static int to_userspace_prio(int policy, int kernel_priority) +{ + if (is_fair_policy(policy)) + return PRIO_TO_NICE(kernel_priority); else - mm = get_task_mm(proc->tsk); + return MAX_USER_RT_PRIO - 1 - kernel_priority; +} - preempt_enable_no_resched(); +static int to_kernel_prio(int policy, int user_priority) +{ + if (is_fair_policy(policy)) + return NICE_TO_PRIO(user_priority); + else + return MAX_USER_RT_PRIO - 1 - user_priority; +} - if (mm) { - down_write(&mm->mmap_sem); - vma = proc->vma; - if (vma && mm != proc->vma_vm_mm) { - pr_err("%d: vma mm and task mm mismatch\n", - proc->pid); - vma = NULL; - } - } +static void binder_do_set_priority(struct task_struct *task, + struct binder_priority desired, + bool verify) +{ + int priority; /* user-space prio value */ + bool has_cap_nice; + unsigned int policy = desired.sched_policy; - if (allocate == 0) - goto free_range; + if (task->policy == policy && task->normal_prio == desired.prio) + return; - if (vma == NULL) { - pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", - proc->pid); - goto err_no_vma; - } + has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE); - for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { - int ret; + priority = to_userspace_prio(policy, desired.prio); - page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; + if (verify && is_rt_policy(policy) && !has_cap_nice) { + long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO); - BUG_ON(*page); - *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); - if (*page == NULL) { - pr_err("%d: binder_alloc_buf failed for page at %p\n", - proc->pid, page_addr); - goto err_alloc_page_failed; + if (max_rtprio == 0) { + policy = SCHED_NORMAL; + priority = MIN_NICE; + } else if (priority > max_rtprio) { + priority = max_rtprio; } - ret = map_kernel_range_noflush((unsigned long)page_addr, - PAGE_SIZE, PAGE_KERNEL, page); - 
flush_cache_vmap((unsigned long)page_addr, - (unsigned long)page_addr + PAGE_SIZE); - if (ret != 1) { - pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", - proc->pid, page_addr); - goto err_map_kernel_failed; - } - user_page_addr = - (uintptr_t)page_addr + proc->user_buffer_offset; - ret = vm_insert_page(vma, user_page_addr, page[0]); - if (ret) { - pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", - proc->pid, user_page_addr); - goto err_vm_insert_page_failed; - } - /* vm_insert_page does not seem to increment the refcount */ - } - if (mm) { - up_write(&mm->mmap_sem); - mmput(mm); } - preempt_disable(); + if (verify && is_fair_policy(policy) && !has_cap_nice) { + long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE)); - return 0; - -free_range: - for (page_addr = end - PAGE_SIZE; page_addr >= start; - page_addr -= PAGE_SIZE) { - page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; - if (vma) - zap_page_range(vma, (uintptr_t)page_addr + - proc->user_buffer_offset, PAGE_SIZE, NULL); -err_vm_insert_page_failed: - unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); -err_map_kernel_failed: - __free_page(*page); - *page = NULL; -err_alloc_page_failed: - ; - } -err_no_vma: - if (mm) { - up_write(&mm->mmap_sem); - mmput(mm); - } - - preempt_disable(); - - return -ENOMEM; -} - -static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, - size_t data_size, - size_t offsets_size, - size_t extra_buffers_size, - int is_async) -{ - struct rb_node *n = proc->free_buffers.rb_node; - struct binder_buffer *buffer; - size_t buffer_size; - struct rb_node *best_fit = NULL; - void *has_page_addr; - void *end_page_addr; - size_t size, data_offsets_size; - - if (proc->vma == NULL) { - pr_err("%d: binder_alloc_buf, no vma\n", - proc->pid); - return NULL; - } - - data_offsets_size = ALIGN(data_size, sizeof(void *)) + - ALIGN(offsets_size, sizeof(void *)); - - if (data_offsets_size < data_size || data_offsets_size < offsets_size) { - binder_user_error("%d: got transaction with invalid size %zd-%zd\n", - proc->pid, data_size, offsets_size); - return NULL; - } - size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); - if (size < data_offsets_size || size < extra_buffers_size) { - binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n", - proc->pid, extra_buffers_size); - return NULL; - } - if (is_async && - proc->free_async_space < size + sizeof(struct binder_buffer)) { - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_alloc_buf size %zd failed, no async space left\n", - proc->pid, size); - return NULL; - } - - while (n) { - buffer = rb_entry(n, struct binder_buffer, rb_node); - BUG_ON(!buffer->free); - buffer_size = binder_buffer_size(proc, buffer); - - if (size < buffer_size) { - best_fit = n; - n = n->rb_left; - } else if (size > buffer_size) - n = n->rb_right; - else { - best_fit = n; - break; + if (min_nice > MAX_NICE) { + binder_user_error("%d RLIMIT_NICE not set\n", + task->pid); + return; + } else if (priority < min_nice) { + priority = min_nice; } } - if (best_fit == NULL) { - pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", - proc->pid, size); - return NULL; - } - if (n == NULL) { - buffer = rb_entry(best_fit, struct binder_buffer, rb_node); - buffer_size = binder_buffer_size(proc, buffer); - } - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_alloc_buf size %zd got buffer %p size %zd\n", - proc->pid, size, buffer, buffer_size); + if (policy != desired.sched_policy || 
+ to_kernel_prio(policy, priority) != desired.prio) + binder_debug(BINDER_DEBUG_PRIORITY_CAP, + "%d: priority %d not allowed, using %d instead\n", + task->pid, desired.prio, + to_kernel_prio(policy, priority)); - has_page_addr = - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); - if (n == NULL) { - if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) - buffer_size = size; /* no room for other buffers */ - else - buffer_size = size + sizeof(struct binder_buffer); - } - end_page_addr = - (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); - if (end_page_addr > has_page_addr) - end_page_addr = has_page_addr; - if (binder_update_page_range(proc, 1, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) - return NULL; + /* Set the actual priority */ + if (task->policy != policy || is_rt_policy(policy)) { + struct sched_param params; - rb_erase(best_fit, &proc->free_buffers); - buffer->free = 0; - binder_insert_allocated_buffer(proc, buffer); - if (buffer_size != size) { - struct binder_buffer *new_buffer = (void *)buffer->data + size; + params.sched_priority = is_rt_policy(policy) ? priority : 0; - list_add(&new_buffer->entry, &buffer->entry); - new_buffer->free = 1; - binder_insert_free_buffer(proc, new_buffer); + sched_setscheduler_nocheck(task, + policy | SCHED_RESET_ON_FORK, + &params); } - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_alloc_buf size %zd got %p\n", - proc->pid, size, buffer); - buffer->data_size = data_size; - buffer->offsets_size = offsets_size; - buffer->extra_buffers_size = extra_buffers_size; - buffer->async_transaction = is_async; - if (is_async) { - proc->free_async_space -= size + sizeof(struct binder_buffer); - binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, - "%d: binder_alloc_buf size %zd async free %zd\n", - proc->pid, size, proc->free_async_space); - } - - return buffer; -} - -static void *buffer_start_page(struct binder_buffer *buffer) -{ - return (void *)((uintptr_t)buffer & PAGE_MASK); + if (is_fair_policy(policy)) + set_user_nice(task, priority); } -static void *buffer_end_page(struct binder_buffer *buffer) +static void binder_set_priority(struct task_struct *task, + struct binder_priority desired) { - return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); + binder_do_set_priority(task, desired, /* verify = */ true); } -static void binder_delete_free_buffer(struct binder_proc *proc, - struct binder_buffer *buffer) +static void binder_restore_priority(struct task_struct *task, + struct binder_priority desired) { - struct binder_buffer *prev, *next = NULL; - int free_page_end = 1; - int free_page_start = 1; - - BUG_ON(proc->buffers.next == &buffer->entry); - prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); - BUG_ON(!prev->free); - if (buffer_end_page(prev) == buffer_start_page(buffer)) { - free_page_start = 0; - if (buffer_end_page(prev) == buffer_end_page(buffer)) - free_page_end = 0; - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: merge free, buffer %p share page with %p\n", - proc->pid, buffer, prev); - } - - if (!list_is_last(&buffer->entry, &proc->buffers)) { - next = list_entry(buffer->entry.next, - struct binder_buffer, entry); - if (buffer_start_page(next) == buffer_end_page(buffer)) { - free_page_end = 0; - if (buffer_start_page(next) == - buffer_start_page(buffer)) - free_page_start = 0; - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: merge free, buffer %p share page with %p\n", - proc->pid, buffer, prev); - } - } - list_del(&buffer->entry); - if (free_page_start || free_page_end)
{ - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: merge free, buffer %p do not share page%s%s with %p or %p\n", - proc->pid, buffer, free_page_start ? "" : " end", - free_page_end ? "" : " start", prev, next); - binder_update_page_range(proc, 0, free_page_start ? - buffer_start_page(buffer) : buffer_end_page(buffer), - (free_page_end ? buffer_end_page(buffer) : - buffer_start_page(buffer)) + PAGE_SIZE, NULL); - } + binder_do_set_priority(task, desired, /* verify = */ false); } -static void binder_free_buf(struct binder_proc *proc, - struct binder_buffer *buffer) +static void binder_transaction_priority(struct task_struct *task, + struct binder_transaction *t, + struct binder_priority node_prio, + bool inherit_rt) { - size_t size, buffer_size; - - buffer_size = binder_buffer_size(proc, buffer); - - size = ALIGN(buffer->data_size, sizeof(void *)) + - ALIGN(buffer->offsets_size, sizeof(void *)) + - ALIGN(buffer->extra_buffers_size, sizeof(void *)); + struct binder_priority desired_prio; - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_free_buf %p size %zd buffer_size %zd\n", - proc->pid, buffer, size, buffer_size); - - BUG_ON(buffer->free); - BUG_ON(size > buffer_size); - BUG_ON(buffer->transaction != NULL); - BUG_ON((void *)buffer < proc->buffer); - BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); + if (t->set_priority_called) + return; - if (buffer->async_transaction) { - proc->free_async_space += size + sizeof(struct binder_buffer); + t->set_priority_called = true; + t->saved_priority.sched_policy = task->policy; + t->saved_priority.prio = task->normal_prio; - binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, - "%d: binder_free_buf size %zd async free %zd\n", - proc->pid, size, proc->free_async_space); + if (!inherit_rt && is_rt_policy(t->priority.sched_policy)) { + desired_prio.prio = NICE_TO_PRIO(0); + desired_prio.sched_policy = SCHED_NORMAL; + } else { + desired_prio.prio = t->priority.prio; + desired_prio.sched_policy = t->priority.sched_policy; } - binder_update_page_range(proc, 0, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), - NULL); - rb_erase(&buffer->rb_node, &proc->allocated_buffers); - buffer->free = 1; - if (!list_is_last(&buffer->entry, &proc->buffers)) { - struct binder_buffer *next = list_entry(buffer->entry.next, - struct binder_buffer, entry); - - if (next->free) { - rb_erase(&next->rb_node, &proc->free_buffers); - binder_delete_free_buffer(proc, next); - } - } - if (proc->buffers.next != &buffer->entry) { - struct binder_buffer *prev = list_entry(buffer->entry.prev, - struct binder_buffer, entry); - - if (prev->free) { - binder_delete_free_buffer(proc, buffer); - rb_erase(&prev->rb_node, &proc->free_buffers); - buffer = prev; - } + if (node_prio.prio < t->priority.prio || + (node_prio.prio == t->priority.prio && + node_prio.sched_policy == SCHED_FIFO)) { + /* + * In case the minimum priority on the node is + * higher (lower value), use that priority. If + * the priority is the same, but the node uses + * SCHED_FIFO, prefer SCHED_FIFO, since it can + * run unbounded, unlike SCHED_RR.
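+ * For example (illustrative numbers): a transaction at SCHED_NORMAL + * prio 130 (nice 10) against a node minimum of SCHED_NORMAL prio 120 + * (nice 0) runs at the node minimum, since 120 < 130; at equal prio, + * a SCHED_FIFO node minimum wins over a SCHED_RR transaction priority.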
+ */ + desired_prio = node_prio; } - binder_insert_free_buffer(proc, buffer); + + binder_set_priority(task, desired_prio); } -static struct binder_node *binder_get_node(struct binder_proc *proc, - binder_uintptr_t ptr) +static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc, + binder_uintptr_t ptr) { struct rb_node *n = proc->nodes.rb_node; struct binder_node *node; + BUG_ON(!spin_is_locked(&proc->inner_lock)); + while (n) { node = rb_entry(n, struct binder_node, rb_node); @@ -996,21 +1233,46 @@ static struct binder_node *binder_get_node(struct binder_proc *proc, n = n->rb_left; else if (ptr > node->ptr) n = n->rb_right; - else + else { + /* + * take an implicit weak reference + * to ensure node stays alive until + * call to binder_put_node() + */ + binder_inc_node_tmpref_ilocked(node); return node; + } } return NULL; } -static struct binder_node *binder_new_node(struct binder_proc *proc, - binder_uintptr_t ptr, - binder_uintptr_t cookie) +static struct binder_node *binder_get_node(struct binder_proc *proc, + binder_uintptr_t ptr) +{ + struct binder_node *node; + + binder_inner_proc_lock(proc); + node = binder_get_node_ilocked(proc, ptr); + binder_inner_proc_unlock(proc); + return node; +} + +static struct binder_node *binder_init_node_ilocked( + struct binder_proc *proc, + struct binder_node *new_node, + struct flat_binder_object *fp) { struct rb_node **p = &proc->nodes.rb_node; struct rb_node *parent = NULL; struct binder_node *node; + binder_uintptr_t ptr = fp ? fp->binder : 0; + binder_uintptr_t cookie = fp ? fp->cookie : 0; + __u32 flags = fp ? fp->flags : 0; + s8 priority; + BUG_ON(!spin_is_locked(&proc->inner_lock)); while (*p) { + parent = *p; node = rb_entry(parent, struct binder_node, rb_node); @@ -1018,14 +1280,19 @@ static struct binder_node *binder_new_node(struct binder_proc *proc, p = &(*p)->rb_left; else if (ptr > node->ptr) p = &(*p)->rb_right; - else - return NULL; + else { + /* + * A matching node is already in + * the rb tree. Abandon the init + * and return it. 
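+ * (binder_new_node() below detects this case by comparing the returned + * node against its preallocated one and kfree()s the duplicate.)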
+ */ + binder_inc_node_tmpref_ilocked(node); + return node; + } } - - node = kzalloc_preempt_disabled(sizeof(*node)); - if (node == NULL) - return NULL; + node = new_node; binder_stats_created(BINDER_STAT_NODE); + node->tmp_refs++; rb_link_node(&node->rb_node, parent, p); rb_insert_color(&node->rb_node, &proc->nodes); node->debug_id = atomic_inc_return(&binder_last_id); @@ -1033,18 +1300,58 @@ node->ptr = ptr; node->cookie = cookie; node->work.type = BINDER_WORK_NODE; + priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; + node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >> + FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT; + node->min_priority = to_kernel_prio(node->sched_policy, priority); + node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); + node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT); + spin_lock_init(&node->lock); INIT_LIST_HEAD(&node->work.entry); INIT_LIST_HEAD(&node->async_todo); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d:%d node %d u%016llx c%016llx created\n", proc->pid, current->pid, node->debug_id, (u64)node->ptr, (u64)node->cookie); + return node; } -static int binder_inc_node(struct binder_node *node, int strong, int internal, - struct list_head *target_list) +static struct binder_node *binder_new_node(struct binder_proc *proc, + struct flat_binder_object *fp) +{ + struct binder_node *node; + struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL); + + if (!new_node) + return NULL; + binder_inner_proc_lock(proc); + node = binder_init_node_ilocked(proc, new_node, fp); + binder_inner_proc_unlock(proc); + if (node != new_node) + /* + * The node was already added by another thread + */ + kfree(new_node); + + return node; +} + +static void binder_free_node(struct binder_node *node) +{ + kfree(node); + binder_stats_deleted(BINDER_STAT_NODE); +} + +static int binder_inc_node_nilocked(struct binder_node *node, int strong, + int internal, + struct list_head *target_list) { + struct binder_proc *proc = node->proc; + + BUG_ON(!spin_is_locked(&node->lock)); + if (proc) + BUG_ON(!spin_is_locked(&proc->inner_lock)); if (strong) { if (internal) { if (target_list == NULL && @@ -1061,8 +1368,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal, } else node->local_strong_refs++; if (!node->has_strong_ref && target_list) { - list_del_init(&node->work.entry); - list_add_tail(&node->work.entry, target_list); + binder_dequeue_work_ilocked(&node->work); + binder_enqueue_work_ilocked(&node->work, target_list); } } else { if (!internal) @@ -1073,58 +1380,169 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal, node->debug_id); return -EINVAL; } - list_add_tail(&node->work.entry, target_list); + binder_enqueue_work_ilocked(&node->work, target_list); } } return 0; } -static int binder_dec_node(struct binder_node *node, int strong, int internal) +static int binder_inc_node(struct binder_node *node, int strong, int internal, + struct list_head *target_list) +{ + int ret; + + binder_node_inner_lock(node); + ret = binder_inc_node_nilocked(node, strong, internal, target_list); + binder_node_inner_unlock(node); + + return ret; +} + +static bool binder_dec_node_nilocked(struct binder_node *node, + int strong, int internal) { + struct binder_proc *proc = node->proc; + + BUG_ON(!spin_is_locked(&node->lock)); + if (proc) + BUG_ON(!spin_is_locked(&proc->inner_lock)); if (strong) { if (internal) node->internal_strong_refs--; else
node->local_strong_refs--; if (node->local_strong_refs || node->internal_strong_refs) - return 0; + return false; } else { if (!internal) node->local_weak_refs--; - if (node->local_weak_refs || !hlist_empty(&node->refs)) - return 0; + if (node->local_weak_refs || node->tmp_refs || + !hlist_empty(&node->refs)) + return false; } - if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { + + if (proc && (node->has_strong_ref || node->has_weak_ref)) { if (list_empty(&node->work.entry)) { - list_add_tail(&node->work.entry, &node->proc->todo); - wake_up_interruptible(&node->proc->wait); + binder_enqueue_work_ilocked(&node->work, &proc->todo); + binder_wakeup_proc_ilocked(proc); } } else { if (hlist_empty(&node->refs) && !node->local_strong_refs && - !node->local_weak_refs) { - list_del_init(&node->work.entry); - if (node->proc) { - rb_erase(&node->rb_node, &node->proc->nodes); + !node->local_weak_refs && !node->tmp_refs) { + if (proc) { + binder_dequeue_work_ilocked(&node->work); + rb_erase(&node->rb_node, &proc->nodes); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "refless node %d deleted\n", node->debug_id); } else { + BUG_ON(!list_empty(&node->work.entry)); + spin_lock(&binder_dead_nodes_lock); + /* + * tmp_refs could have changed so + * check it again + */ + if (node->tmp_refs) { + spin_unlock(&binder_dead_nodes_lock); + return false; + } hlist_del(&node->dead_node); + spin_unlock(&binder_dead_nodes_lock); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "dead node %d deleted\n", node->debug_id); } - kfree(node); - binder_stats_deleted(BINDER_STAT_NODE); + return true; } } + return false; +} - return 0; +static void binder_dec_node(struct binder_node *node, int strong, int internal) +{ + bool free_node; + + binder_node_inner_lock(node); + free_node = binder_dec_node_nilocked(node, strong, internal); + binder_node_inner_unlock(node); + if (free_node) + binder_free_node(node); +} + +static void binder_inc_node_tmpref_ilocked(struct binder_node *node) +{ + /* + * No call to binder_inc_node() is needed since we + * don't need to inform userspace of any changes to + * tmp_refs + */ + node->tmp_refs++; } +/** + * binder_inc_node_tmpref() - take a temporary reference on node + * @node: node to reference + * + * Take reference on node to prevent the node from being freed + * while referenced only by a local variable. The inner lock is + * needed to serialize with the node work on the queue (which + * isn't needed after the node is dead). 
If the node is dead + * (node->proc is NULL), use binder_dead_nodes_lock to protect + * node->tmp_refs against dead-node-only cases where the node + * lock cannot be acquired (eg traversing the dead node list to + * print nodes) + */ +static void binder_inc_node_tmpref(struct binder_node *node) +{ + binder_node_lock(node); + if (node->proc) + binder_inner_proc_lock(node->proc); + else + spin_lock(&binder_dead_nodes_lock); + binder_inc_node_tmpref_ilocked(node); + if (node->proc) + binder_inner_proc_unlock(node->proc); + else + spin_unlock(&binder_dead_nodes_lock); + binder_node_unlock(node); +} -static struct binder_ref *binder_get_ref(struct binder_proc *proc, - u32 desc, bool need_strong_ref) +/** + * binder_dec_node_tmpref() - remove a temporary reference on node + * @node: node to reference + * + * Release temporary reference on node taken via binder_inc_node_tmpref() + */ +static void binder_dec_node_tmpref(struct binder_node *node) +{ + bool free_node; + + binder_node_inner_lock(node); + if (!node->proc) + spin_lock(&binder_dead_nodes_lock); + node->tmp_refs--; + BUG_ON(node->tmp_refs < 0); + if (!node->proc) + spin_unlock(&binder_dead_nodes_lock); + /* + * Call binder_dec_node() to check if all refcounts are 0 + * and cleanup is needed. Calling with strong=0 and internal=1 + * causes no actual reference to be released in binder_dec_node(). + * If that changes, a change is needed here too. + */ + free_node = binder_dec_node_nilocked(node, 0, 1); + binder_node_inner_unlock(node); + if (free_node) + binder_free_node(node); +} + +static void binder_put_node(struct binder_node *node) +{ + binder_dec_node_tmpref(node); +} + +static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc, + u32 desc, bool need_strong_ref) { struct rb_node *n = proc->refs_by_desc.rb_node; struct binder_ref *ref; @@ -1132,11 +1550,11 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc, while (n) { ref = rb_entry(n, struct binder_ref, rb_node_desc); - if (desc < ref->desc) { + if (desc < ref->data.desc) { n = n->rb_left; - } else if (desc > ref->desc) { + } else if (desc > ref->data.desc) { n = n->rb_right; - } else if (need_strong_ref && !ref->strong) { + } else if (need_strong_ref && !ref->data.strong) { binder_user_error("tried to use weak ref as strong ref\n"); return NULL; } else { @@ -1146,14 +1564,34 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc, return NULL; } -static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, - struct binder_node *node) +/** + * binder_get_ref_for_node_olocked() - get the ref associated with given node + * @proc: binder_proc that owns the ref + * @node: binder_node of target + * @new_ref: newly allocated binder_ref to be initialized or %NULL + * + * Look up the ref for the given node and return it if it exists + * + * If it doesn't exist and the caller provides a newly allocated + * ref, initialize the fields of the newly allocated ref and insert + * into the given proc rb_trees and node refs list. + * + * Return: the ref for node. It is possible that another thread + * allocated/initialized the ref first in which case the + * returned ref would be different than the passed-in + * new_ref. new_ref must be kfree'd by the caller in + * this case. 
+ */ +static struct binder_ref *binder_get_ref_for_node_olocked( + struct binder_proc *proc, + struct binder_node *node, + struct binder_ref *new_ref) { - struct rb_node *n; + struct binder_context *context = proc->context; struct rb_node **p = &proc->refs_by_node.rb_node; struct rb_node *parent = NULL; - struct binder_ref *ref, *new_ref; - struct binder_context *context = proc->context; + struct binder_ref *ref; + struct rb_node *n; while (*p) { parent = *p; @@ -1166,22 +1604,22 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, else return ref; } - new_ref = kzalloc_preempt_disabled(sizeof(*ref)); - if (new_ref == NULL) + if (!new_ref) return NULL; + binder_stats_created(BINDER_STAT_REF); - new_ref->debug_id = atomic_inc_return(&binder_last_id); + new_ref->data.debug_id = atomic_inc_return(&binder_last_id); new_ref->proc = proc; new_ref->node = node; rb_link_node(&new_ref->rb_node_node, parent, p); rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); - new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1; + new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1; for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { ref = rb_entry(n, struct binder_ref, rb_node_desc); - if (ref->desc > new_ref->desc) + if (ref->data.desc > new_ref->data.desc) break; - new_ref->desc = ref->desc + 1; + new_ref->data.desc = ref->data.desc + 1; } p = &proc->refs_by_desc.rb_node; @@ -1189,121 +1627,423 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, parent = *p; ref = rb_entry(parent, struct binder_ref, rb_node_desc); - if (new_ref->desc < ref->desc) + if (new_ref->data.desc < ref->data.desc) p = &(*p)->rb_left; - else if (new_ref->desc > ref->desc) + else if (new_ref->data.desc > ref->data.desc) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new_ref->rb_node_desc, parent, p); rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); - if (node) { - hlist_add_head(&new_ref->node_entry, &node->refs); - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d new ref %d desc %d for node %d\n", - proc->pid, new_ref->debug_id, new_ref->desc, - node->debug_id); - } else { - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d new ref %d desc %d for dead node\n", - proc->pid, new_ref->debug_id, new_ref->desc); - } + binder_node_lock(node); + hlist_add_head(&new_ref->node_entry, &node->refs); + + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "%d new ref %d desc %d for node %d\n", + proc->pid, new_ref->data.debug_id, new_ref->data.desc, + node->debug_id); + binder_node_unlock(node); return new_ref; } -static void binder_delete_ref(struct binder_ref *ref) +static void binder_cleanup_ref_olocked(struct binder_ref *ref) { + bool delete_node = false; + binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d delete ref %d desc %d for node %d\n", - ref->proc->pid, ref->debug_id, ref->desc, + ref->proc->pid, ref->data.debug_id, ref->data.desc, ref->node->debug_id); rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); - if (ref->strong) - binder_dec_node(ref->node, 1, 1); + + binder_node_inner_lock(ref->node); + if (ref->data.strong) + binder_dec_node_nilocked(ref->node, 1, 1); + hlist_del(&ref->node_entry); - binder_dec_node(ref->node, 0, 1); + delete_node = binder_dec_node_nilocked(ref->node, 0, 1); + binder_node_inner_unlock(ref->node); + /* + * Clear ref->node unless we want the caller to free the node + */ + if (!delete_node) { + /* + * The caller uses ref->node to determine 
+ * whether the node needs to be freed. Clear + * it since the node is still alive. + */ + ref->node = NULL; + } + if (ref->death) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "%d delete ref %d desc %d has death notification\n", - ref->proc->pid, ref->debug_id, ref->desc); - list_del(&ref->death->work.entry); - kfree(ref->death); + ref->proc->pid, ref->data.debug_id, + ref->data.desc); + binder_dequeue_work(ref->proc, &ref->death->work); binder_stats_deleted(BINDER_STAT_DEATH); } - kfree(ref); binder_stats_deleted(BINDER_STAT_REF); } -static int binder_inc_ref(struct binder_ref *ref, int strong, - struct list_head *target_list) +/** + * binder_inc_ref_olocked() - increment the ref for given handle + * @ref: ref to be incremented + * @strong: if true, strong increment, else weak + * @target_list: list to queue node work on + * + * Increment the ref. @ref->proc->outer_lock must be held on entry + * + * Return: 0, if successful, else errno + */ +static int binder_inc_ref_olocked(struct binder_ref *ref, int strong, + struct list_head *target_list) { int ret; if (strong) { - if (ref->strong == 0) { + if (ref->data.strong == 0) { ret = binder_inc_node(ref->node, 1, 1, target_list); if (ret) return ret; } - ref->strong++; + ref->data.strong++; } else { - if (ref->weak == 0) { + if (ref->data.weak == 0) { ret = binder_inc_node(ref->node, 0, 1, target_list); if (ret) return ret; } - ref->weak++; + ref->data.weak++; } return 0; } - -static int binder_dec_ref(struct binder_ref *ref, int strong) +/** + * binder_dec_ref_olocked() - dec the ref for given handle + * @ref: ref to be decremented + * @strong: if true, strong decrement, else weak + * + * Decrement the ref. + * + * Return: true if ref is cleaned up and ready to be freed + */ +static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong) { if (strong) { - if (ref->strong == 0) { binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", - ref->proc->pid, ref->debug_id, - ref->desc, ref->strong, ref->weak); - return -EINVAL; - } - ref->strong--; - if (ref->strong == 0) { - int ret; - - ret = binder_dec_node(ref->node, strong, 1); - if (ret) - return ret; + ref->proc->pid, ref->data.debug_id, + ref->data.desc, ref->data.strong, + ref->data.weak); + return false; } + ref->data.strong--; + if (ref->data.strong == 0) + binder_dec_node(ref->node, strong, 1); } else { - if (ref->weak == 0) { binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", - ref->proc->pid, ref->debug_id, - ref->desc, ref->strong, ref->weak); - return -EINVAL; + ref->proc->pid, ref->data.debug_id, + ref->data.desc, ref->data.strong, + ref->data.weak); + return false; } - ref->weak--; + ref->data.weak--; } - if (ref->strong == 0 && ref->weak == 0) - binder_delete_ref(ref); - return 0; + if (ref->data.strong == 0 && ref->data.weak == 0) { + binder_cleanup_ref_olocked(ref); + return true; + } + return false; } -static void binder_pop_transaction(struct binder_thread *target_thread, - struct binder_transaction *t) +/** + * binder_get_node_from_ref() - get the node from the given proc/desc + * @proc: proc containing the ref + * @desc: the handle associated with the ref + * @need_strong_ref: if true, only return node if ref is strong + * @rdata: the id/refcount data for the ref + * + * Given a proc and ref handle, return the associated binder_node + * + * Return: a binder_node or NULL if not found or not strong when strong required + */ +static struct binder_node *binder_get_node_from_ref( + struct
binder_proc *proc, + u32 desc, bool need_strong_ref, + struct binder_ref_data *rdata) { - if (target_thread) { - BUG_ON(target_thread->transaction_stack != t); - BUG_ON(target_thread->transaction_stack->from != target_thread); - target_thread->transaction_stack = - target_thread->transaction_stack->from_parent; - t->from = NULL; + struct binder_node *node; + struct binder_ref *ref; + + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, desc, need_strong_ref); + if (!ref) + goto err_no_ref; + node = ref->node; + /* + * Take an implicit reference on the node to ensure + * it stays alive until the call to binder_put_node() + */ + binder_inc_node_tmpref(node); + if (rdata) + *rdata = ref->data; + binder_proc_unlock(proc); + + return node; + +err_no_ref: + binder_proc_unlock(proc); + return NULL; +} + +/** + * binder_free_ref() - free the binder_ref + * @ref: ref to free + * + * Free the binder_ref. Free the binder_node indicated by ref->node + * (if non-NULL) and the binder_ref_death indicated by ref->death. + */ +static void binder_free_ref(struct binder_ref *ref) +{ + if (ref->node) + binder_free_node(ref->node); + kfree(ref->death); + kfree(ref); +} + +/** + * binder_update_ref_for_handle() - inc/dec the ref for given handle + * @proc: proc containing the ref + * @desc: the handle associated with the ref + * @increment: true=inc reference, false=dec reference + * @strong: true=strong reference, false=weak reference + * @rdata: the id/refcount data for the ref + * + * Given a proc and ref handle, increment or decrement the ref + * according to "increment" arg. + * + * Return: 0 if successful, else errno + */ +static int binder_update_ref_for_handle(struct binder_proc *proc, + uint32_t desc, bool increment, bool strong, + struct binder_ref_data *rdata) +{ + int ret = 0; + struct binder_ref *ref; + bool delete_ref = false; + + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, desc, strong); + if (!ref) { + ret = -EINVAL; + goto err_no_ref; + } + if (increment) + ret = binder_inc_ref_olocked(ref, strong, NULL); + else + delete_ref = binder_dec_ref_olocked(ref, strong); + + if (rdata) + *rdata = ref->data; + binder_proc_unlock(proc); + + if (delete_ref) + binder_free_ref(ref); + return ret; + +err_no_ref: + binder_proc_unlock(proc); + return ret; +} + +/** + * binder_dec_ref_for_handle() - dec the ref for given handle + * @proc: proc containing the ref + * @desc: the handle associated with the ref + * @strong: true=strong reference, false=weak reference + * @rdata: the id/refcount data for the ref + * + * Just calls binder_update_ref_for_handle() to decrement the ref. + * + * Return: 0 if successful, else errno + */ +static int binder_dec_ref_for_handle(struct binder_proc *proc, + uint32_t desc, bool strong, struct binder_ref_data *rdata) +{ + return binder_update_ref_for_handle(proc, desc, false, strong, rdata); +} + + +/** + * binder_inc_ref_for_node() - increment the ref for given proc/node + * @proc: proc containing the ref + * @node: target node + * @strong: true=strong reference, false=weak reference + * @target_list: worklist to use if node is incremented + * @rdata: the id/refcount data for the ref + * + * Given a proc and node, increment the ref. 
Create the ref if it + * doesn't already exist + * + * Return: 0 if successful, else errno + */ +static int binder_inc_ref_for_node(struct binder_proc *proc, + struct binder_node *node, + bool strong, + struct list_head *target_list, + struct binder_ref_data *rdata) +{ + struct binder_ref *ref; + struct binder_ref *new_ref = NULL; + int ret = 0; + + binder_proc_lock(proc); + ref = binder_get_ref_for_node_olocked(proc, node, NULL); + if (!ref) { + binder_proc_unlock(proc); + new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); + if (!new_ref) + return -ENOMEM; + binder_proc_lock(proc); + ref = binder_get_ref_for_node_olocked(proc, node, new_ref); + } + ret = binder_inc_ref_olocked(ref, strong, target_list); + *rdata = ref->data; + binder_proc_unlock(proc); + if (new_ref && ref != new_ref) + /* + * Another thread created the ref first so + * free the one we allocated + */ + kfree(new_ref); + return ret; +} + +static void binder_pop_transaction_ilocked(struct binder_thread *target_thread, + struct binder_transaction *t) +{ + BUG_ON(!target_thread); + BUG_ON(!spin_is_locked(&target_thread->proc->inner_lock)); + BUG_ON(target_thread->transaction_stack != t); + BUG_ON(target_thread->transaction_stack->from != target_thread); + target_thread->transaction_stack = + target_thread->transaction_stack->from_parent; + t->from = NULL; +} + +/** + * binder_thread_dec_tmpref() - decrement thread->tmp_ref + * @thread: thread to decrement + * + * A thread needs to be kept alive while being used to create or + * handle a transaction. binder_get_txn_from() is used to safely + * extract t->from from a binder_transaction and keep the thread + * indicated by t->from from being freed. When done with that + * binder_thread, this function is called to decrement the + * tmp_ref and free if appropriate (thread has been released + * and no transaction being processed by the driver) + */ +static void binder_thread_dec_tmpref(struct binder_thread *thread) +{ + /* + * atomic is used to protect the counter value while + * it cannot reach zero or thread->is_dead is false + */ + binder_inner_proc_lock(thread->proc); + atomic_dec(&thread->tmp_ref); + if (thread->is_dead && !atomic_read(&thread->tmp_ref)) { + binder_inner_proc_unlock(thread->proc); + binder_free_thread(thread); + return; } - t->need_reply = 0; + binder_inner_proc_unlock(thread->proc); +} + +/** + * binder_proc_dec_tmpref() - decrement proc->tmp_ref + * @proc: proc to decrement + * + * A binder_proc needs to be kept alive while being used to create or + * handle a transaction. proc->tmp_ref is incremented when + * creating a new transaction or the binder_proc is currently in-use + * by threads that are being released. When done with the binder_proc, + * this function is called to decrement the counter and free the + * proc if appropriate (proc has been released, all threads have + * been released and not currenly in-use to process a transaction). + */ +static void binder_proc_dec_tmpref(struct binder_proc *proc) +{ + binder_inner_proc_lock(proc); + proc->tmp_ref--; + if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) && + !proc->tmp_ref) { + binder_inner_proc_unlock(proc); + binder_free_proc(proc); + return; + } + binder_inner_proc_unlock(proc); +} + +/** + * binder_get_txn_from() - safely extract the "from" thread in transaction + * @t: binder transaction for t->from + * + * Atomically return the "from" thread and increment the tmp_ref + * count for the thread to ensure it stays alive until + * binder_thread_dec_tmpref() is called. 
+ * + * Return: the value of t->from + */ +static struct binder_thread *binder_get_txn_from( + struct binder_transaction *t) +{ + struct binder_thread *from; + + spin_lock(&t->lock); + from = t->from; + if (from) + atomic_inc(&from->tmp_ref); + spin_unlock(&t->lock); + return from; +} + +/** + * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock + * @t: binder transaction for t->from + * + * Same as binder_get_txn_from() except it also acquires the proc->inner_lock + * to guarantee that the thread cannot be released while operating on it. + * The caller must call binder_inner_proc_unlock() to release the inner lock + * as well as call binder_thread_dec_tmpref() to release the reference. + * + * Return: the value of t->from + */ +static struct binder_thread *binder_get_txn_from_and_acq_inner( + struct binder_transaction *t) +{ + struct binder_thread *from; + + from = binder_get_txn_from(t); + if (!from) + return NULL; + binder_inner_proc_lock(from->proc); + if (t->from) { + BUG_ON(from != t->from); + return from; + } + binder_inner_proc_unlock(from->proc); + binder_thread_dec_tmpref(from); + return NULL; +} + +static void binder_free_transaction(struct binder_transaction *t) +{ if (t->buffer) t->buffer->transaction = NULL; kfree(t); @@ -1318,30 +2058,28 @@ static void binder_send_failed_reply(struct binder_transaction *t, BUG_ON(t->flags & TF_ONE_WAY); while (1) { - target_thread = t->from; + target_thread = binder_get_txn_from_and_acq_inner(t); if (target_thread) { - if (target_thread->return_error != BR_OK && - target_thread->return_error2 == BR_OK) { - target_thread->return_error2 = - target_thread->return_error; - target_thread->return_error = BR_OK; - } - if (target_thread->return_error == BR_OK) { - binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "send failed reply for transaction %d to %d:%d\n", - t->debug_id, - target_thread->proc->pid, - target_thread->pid); - - binder_pop_transaction(target_thread, t); - target_thread->return_error = error_code; + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, + "send failed reply for transaction %d to %d:%d\n", + t->debug_id, + target_thread->proc->pid, + target_thread->pid); + + binder_pop_transaction_ilocked(target_thread, t); + if (target_thread->reply_error.cmd == BR_OK) { + target_thread->reply_error.cmd = error_code; + binder_enqueue_work_ilocked( + &target_thread->reply_error.work, + &target_thread->todo); wake_up_interruptible(&target_thread->wait); } else { - pr_err("reply failed, target thread, %d:%d, has error code %d already\n", - target_thread->proc->pid, - target_thread->pid, - target_thread->return_error); + WARN(1, "Unexpected reply error: %u\n", + target_thread->reply_error.cmd); } + binder_inner_proc_unlock(target_thread->proc); + binder_thread_dec_tmpref(target_thread); + binder_free_transaction(t); return; } next = t->from_parent; @@ -1350,7 +2088,7 @@ static void binder_send_failed_reply(struct binder_transaction *t, "send failed reply for transaction %d, target dead\n", t->debug_id); - binder_pop_transaction(target_thread, t); + binder_free_transaction(t); if (next == NULL) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "reply failed, no target thread at root\n"); @@ -1559,25 +2297,26 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, node->debug_id, (u64)node->ptr); binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 0); + binder_put_node(node); } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { struct flat_binder_object *fp; - struct binder_ref *ref; + struct
binder_ref_data rdata; + int ret; fp = to_flat_binder_object(hdr); - ref = binder_get_ref(proc, fp->handle, - hdr->type == BINDER_TYPE_HANDLE); + ret = binder_dec_ref_for_handle(proc, fp->handle, + hdr->type == BINDER_TYPE_HANDLE, &rdata); - if (ref == NULL) { - pr_err("transaction release %d bad handle %d\n", - debug_id, fp->handle); + if (ret) { + pr_err("transaction release %d bad handle %d, ret = %d\n", + debug_id, fp->handle, ret); break; } binder_debug(BINDER_DEBUG_TRANSACTION, - " ref %d desc %d (node %d)\n", - ref->debug_id, ref->desc, ref->node->debug_id); - binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE); + " ref %d desc %d\n", + rdata.debug_id, rdata.desc); } break; case BINDER_TYPE_FD: { @@ -1616,7 +2355,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, * back to kernel address space to access it */ parent_buffer = parent->buffer - - proc->user_buffer_offset; + binder_alloc_get_user_buffer_offset( + &proc->alloc); fd_buf_size = sizeof(u32) * fda->num_fds; if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { @@ -1648,102 +2388,122 @@ static int binder_translate_binder(struct flat_binder_object *fp, struct binder_thread *thread) { struct binder_node *node; - struct binder_ref *ref; struct binder_proc *proc = thread->proc; struct binder_proc *target_proc = t->to_proc; + struct binder_ref_data rdata; + int ret = 0; node = binder_get_node(proc, fp->binder); if (!node) { - node = binder_new_node(proc, fp->binder, fp->cookie); + node = binder_new_node(proc, fp); if (!node) return -ENOMEM; - - node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; - node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); } if (fp->cookie != node->cookie) { binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", proc->pid, thread->pid, (u64)fp->binder, node->debug_id, (u64)fp->cookie, (u64)node->cookie); - return -EINVAL; + ret = -EINVAL; + goto done; + } + if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { + ret = -EPERM; + goto done; } - if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) - return -EPERM; - ref = binder_get_ref_for_node(target_proc, node); - if (!ref) - return -EINVAL; + ret = binder_inc_ref_for_node(target_proc, node, + fp->hdr.type == BINDER_TYPE_BINDER, + &thread->todo, &rdata); + if (ret) + goto done; if (fp->hdr.type == BINDER_TYPE_BINDER) fp->hdr.type = BINDER_TYPE_HANDLE; else fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; fp->binder = 0; - fp->handle = ref->desc; + fp->handle = rdata.desc; fp->cookie = 0; - binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo); - trace_binder_transaction_node_to_ref(t, node, ref); + trace_binder_transaction_node_to_ref(t, node, &rdata); binder_debug(BINDER_DEBUG_TRANSACTION, " node %d u%016llx -> ref %d desc %d\n", node->debug_id, (u64)node->ptr, - ref->debug_id, ref->desc); - - return 0; + rdata.debug_id, rdata.desc); +done: + binder_put_node(node); + return ret; } static int binder_translate_handle(struct flat_binder_object *fp, struct binder_transaction *t, struct binder_thread *thread) { - struct binder_ref *ref; struct binder_proc *proc = thread->proc; struct binder_proc *target_proc = t->to_proc; + struct binder_node *node; + struct binder_ref_data src_rdata; + int ret = 0; - ref = binder_get_ref(proc, fp->handle, - fp->hdr.type == BINDER_TYPE_HANDLE); - if (!ref) { + node = binder_get_node_from_ref(proc, fp->handle, + fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); + if (!node) { binder_user_error("%d:%d got 
transaction with invalid handle, %d\n", proc->pid, thread->pid, fp->handle); return -EINVAL; } - if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) - return -EPERM; + if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) { + ret = -EPERM; + goto done; + } - if (ref->node->proc == target_proc) { + binder_node_lock(node); + if (node->proc == target_proc) { if (fp->hdr.type == BINDER_TYPE_HANDLE) fp->hdr.type = BINDER_TYPE_BINDER; else fp->hdr.type = BINDER_TYPE_WEAK_BINDER; - fp->binder = ref->node->ptr; - fp->cookie = ref->node->cookie; - binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER, - 0, NULL); - trace_binder_transaction_ref_to_node(t, ref); + fp->binder = node->ptr; + fp->cookie = node->cookie; + if (node->proc) + binder_inner_proc_lock(node->proc); + binder_inc_node_nilocked(node, + fp->hdr.type == BINDER_TYPE_BINDER, + 0, NULL); + if (node->proc) + binder_inner_proc_unlock(node->proc); + trace_binder_transaction_ref_to_node(t, node, &src_rdata); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> node %d u%016llx\n", - ref->debug_id, ref->desc, ref->node->debug_id, - (u64)ref->node->ptr); + src_rdata.debug_id, src_rdata.desc, node->debug_id, + (u64)node->ptr); + binder_node_unlock(node); } else { - struct binder_ref *new_ref; + struct binder_ref_data dest_rdata; - new_ref = binder_get_ref_for_node(target_proc, ref->node); - if (!new_ref) - return -EINVAL; + binder_node_unlock(node); + ret = binder_inc_ref_for_node(target_proc, node, + fp->hdr.type == BINDER_TYPE_HANDLE, + NULL, &dest_rdata); + if (ret) + goto done; fp->binder = 0; - fp->handle = new_ref->desc; + fp->handle = dest_rdata.desc; fp->cookie = 0; - binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE, - NULL); - trace_binder_transaction_ref_to_ref(t, ref, new_ref); + trace_binder_transaction_ref_to_ref(t, node, &src_rdata, + &dest_rdata); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> ref %d desc %d (node %d)\n", - ref->debug_id, ref->desc, new_ref->debug_id, - new_ref->desc, ref->node->debug_id); + src_rdata.debug_id, src_rdata.desc, + dest_rdata.debug_id, dest_rdata.desc, + node->debug_id); } - return 0; +done: + binder_put_node(node); + return ret; } static int binder_translate_fd(int fd, @@ -1834,7 +2594,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, * Since the parent was already fixed up, convert it * back to the kernel address space to access it */ - parent_buffer = parent->buffer - target_proc->user_buffer_offset; + parent_buffer = parent->buffer - + binder_alloc_get_user_buffer_offset(&target_proc->alloc); fd_array = (u32 *)(parent_buffer + fda->parent_offset); if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { binder_user_error("%d:%d parent offset not aligned correctly.\n", @@ -1902,12 +2663,87 @@ static int binder_fixup_parent(struct binder_transaction *t, return -EINVAL; } parent_buffer = (u8 *)(parent->buffer - - target_proc->user_buffer_offset); + binder_alloc_get_user_buffer_offset( + &target_proc->alloc)); *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; return 0; } +/** + * binder_proc_transaction() - sends a transaction to a process and wakes it up + * @t: transaction to send + * @proc: process to send the transaction to + * @thread: thread in @proc to send the transaction to (may be NULL) + * + * This function queues a transaction to the specified process. It will try + * to find a thread in the target process to handle the transaction and + * wake it up. 
If no thread is found, the work is queued to the proc + * waitqueue. + * + * If the @thread parameter is not NULL, the transaction is always queued + * to the waitlist of that specific thread. + * + * Return: true if the transaction was successfully queued + * false if the target process or thread is dead + */ +static bool binder_proc_transaction(struct binder_transaction *t, + struct binder_proc *proc, + struct binder_thread *thread) +{ + struct list_head *target_list = NULL; + struct binder_node *node = t->buffer->target_node; + struct binder_priority node_prio; + bool oneway = !!(t->flags & TF_ONE_WAY); + bool wakeup = true; + + BUG_ON(!node); + binder_node_lock(node); + node_prio.prio = node->min_priority; + node_prio.sched_policy = node->sched_policy; + + if (oneway) { + BUG_ON(thread); + if (node->has_async_transaction) { + target_list = &node->async_todo; + wakeup = false; + } else { + node->has_async_transaction = 1; + } + } + + binder_inner_proc_lock(proc); + + if (proc->is_dead || (thread && thread->is_dead)) { + binder_inner_proc_unlock(proc); + binder_node_unlock(node); + return false; + } + + if (!thread && !target_list) + thread = binder_select_thread_ilocked(proc); + + if (thread) { + target_list = &thread->todo; + binder_transaction_priority(thread->task, t, node_prio, + node->inherit_rt); + } else if (!target_list) { + target_list = &proc->todo; + } else { + BUG_ON(target_list != &node->async_todo); + } + + binder_enqueue_work_ilocked(&t->work, target_list); + + if (wakeup) + binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); + + binder_inner_proc_unlock(proc); + binder_node_unlock(node); + + return true; +} + static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply, @@ -1919,19 +2755,21 @@ static void binder_transaction(struct binder_proc *proc, binder_size_t *offp, *off_end, *off_start; binder_size_t off_min; u8 *sg_bufp, *sg_buf_end; - struct binder_proc *target_proc; + struct binder_proc *target_proc = NULL; struct binder_thread *target_thread = NULL; struct binder_node *target_node = NULL; - struct list_head *target_list; - wait_queue_head_t *target_wait; struct binder_transaction *in_reply_to = NULL; struct binder_transaction_log_entry *e; - uint32_t return_error; + uint32_t return_error = 0; + uint32_t return_error_param = 0; + uint32_t return_error_line = 0; struct binder_buffer_object *last_fixup_obj = NULL; binder_size_t last_fixup_min_off = 0; struct binder_context *context = proc->context; + int t_debug_id = atomic_inc_return(&binder_last_id); - e = binder_transaction_log_add(&context->transaction_log); + e = binder_transaction_log_add(&binder_transaction_log); + e->debug_id = t_debug_id; e->call_type = reply ? 
2 : !!(tr->flags & TF_ONE_WAY); e->from_proc = proc->pid; e->from_thread = thread->pid; @@ -1941,29 +2779,39 @@ static void binder_transaction(struct binder_proc *proc, e->context_name = proc->context->name; if (reply) { + binder_inner_proc_lock(proc); in_reply_to = thread->transaction_stack; if (in_reply_to == NULL) { + binder_inner_proc_unlock(proc); binder_user_error("%d:%d got reply transaction with no transaction stack\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; goto err_empty_call_stack; } - binder_set_nice(in_reply_to->saved_priority); if (in_reply_to->to_thread != thread) { + spin_lock(&in_reply_to->lock); binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", proc->pid, thread->pid, in_reply_to->debug_id, in_reply_to->to_proc ? in_reply_to->to_proc->pid : 0, in_reply_to->to_thread ? in_reply_to->to_thread->pid : 0); + spin_unlock(&in_reply_to->lock); + binder_inner_proc_unlock(proc); return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; in_reply_to = NULL; goto err_bad_call_stack; } thread->transaction_stack = in_reply_to->to_parent; - target_thread = in_reply_to->from; + binder_inner_proc_unlock(proc); + target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); if (target_thread == NULL) { return_error = BR_DEAD_REPLY; + return_error_line = __LINE__; goto err_dead_binder; } if (target_thread->transaction_stack != in_reply_to) { @@ -1972,89 +2820,137 @@ static void binder_transaction(struct binder_proc *proc, target_thread->transaction_stack ? target_thread->transaction_stack->debug_id : 0, in_reply_to->debug_id); + binder_inner_proc_unlock(target_thread->proc); return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; in_reply_to = NULL; target_thread = NULL; goto err_dead_binder; } target_proc = target_thread->proc; + target_proc->tmp_ref++; + binder_inner_proc_unlock(target_thread->proc); } else { if (tr->target.handle) { struct binder_ref *ref; - ref = binder_get_ref(proc, tr->target.handle, true); - if (ref == NULL) { + /* + * There must already be a strong ref + * on this node. If so, do a strong + * increment on the node to ensure it + * stays alive until the transaction is + * done. 
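+ * The reference is dropped again via binder_dec_node() + * in the error paths below if the transaction cannot + * be sent.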
+ */ + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, tr->target.handle, + true); + if (ref) { + binder_inc_node(ref->node, 1, 0, NULL); + target_node = ref->node; + } + binder_proc_unlock(proc); + if (target_node == NULL) { binder_user_error("%d:%d got transaction to invalid handle\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_invalid_target_handle; } - target_node = ref->node; } else { + mutex_lock(&context->context_mgr_node_lock); target_node = context->binder_context_mgr_node; if (target_node == NULL) { return_error = BR_DEAD_REPLY; + mutex_unlock(&context->context_mgr_node_lock); + return_error_line = __LINE__; goto err_no_context_mgr_node; } + binder_inc_node(target_node, 1, 0, NULL); + mutex_unlock(&context->context_mgr_node_lock); } e->to_node = target_node->debug_id; + binder_node_lock(target_node); target_proc = target_node->proc; if (target_proc == NULL) { + binder_node_unlock(target_node); return_error = BR_DEAD_REPLY; + return_error_line = __LINE__; goto err_dead_binder; } + binder_inner_proc_lock(target_proc); + target_proc->tmp_ref++; + binder_inner_proc_unlock(target_proc); + binder_node_unlock(target_node); if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) { return_error = BR_FAILED_REPLY; + return_error_param = -EPERM; + return_error_line = __LINE__; goto err_invalid_target_handle; } + binder_inner_proc_lock(proc); if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { struct binder_transaction *tmp; tmp = thread->transaction_stack; if (tmp->to_thread != thread) { + spin_lock(&tmp->lock); binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", proc->pid, thread->pid, tmp->debug_id, tmp->to_proc ? tmp->to_proc->pid : 0, tmp->to_thread ? 
tmp->to_thread->pid : 0); + spin_unlock(&tmp->lock); + binder_inner_proc_unlock(proc); return_error = BR_FAILED_REPLY; + return_error_param = -EPROTO; + return_error_line = __LINE__; goto err_bad_call_stack; } while (tmp) { - if (tmp->from && tmp->from->proc == target_proc) - target_thread = tmp->from; + struct binder_thread *from; + + spin_lock(&tmp->lock); + from = tmp->from; + if (from && from->proc == target_proc) { + atomic_inc(&from->tmp_ref); + target_thread = from; + spin_unlock(&tmp->lock); + break; + } + spin_unlock(&tmp->lock); tmp = tmp->from_parent; } } + binder_inner_proc_unlock(proc); } - if (target_thread) { + if (target_thread) e->to_thread = target_thread->pid; - target_list = &target_thread->todo; - target_wait = &target_thread->wait; - } else { - target_list = &target_proc->todo; - target_wait = &target_proc->wait; - } e->to_proc = target_proc->pid; /* TODO: reuse incoming transaction for reply */ - t = kzalloc_preempt_disabled(sizeof(*t)); + t = kzalloc(sizeof(*t), GFP_KERNEL); if (t == NULL) { return_error = BR_FAILED_REPLY; + return_error_param = -ENOMEM; + return_error_line = __LINE__; goto err_alloc_t_failed; } binder_stats_created(BINDER_STAT_TRANSACTION); + spin_lock_init(&t->lock); - tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete)); + tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); if (tcomplete == NULL) { return_error = BR_FAILED_REPLY; + return_error_param = -ENOMEM; + return_error_line = __LINE__; goto err_alloc_tcomplete_failed; } binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); - t->debug_id = atomic_inc_return(&binder_last_id); - e->debug_id = t->debug_id; + t->debug_id = t_debug_id; if (reply) binder_debug(BINDER_DEBUG_TRANSACTION, @@ -2084,15 +2980,30 @@ static void binder_transaction(struct binder_proc *proc, t->to_thread = target_thread; t->code = tr->code; t->flags = tr->flags; - t->priority = task_nice(current); + if (!(t->flags & TF_ONE_WAY) && + binder_supported_policy(current->policy)) { + /* Inherit supported policies for synchronous transactions */ + t->priority.sched_policy = current->policy; + t->priority.prio = current->normal_prio; + } else { + /* Otherwise, fall back to the default priority */ + t->priority = target_proc->default_priority; + } trace_binder_transaction(reply, t, target_node); - t->buffer = binder_alloc_buf(target_proc, tr->data_size, + t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, tr->offsets_size, extra_buffers_size, !reply && (t->flags & TF_ONE_WAY)); - if (t->buffer == NULL) { - return_error = BR_FAILED_REPLY; + if (IS_ERR(t->buffer)) { + /* + * -ESRCH indicates VMA cleared. The target is dying. + */ + return_error_param = PTR_ERR(t->buffer); + return_error = return_error_param == -ESRCH ? 
+ BR_DEAD_REPLY : BR_FAILED_REPLY; + return_error_line = __LINE__; + t->buffer = NULL; goto err_binder_alloc_buf_failed; } t->buffer->allow_user_free = 0; @@ -2100,31 +3011,34 @@ static void binder_transaction(struct binder_proc *proc, t->buffer->transaction = t; t->buffer->target_node = target_node; trace_binder_transaction_alloc_buf(t->buffer); - if (target_node) - binder_inc_node(target_node, 1, 0, NULL); - off_start = (binder_size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); offp = off_start; - if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t) + if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) tr->data.ptr.buffer, tr->data_size)) { binder_user_error("%d:%d got transaction with invalid data ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; goto err_copy_data_failed; } - if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t) + if (copy_from_user(offp, (const void __user *)(uintptr_t) tr->data.ptr.offsets, tr->offsets_size)) { binder_user_error("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; + return_error_param = -EFAULT; + return_error_line = __LINE__; goto err_copy_data_failed; } if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", proc->pid, thread->pid, (u64)tr->offsets_size); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_offset; } if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { @@ -2132,6 +3046,8 @@ static void binder_transaction(struct binder_proc *proc, proc->pid, thread->pid, (u64)extra_buffers_size); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_offset; } off_end = (void *)off_start + tr->offsets_size; @@ -2148,6 +3064,8 @@ static void binder_transaction(struct binder_proc *proc, (u64)off_min, (u64)t->buffer->data_size); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_offset; } @@ -2162,6 +3080,8 @@ static void binder_transaction(struct binder_proc *proc, ret = binder_translate_binder(fp, t, thread); if (ret < 0) { return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; goto err_translate_failed; } } break; @@ -2173,6 +3093,8 @@ static void binder_transaction(struct binder_proc *proc, ret = binder_translate_handle(fp, t, thread); if (ret < 0) { return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; goto err_translate_failed; } } break; @@ -2184,6 +3106,8 @@ static void binder_transaction(struct binder_proc *proc, if (target_fd < 0) { return_error = BR_FAILED_REPLY; + return_error_param = target_fd; + return_error_line = __LINE__; goto err_translate_failed; } fp->pad_binder = 0; @@ -2200,6 +3124,8 @@ static void binder_transaction(struct binder_proc *proc, binder_user_error("%d:%d got transaction with invalid parent offset or type\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_parent; } if (!binder_validate_fixup(t->buffer, off_start, @@ -2209,12 +3135,16 @@ static void binder_transaction(struct binder_proc *proc, binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", proc->pid, thread->pid); 
return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_parent; } ret = binder_translate_fd_array(fda, parent, t, thread, in_reply_to); if (ret < 0) { return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; goto err_translate_failed; } last_fixup_obj = parent; @@ -2230,20 +3160,24 @@ static void binder_transaction(struct binder_proc *proc, binder_user_error("%d:%d got transaction with too large buffer\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_offset; } - if (copy_from_user_preempt_disabled( - sg_bufp, - (const void __user *)(uintptr_t) - bp->buffer, bp->length)) { + if (copy_from_user(sg_bufp, + (const void __user *)(uintptr_t) + bp->buffer, bp->length)) { binder_user_error("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid); + return_error_param = -EFAULT; return_error = BR_FAILED_REPLY; + return_error_line = __LINE__; goto err_copy_data_failed; } /* Fixup buffer pointer to target proc address space */ bp->buffer = (uintptr_t)sg_bufp + - target_proc->user_buffer_offset; + binder_alloc_get_user_buffer_offset( + &target_proc->alloc); sg_bufp += ALIGN(bp->length, sizeof(u64)); ret = binder_fixup_parent(t, thread, bp, off_start, @@ -2252,6 +3186,8 @@ static void binder_transaction(struct binder_proc *proc, last_fixup_min_off); if (ret < 0) { return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; goto err_translate_failed; } last_fixup_obj = bp; @@ -2261,42 +3197,61 @@ static void binder_transaction(struct binder_proc *proc, binder_user_error("%d:%d got transaction with invalid object type, %x\n", proc->pid, thread->pid, hdr->type); return_error = BR_FAILED_REPLY; + return_error_param = -EINVAL; + return_error_line = __LINE__; goto err_bad_object_type; } } + tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; + binder_enqueue_work(proc, tcomplete, &thread->todo); + t->work.type = BINDER_WORK_TRANSACTION; + if (reply) { + binder_inner_proc_lock(target_proc); + if (target_thread->is_dead) { + binder_inner_proc_unlock(target_proc); + goto err_dead_proc_or_thread; + } BUG_ON(t->buffer->async_transaction != 0); - binder_pop_transaction(target_thread, in_reply_to); + binder_pop_transaction_ilocked(target_thread, in_reply_to); + binder_enqueue_work_ilocked(&t->work, &target_thread->todo); + binder_inner_proc_unlock(target_proc); + wake_up_interruptible_sync(&target_thread->wait); + binder_restore_priority(current, in_reply_to->saved_priority); + binder_free_transaction(in_reply_to); } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); + binder_inner_proc_lock(proc); t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; + binder_inner_proc_unlock(proc); + if (!binder_proc_transaction(t, target_proc, target_thread)) { + binder_inner_proc_lock(proc); + binder_pop_transaction_ilocked(thread, t); + binder_inner_proc_unlock(proc); + goto err_dead_proc_or_thread; + } } else { BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); - if (target_node->has_async_transaction) { - target_list = &target_node->async_todo; - target_wait = NULL; - } else - target_node->has_async_transaction = 1; - } - t->work.type = BINDER_WORK_TRANSACTION; - list_add_tail(&t->work.entry, target_list); - tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; - list_add_tail(&tcomplete->entry, &thread->todo); - 
if (target_wait) { - if (reply || !(t->flags & TF_ONE_WAY)) { - preempt_disable(); - wake_up_interruptible_sync(target_wait); - preempt_enable_no_resched(); - } - else { - wake_up_interruptible(target_wait); - } + if (!binder_proc_transaction(t, target_proc, NULL)) + goto err_dead_proc_or_thread; } + if (target_thread) + binder_thread_dec_tmpref(target_thread); + binder_proc_dec_tmpref(target_proc); + /* + * write barrier to synchronize with initialization + * of log entry + */ + smp_wmb(); + WRITE_ONCE(e->debug_id_done, t_debug_id); return; +err_dead_proc_or_thread: + return_error = BR_DEAD_REPLY; + return_error_line = __LINE__; err_translate_failed: err_bad_object_type: err_bad_offset: @@ -2304,8 +3259,9 @@ err_bad_parent: err_copy_data_failed: trace_binder_transaction_failed_buffer_release(t->buffer); binder_transaction_buffer_release(target_proc, t->buffer, offp); + target_node = NULL; t->buffer->transaction = NULL; - binder_free_buf(target_proc, t->buffer); + binder_alloc_free_buf(&target_proc->alloc, t->buffer); err_binder_alloc_buf_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); @@ -2318,25 +3274,50 @@ err_empty_call_stack: err_dead_binder: err_invalid_target_handle: err_no_context_mgr_node: + if (target_thread) + binder_thread_dec_tmpref(target_thread); + if (target_proc) + binder_proc_dec_tmpref(target_proc); + if (target_node) + binder_dec_node(target_node, 1, 0); + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "%d:%d transaction failed %d, size %lld-%lld\n", - proc->pid, thread->pid, return_error, - (u64)tr->data_size, (u64)tr->offsets_size); + "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", + proc->pid, thread->pid, return_error, return_error_param, + (u64)tr->data_size, (u64)tr->offsets_size, + return_error_line); { struct binder_transaction_log_entry *fe; - fe = binder_transaction_log_add( - &context->transaction_log_failed); + e->return_error = return_error; + e->return_error_param = return_error_param; + e->return_error_line = return_error_line; + fe = binder_transaction_log_add(&binder_transaction_log_failed); *fe = *e; + /* + * write barrier to synchronize with initialization + * of log entry + */ + smp_wmb(); + WRITE_ONCE(e->debug_id_done, t_debug_id); + WRITE_ONCE(fe->debug_id_done, t_debug_id); } - BUG_ON(thread->return_error != BR_OK); + BUG_ON(thread->return_error.cmd != BR_OK); if (in_reply_to) { - thread->return_error = BR_TRANSACTION_COMPLETE; + binder_restore_priority(current, in_reply_to->saved_priority); + thread->return_error.cmd = BR_TRANSACTION_COMPLETE; + binder_enqueue_work(thread->proc, + &thread->return_error.work, + &thread->todo); binder_send_failed_reply(in_reply_to, return_error); - } else - thread->return_error = return_error; + } else { + thread->return_error.cmd = return_error; + binder_enqueue_work(thread->proc, + &thread->return_error.work, + &thread->todo); + } } static int binder_thread_write(struct binder_proc *proc, @@ -2350,15 +3331,17 @@ static int binder_thread_write(struct binder_proc *proc, void __user *ptr = buffer + *consumed; void __user *end = buffer + size; - while (ptr < end && thread->return_error == BR_OK) { - if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr)) + while (ptr < end && thread->return_error.cmd == BR_OK) { + int ret; + + if (get_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); trace_binder_command(cmd); - if (_IOC_NR(cmd) < ARRAY_SIZE(context->binder_stats.bc)) { - context->binder_stats.bc[_IOC_NR(cmd)]++; - 
proc->stats.bc[_IOC_NR(cmd)]++; - thread->stats.bc[_IOC_NR(cmd)]++; + if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { + atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); + atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); + atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); } switch (cmd) { case BC_INCREFS: @@ -2366,53 +3349,61 @@ static int binder_thread_write(struct binder_proc *proc, case BC_RELEASE: case BC_DECREFS: { uint32_t target; - struct binder_ref *ref; const char *debug_string; + bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; + bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; + struct binder_ref_data rdata; - if (get_user_preempt_disabled(target, (uint32_t __user *)ptr)) + if (get_user(target, (uint32_t __user *)ptr)) return -EFAULT; + ptr += sizeof(uint32_t); - if (target == 0 && context->binder_context_mgr_node && - (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { - ref = binder_get_ref_for_node(proc, - context->binder_context_mgr_node); - if (ref->desc != target) { - binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", - proc->pid, thread->pid, - ref->desc); - } - } else - ref = binder_get_ref(proc, target, - cmd == BC_ACQUIRE || - cmd == BC_RELEASE); - if (ref == NULL) { - binder_user_error("%d:%d refcount change on invalid ref %d\n", - proc->pid, thread->pid, target); - break; + ret = -1; + if (increment && !target) { + struct binder_node *ctx_mgr_node; + mutex_lock(&context->context_mgr_node_lock); + ctx_mgr_node = context->binder_context_mgr_node; + if (ctx_mgr_node) + ret = binder_inc_ref_for_node( + proc, ctx_mgr_node, + strong, NULL, &rdata); + mutex_unlock(&context->context_mgr_node_lock); + } + if (ret) + ret = binder_update_ref_for_handle( + proc, target, increment, strong, + &rdata); + if (!ret && rdata.desc != target) { + binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", + proc->pid, thread->pid, + target, rdata.desc); } switch (cmd) { case BC_INCREFS: debug_string = "IncRefs"; - binder_inc_ref(ref, 0, NULL); break; case BC_ACQUIRE: debug_string = "Acquire"; - binder_inc_ref(ref, 1, NULL); break; case BC_RELEASE: debug_string = "Release"; - binder_dec_ref(ref, 1); break; case BC_DECREFS: default: debug_string = "DecRefs"; - binder_dec_ref(ref, 0); + break; + } + if (ret) { + binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n", + proc->pid, thread->pid, debug_string, + strong, target, ret); break; } binder_debug(BINDER_DEBUG_USER_REFS, - "%d:%d %s ref %d desc %d s %d w %d for node %d\n", - proc->pid, thread->pid, debug_string, ref->debug_id, - ref->desc, ref->strong, ref->weak, ref->node->debug_id); + "%d:%d %s ref %d desc %d s %d w %d\n", + proc->pid, thread->pid, debug_string, + rdata.debug_id, rdata.desc, rdata.strong, + rdata.weak); break; } case BC_INCREFS_DONE: @@ -2420,11 +3411,12 @@ static int binder_thread_write(struct binder_proc *proc, binder_uintptr_t node_ptr; binder_uintptr_t cookie; struct binder_node *node; + bool free_node; - if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr)) + if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); - if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr)) + if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); node = binder_get_node(proc, node_ptr); @@ -2444,13 +3436,17 @@ static int binder_thread_write(struct binder_proc *proc, "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", (u64)node_ptr, node->debug_id, 
(u64)cookie, (u64)node->cookie); + binder_put_node(node); break; } + binder_node_inner_lock(node); if (cmd == BC_ACQUIRE_DONE) { if (node->pending_strong_ref == 0) { binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", proc->pid, thread->pid, node->debug_id); + binder_node_inner_unlock(node); + binder_put_node(node); break; } node->pending_strong_ref = 0; @@ -2459,16 +3455,23 @@ static int binder_thread_write(struct binder_proc *proc, binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", proc->pid, thread->pid, node->debug_id); + binder_node_inner_unlock(node); + binder_put_node(node); break; } node->pending_weak_ref = 0; } - binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); + free_node = binder_dec_node_nilocked(node, + cmd == BC_ACQUIRE_DONE, 0); + WARN_ON(free_node); binder_debug(BINDER_DEBUG_USER_REFS, - "%d:%d %s node %d ls %d lw %d\n", + "%d:%d %s node %d ls %d lw %d tr %d\n", proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", - node->debug_id, node->local_strong_refs, node->local_weak_refs); + node->debug_id, node->local_strong_refs, + node->local_weak_refs, node->tmp_refs); + binder_node_inner_unlock(node); + binder_put_node(node); break; } case BC_ATTEMPT_ACQUIRE: @@ -2482,11 +3485,12 @@ static int binder_thread_write(struct binder_proc *proc, binder_uintptr_t data_ptr; struct binder_buffer *buffer; - if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr)) + if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); - buffer = binder_buffer_lookup(proc, data_ptr); + buffer = binder_alloc_prepare_to_free(&proc->alloc, + data_ptr); if (buffer == NULL) { binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", proc->pid, thread->pid, (u64)data_ptr); @@ -2508,15 +3512,25 @@ static int binder_thread_write(struct binder_proc *proc, buffer->transaction = NULL; } if (buffer->async_transaction && buffer->target_node) { - BUG_ON(!buffer->target_node->has_async_transaction); - if (list_empty(&buffer->target_node->async_todo)) - buffer->target_node->has_async_transaction = 0; + struct binder_node *buf_node; + struct binder_work *w; + + buf_node = buffer->target_node; + binder_node_inner_lock(buf_node); + BUG_ON(!buf_node->has_async_transaction); + BUG_ON(buf_node->proc != proc); + w = binder_dequeue_work_head_ilocked( + &buf_node->async_todo); + if (!w) + buf_node->has_async_transaction = 0; else - list_move_tail(buffer->target_node->async_todo.next, &thread->todo); + binder_enqueue_work_ilocked( + w, &thread->todo); + binder_node_inner_unlock(buf_node); } trace_binder_transaction_buffer_release(buffer); binder_transaction_buffer_release(proc, buffer, NULL); - binder_free_buf(proc, buffer); + binder_alloc_free_buf(&proc->alloc, buffer); break; } @@ -2524,8 +3538,7 @@ static int binder_thread_write(struct binder_proc *proc, case BC_REPLY_SG: { struct binder_transaction_data_sg tr; - if (copy_from_user_preempt_disabled(&tr, ptr, - sizeof(tr))) + if (copy_from_user(&tr, ptr, sizeof(tr))) return -EFAULT; ptr += sizeof(tr); binder_transaction(proc, thread, &tr.transaction_data, @@ -2536,7 +3549,7 @@ static int binder_thread_write(struct binder_proc *proc, case BC_REPLY: { struct binder_transaction_data tr; - if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr))) + if (copy_from_user(&tr, ptr, sizeof(tr))) return -EFAULT; ptr += sizeof(tr); binder_transaction(proc, thread, &tr, @@ -2548,6 +3561,7 @@ static int 
binder_thread_write(struct binder_proc *proc, binder_debug(BINDER_DEBUG_THREADS, "%d:%d BC_REGISTER_LOOPER\n", proc->pid, thread->pid); + binder_inner_proc_lock(proc); if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", @@ -2561,6 +3575,7 @@ static int binder_thread_write(struct binder_proc *proc, proc->requested_threads_started++; } thread->looper |= BINDER_LOOPER_STATE_REGISTERED; + binder_inner_proc_unlock(proc); break; case BC_ENTER_LOOPER: binder_debug(BINDER_DEBUG_THREADS, @@ -2585,15 +3600,37 @@ static int binder_thread_write(struct binder_proc *proc, uint32_t target; binder_uintptr_t cookie; struct binder_ref *ref; - struct binder_ref_death *death; + struct binder_ref_death *death = NULL; - if (get_user_preempt_disabled(target, (uint32_t __user *)ptr)) + if (get_user(target, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); - if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr)) + if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); - ref = binder_get_ref(proc, target, false); + if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { + /* + * Allocate memory for death notification + * before taking lock + */ + death = kzalloc(sizeof(*death), GFP_KERNEL); + if (death == NULL) { + WARN_ON(thread->return_error.cmd != + BR_OK); + thread->return_error.cmd = BR_ERROR; + binder_enqueue_work( + thread->proc, + &thread->return_error.work, + &thread->todo); + binder_debug( + BINDER_DEBUG_FAILED_TRANSACTION, + "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", + proc->pid, thread->pid); + break; + } + } + binder_proc_lock(proc); + ref = binder_get_ref_olocked(proc, target, false); if (ref == NULL) { binder_user_error("%d:%d %s invalid ref %d\n", proc->pid, thread->pid, @@ -2601,6 +3638,8 @@ static int binder_thread_write(struct binder_proc *proc, "BC_REQUEST_DEATH_NOTIFICATION" : "BC_CLEAR_DEATH_NOTIFICATION", target); + binder_proc_unlock(proc); + kfree(death); break; } @@ -2610,21 +3649,18 @@ static int binder_thread_write(struct binder_proc *proc, cmd == BC_REQUEST_DEATH_NOTIFICATION ? 
"BC_REQUEST_DEATH_NOTIFICATION" : "BC_CLEAR_DEATH_NOTIFICATION", - (u64)cookie, ref->debug_id, ref->desc, - ref->strong, ref->weak, ref->node->debug_id); + (u64)cookie, ref->data.debug_id, + ref->data.desc, ref->data.strong, + ref->data.weak, ref->node->debug_id); + binder_node_lock(ref->node); if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { if (ref->death) { binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", proc->pid, thread->pid); - break; - } - death = kzalloc_preempt_disabled(sizeof(*death)); - if (death == NULL) { - thread->return_error = BR_ERROR; - binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, - "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", - proc->pid, thread->pid); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); + kfree(death); break; } binder_stats_created(BINDER_STAT_DEATH); @@ -2633,17 +3669,29 @@ static int binder_thread_write(struct binder_proc *proc, ref->death = death; if (ref->node->proc == NULL) { ref->death->work.type = BINDER_WORK_DEAD_BINDER; - if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { - list_add_tail(&ref->death->work.entry, &thread->todo); - } else { - list_add_tail(&ref->death->work.entry, &proc->todo); - wake_up_interruptible(&proc->wait); + if (thread->looper & + (BINDER_LOOPER_STATE_REGISTERED | + BINDER_LOOPER_STATE_ENTERED)) + binder_enqueue_work( + proc, + &ref->death->work, + &thread->todo); + else { + binder_inner_proc_lock(proc); + binder_enqueue_work_ilocked( + &ref->death->work, + &proc->todo); + binder_wakeup_proc_ilocked( + proc); + binder_inner_proc_unlock(proc); } } } else { if (ref->death == NULL) { binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", proc->pid, thread->pid); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); break; } death = ref->death; @@ -2652,33 +3700,52 @@ static int binder_thread_write(struct binder_proc *proc, proc->pid, thread->pid, (u64)death->cookie, (u64)cookie); + binder_node_unlock(ref->node); + binder_proc_unlock(proc); break; } ref->death = NULL; + binder_inner_proc_lock(proc); if (list_empty(&death->work.entry)) { death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; - if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { - list_add_tail(&death->work.entry, &thread->todo); - } else { - list_add_tail(&death->work.entry, &proc->todo); - wake_up_interruptible(&proc->wait); + if (thread->looper & + (BINDER_LOOPER_STATE_REGISTERED | + BINDER_LOOPER_STATE_ENTERED)) + binder_enqueue_work_ilocked( + &death->work, + &thread->todo); + else { + binder_enqueue_work_ilocked( + &death->work, + &proc->todo); + binder_wakeup_proc_ilocked( + proc); } } else { BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; } + binder_inner_proc_unlock(proc); } + binder_node_unlock(ref->node); + binder_proc_unlock(proc); } break; case BC_DEAD_BINDER_DONE: { struct binder_work *w; binder_uintptr_t cookie; struct binder_ref_death *death = NULL; - if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr)) + + if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(cookie); - list_for_each_entry(w, &proc->delivered_death, entry) { - struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); + binder_inner_proc_lock(proc); + list_for_each_entry(w, &proc->delivered_death, + entry) { + struct binder_ref_death *tmp_death = + container_of(w, + struct 
binder_ref_death, + work); if (tmp_death->cookie == cookie) { death = tmp_death; @@ -2692,21 +3759,26 @@ static int binder_thread_write(struct binder_proc *proc, if (death == NULL) { binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", proc->pid, thread->pid, (u64)cookie); + binder_inner_proc_unlock(proc); break; } - - list_del_init(&death->work.entry); + binder_dequeue_work_ilocked(&death->work); if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; - if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { - list_add_tail(&death->work.entry, &thread->todo); - } else { - list_add_tail(&death->work.entry, &proc->todo); - wake_up_interruptible(&proc->wait); + if (thread->looper & + (BINDER_LOOPER_STATE_REGISTERED | + BINDER_LOOPER_STATE_ENTERED)) + binder_enqueue_work_ilocked( + &death->work, &thread->todo); + else { + binder_enqueue_work_ilocked( + &death->work, + &proc->todo); + binder_wakeup_proc_ilocked(proc); } } - } - break; + binder_inner_proc_unlock(proc); + } break; default: pr_err("%d:%d unknown command %d\n", @@ -2722,24 +3794,80 @@ static void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, uint32_t cmd) { trace_binder_return(cmd); - if (_IOC_NR(cmd) < ARRAY_SIZE(proc->stats.br)) { - proc->context->binder_stats.br[_IOC_NR(cmd)]++; - proc->stats.br[_IOC_NR(cmd)]++; - thread->stats.br[_IOC_NR(cmd)]++; + if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { + atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); + atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); + atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); } } -static int binder_has_proc_work(struct binder_proc *proc, - struct binder_thread *thread) +static int binder_has_thread_work(struct binder_thread *thread) { - return !list_empty(&proc->todo) || - (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); + return !binder_worklist_empty(thread->proc, &thread->todo) || + thread->looper_need_return; } -static int binder_has_thread_work(struct binder_thread *thread) +static int binder_put_node_cmd(struct binder_proc *proc, + struct binder_thread *thread, + void __user **ptrp, + binder_uintptr_t node_ptr, + binder_uintptr_t node_cookie, + int node_debug_id, + uint32_t cmd, const char *cmd_name) { - return !list_empty(&thread->todo) || thread->return_error != BR_OK || - (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); + void __user *ptr = *ptrp; + + if (put_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + + if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + + if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + + binder_stat_br(proc, thread, cmd); + binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", + proc->pid, thread->pid, cmd_name, node_debug_id, + (u64)node_ptr, (u64)node_cookie); + + *ptrp = ptr; + return 0; +} + +static int binder_wait_for_work(struct binder_thread *thread, + bool do_proc_work) +{ + DEFINE_WAIT(wait); + struct binder_proc *proc = thread->proc; + int ret = 0; + + freezer_do_not_count(); + binder_inner_proc_lock(proc); + for (;;) { + prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE); + if (binder_has_work_ilocked(thread, do_proc_work)) + break; + if (do_proc_work) + list_add(&thread->waiting_thread_node, + &proc->waiting_threads); + binder_inner_proc_unlock(proc); + schedule(); + binder_inner_proc_lock(proc); + 
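/* + * Woken up, or a signal is pending: take ourselves back + * off the waiting_threads list. list_del_init() is safe + * here even if a waker already removed us in + * binder_select_thread_ilocked(). + */ + 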
list_del_init(&thread->waiting_thread_node); + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + finish_wait(&thread->wait, &wait); + binder_inner_proc_unlock(proc); + freezer_count(); + + return ret; } static int binder_thread_read(struct binder_proc *proc, @@ -2755,43 +3883,21 @@ static int binder_thread_read(struct binder_proc *proc, int wait_for_proc_work; if (*consumed == 0) { - if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr)) + if (put_user(BR_NOOP, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); } retry: - wait_for_proc_work = thread->transaction_stack == NULL && - list_empty(&thread->todo); - - if (thread->return_error != BR_OK && ptr < end) { - if (thread->return_error2 != BR_OK) { - if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - binder_stat_br(proc, thread, thread->return_error2); - if (ptr == end) - goto done; - thread->return_error2 = BR_OK; - } - if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - binder_stat_br(proc, thread, thread->return_error); - thread->return_error = BR_OK; - goto done; - } - + binder_inner_proc_lock(proc); + wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); + binder_inner_proc_unlock(proc); thread->looper |= BINDER_LOOPER_STATE_WAITING; - if (wait_for_proc_work) - proc->ready_threads++; - - binder_unlock(proc->context, __func__); trace_binder_wait_for_work(wait_for_proc_work, !!thread->transaction_stack, - !list_empty(&thread->todo)); + !binder_worklist_empty(proc, &thread->todo)); if (wait_for_proc_work) { if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) { @@ -2800,24 +3906,16 @@ retry: wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); } - binder_set_nice(proc->default_priority); - if (non_block) { - if (!binder_has_proc_work(proc, thread)) - ret = -EAGAIN; - } else - ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); - } else { - if (non_block) { - if (!binder_has_thread_work(thread)) - ret = -EAGAIN; - } else - ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); + binder_restore_priority(current, proc->default_priority); } - binder_lock(proc->context, __func__); + if (non_block) { + if (!binder_has_work(thread, wait_for_proc_work)) + ret = -EAGAIN; + } else { + ret = binder_wait_for_work(thread, wait_for_proc_work); + } - if (wait_for_proc_work) - proc->ready_threads--; thread->looper &= ~BINDER_LOOPER_STATE_WAITING; if (ret) @@ -2826,33 +3924,54 @@ retry: while (1) { uint32_t cmd; struct binder_transaction_data tr; - struct binder_work *w; + struct binder_work *w = NULL; + struct list_head *list = NULL; struct binder_transaction *t = NULL; + struct binder_thread *t_from; + + binder_inner_proc_lock(proc); + if (!binder_worklist_empty_ilocked(&thread->todo)) + list = &thread->todo; + else if (!binder_worklist_empty_ilocked(&proc->todo) && + wait_for_proc_work) + list = &proc->todo; + else { + binder_inner_proc_unlock(proc); - if (!list_empty(&thread->todo)) { - w = list_first_entry(&thread->todo, struct binder_work, - entry); - } else if (!list_empty(&proc->todo) && wait_for_proc_work) { - w = list_first_entry(&proc->todo, struct binder_work, - entry); - } else { /* no data added */ - if (ptr - buffer == 4 && - !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) + if (ptr - buffer == 4 && 
!thread->looper_need_return) goto retry; break; } - if (end - ptr < sizeof(tr) + 4) + if (end - ptr < sizeof(tr) + 4) { + binder_inner_proc_unlock(proc); break; + } + w = binder_dequeue_work_head_ilocked(list); switch (w->type) { case BINDER_WORK_TRANSACTION: { + binder_inner_proc_unlock(proc); t = container_of(w, struct binder_transaction, work); } break; + case BINDER_WORK_RETURN_ERROR: { + struct binder_error *e = container_of( + w, struct binder_error, work); + + WARN_ON(e->cmd == BR_OK); + binder_inner_proc_unlock(proc); + if (put_user(e->cmd, (uint32_t __user *)ptr)) + return -EFAULT; + cmd = e->cmd; + e->cmd = BR_OK; + ptr += sizeof(uint32_t); + + binder_stat_br(proc, thread, cmd); + } break; case BINDER_WORK_TRANSACTION_COMPLETE: { + binder_inner_proc_unlock(proc); cmd = BR_TRANSACTION_COMPLETE; - if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr)) + if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); @@ -2860,112 +3979,134 @@ retry: binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, "%d:%d BR_TRANSACTION_COMPLETE\n", proc->pid, thread->pid); - - list_del(&w->entry); kfree(w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_NODE: { struct binder_node *node = container_of(w, struct binder_node, work); - uint32_t cmd = BR_NOOP; - const char *cmd_name; - int strong = node->internal_strong_refs || node->local_strong_refs; - int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; - - if (weak && !node->has_weak_ref) { - cmd = BR_INCREFS; - cmd_name = "BR_INCREFS"; + int strong, weak; + binder_uintptr_t node_ptr = node->ptr; + binder_uintptr_t node_cookie = node->cookie; + int node_debug_id = node->debug_id; + int has_weak_ref; + int has_strong_ref; + void __user *orig_ptr = ptr; + + BUG_ON(proc != node->proc); + strong = node->internal_strong_refs || + node->local_strong_refs; + weak = !hlist_empty(&node->refs) || + node->local_weak_refs || + node->tmp_refs || strong; + has_strong_ref = node->has_strong_ref; + has_weak_ref = node->has_weak_ref; + + if (weak && !has_weak_ref) { node->has_weak_ref = 1; node->pending_weak_ref = 1; node->local_weak_refs++; - } else if (strong && !node->has_strong_ref) { - cmd = BR_ACQUIRE; - cmd_name = "BR_ACQUIRE"; + } + if (strong && !has_strong_ref) { node->has_strong_ref = 1; node->pending_strong_ref = 1; node->local_strong_refs++; - } else if (!strong && node->has_strong_ref) { - cmd = BR_RELEASE; - cmd_name = "BR_RELEASE"; + } + if (!strong && has_strong_ref) node->has_strong_ref = 0; - } else if (!weak && node->has_weak_ref) { - cmd = BR_DECREFS; - cmd_name = "BR_DECREFS"; + if (!weak && has_weak_ref) node->has_weak_ref = 0; - } - if (cmd != BR_NOOP) { - if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - if (put_user_preempt_disabled(node->ptr, (binder_uintptr_t __user *) - (binder_uintptr_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(binder_uintptr_t); - if (put_user_preempt_disabled(node->cookie, (binder_uintptr_t __user *) - (binder_uintptr_t __user *)ptr)) - return -EFAULT; - ptr += sizeof(binder_uintptr_t); - - binder_stat_br(proc, thread, cmd); - binder_debug(BINDER_DEBUG_USER_REFS, - "%d:%d %s %d u%016llx c%016llx\n", - proc->pid, thread->pid, cmd_name, - node->debug_id, - (u64)node->ptr, (u64)node->cookie); - } else { - list_del_init(&w->entry); - if (!weak && !strong) { - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d:%d node %d u%016llx c%016llx deleted\n", - proc->pid, thread->pid, - node->debug_id, - 
(u64)node->ptr, - (u64)node->cookie); - rb_erase(&node->rb_node, &proc->nodes); - kfree(node); - binder_stats_deleted(BINDER_STAT_NODE); - } else { - binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "%d:%d node %d u%016llx c%016llx state unchanged\n", - proc->pid, thread->pid, - node->debug_id, - (u64)node->ptr, - (u64)node->cookie); - } - } + if (!weak && !strong) { + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "%d:%d node %d u%016llx c%016llx deleted\n", + proc->pid, thread->pid, + node_debug_id, + (u64)node_ptr, + (u64)node_cookie); + rb_erase(&node->rb_node, &proc->nodes); + binder_inner_proc_unlock(proc); + binder_node_lock(node); + /* + * Acquire the node lock before freeing the + * node to serialize with other threads that + * may have been holding the node lock while + * decrementing this node (avoids race where + * this thread frees while the other thread + * is unlocking the node after the final + * decrement) + */ + binder_node_unlock(node); + binder_free_node(node); + } else + binder_inner_proc_unlock(proc); + + if (weak && !has_weak_ref) + ret = binder_put_node_cmd( + proc, thread, &ptr, node_ptr, + node_cookie, node_debug_id, + BR_INCREFS, "BR_INCREFS"); + if (!ret && strong && !has_strong_ref) + ret = binder_put_node_cmd( + proc, thread, &ptr, node_ptr, + node_cookie, node_debug_id, + BR_ACQUIRE, "BR_ACQUIRE"); + if (!ret && !strong && has_strong_ref) + ret = binder_put_node_cmd( + proc, thread, &ptr, node_ptr, + node_cookie, node_debug_id, + BR_RELEASE, "BR_RELEASE"); + if (!ret && !weak && has_weak_ref) + ret = binder_put_node_cmd( + proc, thread, &ptr, node_ptr, + node_cookie, node_debug_id, + BR_DECREFS, "BR_DECREFS"); + if (orig_ptr == ptr) + binder_debug(BINDER_DEBUG_INTERNAL_REFS, + "%d:%d node %d u%016llx c%016llx state unchanged\n", + proc->pid, thread->pid, + node_debug_id, + (u64)node_ptr, + (u64)node_cookie); + if (ret) + return ret; } break; case BINDER_WORK_DEAD_BINDER: case BINDER_WORK_DEAD_BINDER_AND_CLEAR: case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { struct binder_ref_death *death; uint32_t cmd; + binder_uintptr_t cookie; death = container_of(w, struct binder_ref_death, work); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; else cmd = BR_DEAD_BINDER; - if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr)) - return -EFAULT; - ptr += sizeof(uint32_t); - if (put_user_preempt_disabled(death->cookie, (binder_uintptr_t __user *) ptr)) - return -EFAULT; - ptr += sizeof(binder_uintptr_t); - binder_stat_br(proc, thread, cmd); + cookie = death->cookie; + binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, "%d:%d %s %016llx\n", proc->pid, thread->pid, cmd == BR_DEAD_BINDER ? 
"BR_DEAD_BINDER" : "BR_CLEAR_DEATH_NOTIFICATION_DONE", - (u64)death->cookie); - + (u64)cookie); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { - list_del(&w->entry); + binder_inner_proc_unlock(proc); kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); - } else - list_move(&w->entry, &proc->delivered_death); + } else { + binder_enqueue_work_ilocked( + w, &proc->delivered_death); + binder_inner_proc_unlock(proc); + } + if (put_user(cmd, (uint32_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(uint32_t); + if (put_user(cookie, + (binder_uintptr_t __user *)ptr)) + return -EFAULT; + ptr += sizeof(binder_uintptr_t); + binder_stat_br(proc, thread, cmd); if (cmd == BR_DEAD_BINDER) goto done; /* DEAD_BINDER notifications can cause transactions */ } break; @@ -2977,16 +4118,14 @@ retry: BUG_ON(t->buffer == NULL); if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; + struct binder_priority node_prio; tr.target.ptr = target_node->ptr; tr.cookie = target_node->cookie; - t->saved_priority = task_nice(current); - if (t->priority < target_node->min_priority && - !(t->flags & TF_ONE_WAY)) - binder_set_nice(t->priority); - else if (!(t->flags & TF_ONE_WAY) || - t->saved_priority > target_node->min_priority) - binder_set_nice(target_node->min_priority); + node_prio.sched_policy = target_node->sched_policy; + node_prio.prio = target_node->min_priority; + binder_transaction_priority(current, t, node_prio, + target_node->inherit_rt); cmd = BR_TRANSACTION; } else { tr.target.ptr = 0; @@ -2997,8 +4136,9 @@ retry: tr.flags = t->flags; tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); - if (t->from) { - struct task_struct *sender = t->from->proc->tsk; + t_from = binder_get_txn_from(t); + if (t_from) { + struct task_struct *sender = t_from->proc->tsk; tr.sender_pid = task_tgid_nr_ns(sender, task_active_pid_ns(current)); @@ -3008,18 +4148,24 @@ retry: tr.data_size = t->buffer->data_size; tr.offsets_size = t->buffer->offsets_size; - tr.data.ptr.buffer = (binder_uintptr_t)( - (uintptr_t)t->buffer->data + - proc->user_buffer_offset); + tr.data.ptr.buffer = (binder_uintptr_t) + ((uintptr_t)t->buffer->data + + binder_alloc_get_user_buffer_offset(&proc->alloc)); tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); - if (put_user_preempt_disabled(cmd, (uint32_t __user *) ptr)) + if (put_user(cmd, (uint32_t __user *)ptr)) { + if (t_from) + binder_thread_dec_tmpref(t_from); return -EFAULT; + } ptr += sizeof(uint32_t); - if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr))) + if (copy_to_user(ptr, &tr, sizeof(tr))) { + if (t_from) + binder_thread_dec_tmpref(t_from); return -EFAULT; + } ptr += sizeof(tr); trace_binder_transaction_received(t); @@ -3029,21 +4175,22 @@ retry: proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY", - t->debug_id, t->from ? t->from->proc->pid : 0, - t->from ? t->from->pid : 0, cmd, + t->debug_id, t_from ? t_from->proc->pid : 0, + t_from ? 
t_from->pid : 0, cmd, t->buffer->data_size, t->buffer->offsets_size, (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); - list_del(&t->work.entry); + if (t_from) + binder_thread_dec_tmpref(t_from); t->buffer->allow_user_free = 1; if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { + binder_inner_proc_lock(thread->proc); t->to_parent = thread->transaction_stack; t->to_thread = thread; thread->transaction_stack = t; + binder_inner_proc_unlock(thread->proc); } else { - t->buffer->transaction = NULL; - kfree(t); - binder_stats_deleted(BINDER_STAT_TRANSACTION); + binder_free_transaction(t); } break; } @@ -3051,29 +4198,36 @@ retry: done: *consumed = ptr - buffer; - if (proc->requested_threads + proc->ready_threads == 0 && + binder_inner_proc_lock(proc); + if (proc->requested_threads == 0 && + list_empty(&thread->proc->waiting_threads) && proc->requested_threads_started < proc->max_threads && (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ /*spawn a new thread if we leave this out */) { proc->requested_threads++; + binder_inner_proc_unlock(proc); binder_debug(BINDER_DEBUG_THREADS, "%d:%d BR_SPAWN_LOOPER\n", proc->pid, thread->pid); - if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *) buffer)) + if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) return -EFAULT; binder_stat_br(proc, thread, BR_SPAWN_LOOPER); - } + } else + binder_inner_proc_unlock(proc); return 0; } -static void binder_release_work(struct list_head *list) +static void binder_release_work(struct binder_proc *proc, + struct list_head *list) { struct binder_work *w; - while (!list_empty(list)) { - w = list_first_entry(list, struct binder_work, entry); - list_del_init(&w->entry); + while (1) { + w = binder_dequeue_work_head(proc, list); + if (!w) + return; + switch (w->type) { case BINDER_WORK_TRANSACTION: { struct binder_transaction *t; @@ -3086,11 +4240,17 @@ static void binder_release_work(struct list_head *list) binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered transaction %d\n", t->debug_id); - t->buffer->transaction = NULL; - kfree(t); - binder_stats_deleted(BINDER_STAT_TRANSACTION); + binder_free_transaction(t); } } break; + case BINDER_WORK_RETURN_ERROR: { + struct binder_error *e = container_of( + w, struct binder_error, work); + + binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, + "undelivered TRANSACTION_ERROR: %u\n", + e->cmd); + } break; case BINDER_WORK_TRANSACTION_COMPLETE: { binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered TRANSACTION_COMPLETE\n"); @@ -3117,7 +4277,8 @@ static void binder_release_work(struct list_head *list) } -static struct binder_thread *binder_get_thread(struct binder_proc *proc) +static struct binder_thread *binder_get_thread_ilocked( + struct binder_proc *proc, struct binder_thread *new_thread) { struct binder_thread *thread = NULL; struct rb_node *parent = NULL; @@ -3132,38 +4293,102 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc) else if (current->pid > thread->pid) p = &(*p)->rb_right; else - break; + return thread; } - if (*p == NULL) { - thread = kzalloc_preempt_disabled(sizeof(*thread)); - if (thread == NULL) + if (!new_thread) + return NULL; + thread = new_thread; + binder_stats_created(BINDER_STAT_THREAD); + thread->proc = proc; + thread->pid = current->pid; + get_task_struct(current); + thread->task = current; + atomic_set(&thread->tmp_ref, 0); + init_waitqueue_head(&thread->wait); + INIT_LIST_HEAD(&thread->todo); + rb_link_node(&thread->rb_node, parent, 
p); + rb_insert_color(&thread->rb_node, &proc->threads); + thread->looper_need_return = true; + thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; + thread->return_error.cmd = BR_OK; + thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; + thread->reply_error.cmd = BR_OK; + INIT_LIST_HEAD(&new_thread->waiting_thread_node); + return thread; +} + +static struct binder_thread *binder_get_thread(struct binder_proc *proc) +{ + struct binder_thread *thread; + struct binder_thread *new_thread; + + binder_inner_proc_lock(proc); + thread = binder_get_thread_ilocked(proc, NULL); + binder_inner_proc_unlock(proc); + if (!thread) { + new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); + if (new_thread == NULL) return NULL; - binder_stats_created(BINDER_STAT_THREAD); - thread->proc = proc; - thread->pid = current->pid; - init_waitqueue_head(&thread->wait); - INIT_LIST_HEAD(&thread->todo); - rb_link_node(&thread->rb_node, parent, p); - rb_insert_color(&thread->rb_node, &proc->threads); - thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; - thread->return_error = BR_OK; - thread->return_error2 = BR_OK; + binder_inner_proc_lock(proc); + thread = binder_get_thread_ilocked(proc, new_thread); + binder_inner_proc_unlock(proc); + if (thread != new_thread) + kfree(new_thread); } return thread; } -static int binder_free_thread(struct binder_proc *proc, - struct binder_thread *thread) +static void binder_free_proc(struct binder_proc *proc) +{ + BUG_ON(!list_empty(&proc->todo)); + BUG_ON(!list_empty(&proc->delivered_death)); + binder_alloc_deferred_release(&proc->alloc); + put_task_struct(proc->tsk); + binder_stats_deleted(BINDER_STAT_PROC); + kfree(proc); +} + +static void binder_free_thread(struct binder_thread *thread) +{ + BUG_ON(!list_empty(&thread->todo)); + binder_stats_deleted(BINDER_STAT_THREAD); + binder_proc_dec_tmpref(thread->proc); + put_task_struct(thread->task); + kfree(thread); +} + +static int binder_thread_release(struct binder_proc *proc, + struct binder_thread *thread) { struct binder_transaction *t; struct binder_transaction *send_reply = NULL; int active_transactions = 0; + struct binder_transaction *last_t = NULL; + binder_inner_proc_lock(thread->proc); + /* + * take a ref on the proc so it survives + * after we remove this thread from proc->threads. 
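
Note on the binder_get_thread() rework above: it follows the standard pattern for allocating under a spinlock. kzalloc(GFP_KERNEL) can sleep, so it cannot be called with the new inner lock held; the code therefore tries the lookup first, allocates outside the lock on a miss, retries the lookup with the preallocated object, and frees the object if another thread raced in and won. A minimal userspace sketch of the same pattern, with hypothetical names and a mutex standing in for the driver's non-sleeping spinlock:

#include <pthread.h>
#include <stdlib.h>

struct item { int key; struct item *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *head;

/* Lookup with the lock held; if new_item is given and the key is
 * missing, link new_item in -- mirrors binder_get_thread_ilocked(). */
static struct item *lookup_ilocked(int key, struct item *new_item)
{
	struct item *it;

	for (it = head; it; it = it->next)
		if (it->key == key)
			return it;
	if (!new_item)
		return NULL;
	new_item->key = key;
	new_item->next = head;
	head = new_item;
	return new_item;
}

static struct item *get_item(int key)
{
	struct item *it, *new_item;

	pthread_mutex_lock(&lock);
	it = lookup_ilocked(key, NULL);
	pthread_mutex_unlock(&lock);
	if (it)
		return it;

	new_item = calloc(1, sizeof(*new_item)); /* may sleep: outside lock */
	if (!new_item)
		return NULL;
	pthread_mutex_lock(&lock);
	it = lookup_ilocked(key, new_item);
	pthread_mutex_unlock(&lock);
	if (it != new_item)
		free(new_item); /* another thread inserted first */
	return it;
}
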
+ * The corresponding dec is when we actually + * free the thread in binder_free_thread() + */ + proc->tmp_ref++; + /* + * take a ref on this thread to ensure it + * survives while we are releasing it + */ + atomic_inc(&thread->tmp_ref); rb_erase(&thread->rb_node, &proc->threads); t = thread->transaction_stack; - if (t && t->to_thread == thread) - send_reply = t; + if (t) { + spin_lock(&t->lock); + if (t->to_thread == thread) + send_reply = t; + } + thread->is_dead = true; + while (t) { + last_t = t; active_transactions++; binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "release %d:%d transaction %d %s, still active\n", @@ -3184,12 +4409,16 @@ static int binder_free_thread(struct binder_proc *proc, t = t->from_parent; } else BUG(); + spin_unlock(&last_t->lock); + if (t) + spin_lock(&t->lock); } + binder_inner_proc_unlock(thread->proc); + if (send_reply) binder_send_failed_reply(send_reply, BR_DEAD_REPLY); - binder_release_work(&thread->todo); - kfree(thread); - binder_stats_deleted(BINDER_STAT_THREAD); + binder_release_work(proc, &thread->todo); + binder_thread_dec_tmpref(thread); return active_transactions; } @@ -3198,30 +4427,24 @@ static unsigned int binder_poll(struct file *filp, { struct binder_proc *proc = filp->private_data; struct binder_thread *thread = NULL; - int wait_for_proc_work; - - binder_lock(proc->context, __func__); + bool wait_for_proc_work; thread = binder_get_thread(proc); - wait_for_proc_work = thread->transaction_stack == NULL && - list_empty(&thread->todo) && thread->return_error == BR_OK; + binder_inner_proc_lock(thread->proc); + thread->looper |= BINDER_LOOPER_STATE_POLL; + wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); - binder_unlock(proc->context, __func__); + binder_inner_proc_unlock(thread->proc); + + if (binder_has_work(thread, wait_for_proc_work)) + return POLLIN; + + poll_wait(filp, &thread->wait, wait); + + if (binder_has_thread_work(thread)) + return POLLIN; - if (wait_for_proc_work) { - if (binder_has_proc_work(proc, thread)) - return POLLIN; - poll_wait(filp, &proc->wait, wait); - if (binder_has_proc_work(proc, thread)) - return POLLIN; - } else { - if (binder_has_thread_work(thread)) - return POLLIN; - poll_wait(filp, &thread->wait, wait); - if (binder_has_thread_work(thread)) - return POLLIN; - } return 0; } @@ -3239,7 +4462,7 @@ static int binder_ioctl_write_read(struct file *filp, ret = -EINVAL; goto out; } - if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) { + if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { ret = -EFAULT; goto out; } @@ -3257,7 +4480,7 @@ static int binder_ioctl_write_read(struct file *filp, trace_binder_write_done(ret); if (ret < 0) { bwr.read_consumed = 0; - if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto out; } @@ -3268,10 +4491,12 @@ static int binder_ioctl_write_read(struct file *filp, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); trace_binder_read_done(ret); - if (!list_empty(&proc->todo)) - wake_up_interruptible(&proc->wait); + binder_inner_proc_lock(proc); + if (!binder_worklist_empty_ilocked(&proc->todo)) + binder_wakeup_proc_ilocked(proc); + binder_inner_proc_unlock(proc); if (ret < 0) { - if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto out; } @@ -3281,7 +4506,7 @@ static int binder_ioctl_write_read(struct file *filp, proc->pid, thread->pid, (u64)bwr.write_consumed, (u64)bwr.write_size, (u64)bwr.read_consumed, (u64)bwr.read_size); - if 
(copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) { + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { ret = -EFAULT; goto out; } @@ -3294,9 +4519,10 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp) int ret = 0; struct binder_proc *proc = filp->private_data; struct binder_context *context = proc->context; - + struct binder_node *new_node; kuid_t curr_euid = current_euid(); + mutex_lock(&context->context_mgr_node_lock); if (context->binder_context_mgr_node) { pr_err("BINDER_SET_CONTEXT_MGR already set\n"); ret = -EBUSY; @@ -3317,24 +4543,52 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp) } else { context->binder_context_mgr_uid = curr_euid; } - context->binder_context_mgr_node = binder_new_node(proc, 0, 0); - if (!context->binder_context_mgr_node) { + new_node = binder_new_node(proc, NULL); + if (!new_node) { ret = -ENOMEM; goto out; } - context->binder_context_mgr_node->local_weak_refs++; - context->binder_context_mgr_node->local_strong_refs++; - context->binder_context_mgr_node->has_strong_ref = 1; - context->binder_context_mgr_node->has_weak_ref = 1; + binder_node_lock(new_node); + new_node->local_weak_refs++; + new_node->local_strong_refs++; + new_node->has_strong_ref = 1; + new_node->has_weak_ref = 1; + context->binder_context_mgr_node = new_node; + binder_node_unlock(new_node); + binder_put_node(new_node); out: + mutex_unlock(&context->context_mgr_node_lock); return ret; } +static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, + struct binder_node_debug_info *info) { + struct rb_node *n; + binder_uintptr_t ptr = info->ptr; + + memset(info, 0, sizeof(*info)); + + binder_inner_proc_lock(proc); + for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { + struct binder_node *node = rb_entry(n, struct binder_node, + rb_node); + if (node->ptr > ptr) { + info->ptr = node->ptr; + info->cookie = node->cookie; + info->has_strong_ref = node->has_strong_ref; + info->has_weak_ref = node->has_weak_ref; + break; + } + } + binder_inner_proc_unlock(proc); + + return 0; +} + static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int ret; struct binder_proc *proc = filp->private_data; - struct binder_context *context = proc->context; struct binder_thread *thread; unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; @@ -3348,7 +4602,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (ret) goto err_unlocked; - binder_lock(context, __func__); thread = binder_get_thread(proc); if (thread == NULL) { ret = -ENOMEM; @@ -3361,12 +4614,19 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (ret) goto err; break; - case BINDER_SET_MAX_THREADS: - if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { + case BINDER_SET_MAX_THREADS: { + int max_threads; + + if (copy_from_user(&max_threads, ubuf, + sizeof(max_threads))) { ret = -EINVAL; goto err; } + binder_inner_proc_lock(proc); + proc->max_threads = max_threads; + binder_inner_proc_unlock(proc); break; + } case BINDER_SET_CONTEXT_MGR: ret = binder_ioctl_set_ctx_mgr(filp); if (ret) @@ -3375,7 +4635,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case BINDER_THREAD_EXIT: binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", proc->pid, thread->pid); - binder_free_thread(proc, thread); + binder_thread_release(proc, thread); thread = NULL; break; case BINDER_VERSION: { @@ -3385,8 +4645,27 @@ static long binder_ioctl(struct file *filp, 
unsigned int cmd, unsigned long arg) ret = -EINVAL; goto err; } - if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) { - ret = -EINVAL; + if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, + &ver->protocol_version)) { + ret = -EINVAL; + goto err; + } + break; + } + case BINDER_GET_NODE_DEBUG_INFO: { + struct binder_node_debug_info info; + + if (copy_from_user(&info, ubuf, sizeof(info))) { + ret = -EFAULT; + goto err; + } + + ret = binder_ioctl_get_node_debug_info(proc, &info); + if (ret < 0) + goto err; + + if (copy_to_user(ubuf, &info, sizeof(info))) { + ret = -EFAULT; goto err; } break; @@ -3398,8 +4677,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ret = 0; err: if (thread) - thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; - binder_unlock(context, __func__); + thread->looper_need_return = false; wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret && ret != -ERESTARTSYS) pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); @@ -3428,8 +4706,7 @@ static void binder_vma_close(struct vm_area_struct *vma) proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); - proc->vma = NULL; - proc->vma_vm_mm = NULL; + binder_alloc_vma_close(&proc->alloc); binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); } @@ -3447,11 +4724,8 @@ static const struct vm_operations_struct binder_vm_ops = { static int binder_mmap(struct file *filp, struct vm_area_struct *vma) { int ret; - - struct vm_struct *area; struct binder_proc *proc = filp->private_data; const char *failure_string; - struct binder_buffer *buffer; if (proc->tsk != current->group_leader) return -EINVAL; @@ -3460,8 +4734,8 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_end = vma->vm_start + SZ_4M; binder_debug(BINDER_DEBUG_OPEN_CLOSE, - "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", - proc->pid, vma->vm_start, vma->vm_end, + "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", + __func__, proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); @@ -3471,77 +4745,15 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) goto err_bad_arg; } vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; - - mutex_lock(&proc->context->binder_mmap_lock); - if (proc->buffer) { - ret = -EBUSY; - failure_string = "already mapped"; - goto err_already_mapped; - } - - area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); - if (area == NULL) { - ret = -ENOMEM; - failure_string = "get_vm_area"; - goto err_get_vm_area_failed; - } - proc->buffer = area->addr; - proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; - mutex_unlock(&proc->context->binder_mmap_lock); - -#ifdef CONFIG_CPU_CACHE_VIPT - if (cache_is_vipt_aliasing()) { - while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { - pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); - vma->vm_start += PAGE_SIZE; - } - } -#endif - proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); - if (proc->pages == NULL) { - ret = -ENOMEM; - failure_string = "alloc page array"; - goto err_alloc_pages_failed; - } - proc->buffer_size = vma->vm_end - vma->vm_start; - vma->vm_ops = &binder_vm_ops; vma->vm_private_data = proc; - /* 
binder_update_page_range assumes preemption is disabled */ - preempt_disable(); - ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma); - preempt_enable_no_resched(); - if (ret) { - ret = -ENOMEM; - failure_string = "alloc small buf"; - goto err_alloc_small_buf_failed; - } - buffer = proc->buffer; - INIT_LIST_HEAD(&proc->buffers); - list_add(&buffer->entry, &proc->buffers); - buffer->free = 1; - binder_insert_free_buffer(proc, buffer); - proc->free_async_space = proc->buffer_size / 2; - barrier(); + ret = binder_alloc_mmap_handler(&proc->alloc, vma); + if (ret) + return ret; proc->files = get_files_struct(current); - proc->vma = vma; - proc->vma_vm_mm = vma->vm_mm; - - /*pr_info("binder_mmap: %d %lx-%lx maps %p\n", - proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ return 0; -err_alloc_small_buf_failed: - kfree(proc->pages); - proc->pages = NULL; -err_alloc_pages_failed: - mutex_lock(&proc->context->binder_mmap_lock); - vfree(proc->buffer); - proc->buffer = NULL; -err_get_vm_area_failed: -err_already_mapped: - mutex_unlock(&proc->context->binder_mmap_lock); err_bad_arg: pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); @@ -3559,24 +4771,33 @@ static int binder_open(struct inode *nodp, struct file *filp) proc = kzalloc(sizeof(*proc), GFP_KERNEL); if (proc == NULL) return -ENOMEM; + spin_lock_init(&proc->inner_lock); + spin_lock_init(&proc->outer_lock); get_task_struct(current->group_leader); proc->tsk = current->group_leader; INIT_LIST_HEAD(&proc->todo); - init_waitqueue_head(&proc->wait); - proc->default_priority = task_nice(current); + if (binder_supported_policy(current->policy)) { + proc->default_priority.sched_policy = current->policy; + proc->default_priority.prio = current->normal_prio; + } else { + proc->default_priority.sched_policy = SCHED_NORMAL; + proc->default_priority.prio = NICE_TO_PRIO(0); + } + binder_dev = container_of(filp->private_data, struct binder_device, miscdev); proc->context = &binder_dev->context; - - binder_lock(proc->context, __func__); + binder_alloc_init(&proc->alloc); binder_stats_created(BINDER_STAT_PROC); - hlist_add_head(&proc->proc_node, &proc->context->binder_procs); proc->pid = current->group_leader->pid; INIT_LIST_HEAD(&proc->delivered_death); + INIT_LIST_HEAD(&proc->waiting_threads); filp->private_data = proc; - binder_unlock(proc->context, __func__); + mutex_lock(&binder_procs_lock); + hlist_add_head(&proc->proc_node, &binder_procs); + mutex_unlock(&binder_procs_lock); if (binder_debugfs_dir_entry_proc) { char strbuf[11]; @@ -3612,16 +4833,17 @@ static void binder_deferred_flush(struct binder_proc *proc) struct rb_node *n; int wake_count = 0; + binder_inner_proc_lock(proc); for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); - thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; + thread->looper_need_return = true; if (thread->looper & BINDER_LOOPER_STATE_WAITING) { wake_up_interruptible(&thread->wait); wake_count++; } } - wake_up_interruptible_all(&proc->wait); + binder_inner_proc_unlock(proc); binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_flush: %d woke %d threads\n", proc->pid, @@ -3641,15 +4863,22 @@ static int binder_release(struct inode *nodp, struct file *filp) static int binder_node_release(struct binder_node *node, int refs) { struct binder_ref *ref; - struct binder_context *context = node->proc->context; int death = 0; + struct binder_proc *proc = node->proc; 
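
For context on the binder_ioctl() hunks above: BINDER_SET_MAX_THREADS now copies into a local before storing under the inner lock, and BINDER_VERSION drops the preempt-disabled put_user() wrapper. Userspace drives both through plain ioctl() calls against the uapi in linux/android/binder.h; a hedged sketch (error handling trimmed, assumes a kernel that exposes /dev/binder):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binder.h>

int main(void)
{
	struct binder_version ver;
	__u32 max_threads = 15;
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return 1;
	/* driver put_user()s BINDER_CURRENT_PROTOCOL_VERSION back */
	if (ioctl(fd, BINDER_VERSION, &ver) == 0)
		printf("binder protocol version %d\n", ver.protocol_version);
	/* copied from user, then stored under the proc's inner lock */
	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
	close(fd);
	return 0;
}
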
- list_del_init(&node->work.entry); - binder_release_work(&node->async_todo); + binder_release_work(proc, &node->async_todo); - if (hlist_empty(&node->refs)) { - kfree(node); - binder_stats_deleted(BINDER_STAT_NODE); + binder_node_lock(node); + binder_inner_proc_lock(proc); + binder_dequeue_work_ilocked(&node->work); + /* + * The caller must have taken a temporary ref on the node, + */ + BUG_ON(!node->tmp_refs); + if (hlist_empty(&node->refs) && node->tmp_refs == 1) { + binder_inner_proc_unlock(proc); + binder_node_unlock(node); + binder_free_node(node); return refs; } @@ -3657,45 +4886,58 @@ static int binder_node_release(struct binder_node *node, int refs) node->proc = NULL; node->local_strong_refs = 0; node->local_weak_refs = 0; - hlist_add_head(&node->dead_node, &context->binder_dead_nodes); + binder_inner_proc_unlock(proc); + + spin_lock(&binder_dead_nodes_lock); + hlist_add_head(&node->dead_node, &binder_dead_nodes); + spin_unlock(&binder_dead_nodes_lock); hlist_for_each_entry(ref, &node->refs, node_entry) { refs++; - - if (!ref->death) + /* + * Need the node lock to synchronize + * with new notification requests and the + * inner lock to synchronize with queued + * death notifications. + */ + binder_inner_proc_lock(ref->proc); + if (!ref->death) { + binder_inner_proc_unlock(ref->proc); continue; + } death++; - if (list_empty(&ref->death->work.entry)) { - ref->death->work.type = BINDER_WORK_DEAD_BINDER; - list_add_tail(&ref->death->work.entry, - &ref->proc->todo); - wake_up_interruptible(&ref->proc->wait); - } else - BUG(); + BUG_ON(!list_empty(&ref->death->work.entry)); + ref->death->work.type = BINDER_WORK_DEAD_BINDER; + binder_enqueue_work_ilocked(&ref->death->work, + &ref->proc->todo); + binder_wakeup_proc_ilocked(ref->proc); + binder_inner_proc_unlock(ref->proc); } binder_debug(BINDER_DEBUG_DEAD_BINDER, "node %d now dead, refs %d, death %d\n", node->debug_id, refs, death); + binder_node_unlock(node); + binder_put_node(node); return refs; } static void binder_deferred_release(struct binder_proc *proc) { - struct binder_transaction *t; struct binder_context *context = proc->context; struct rb_node *n; - int threads, nodes, incoming_refs, outgoing_refs, buffers, - active_transactions, page_count; + int threads, nodes, incoming_refs, outgoing_refs, active_transactions; - BUG_ON(proc->vma); BUG_ON(proc->files); + mutex_lock(&binder_procs_lock); hlist_del(&proc->proc_node); + mutex_unlock(&binder_procs_lock); + mutex_lock(&context->context_mgr_node_lock); if (context->binder_context_mgr_node && context->binder_context_mgr_node->proc == proc) { binder_debug(BINDER_DEBUG_DEAD_BINDER, @@ -3703,15 +4945,25 @@ static void binder_deferred_release(struct binder_proc *proc) __func__, proc->pid); context->binder_context_mgr_node = NULL; } + mutex_unlock(&context->context_mgr_node_lock); + binder_inner_proc_lock(proc); + /* + * Make sure proc stays alive after we + * remove all the threads + */ + proc->tmp_ref++; + proc->is_dead = true; threads = 0; active_transactions = 0; while ((n = rb_first(&proc->threads))) { struct binder_thread *thread; thread = rb_entry(n, struct binder_thread, rb_node); + binder_inner_proc_unlock(proc); threads++; - active_transactions += binder_free_thread(proc, thread); + active_transactions += binder_thread_release(proc, thread); + binder_inner_proc_lock(proc); } nodes = 0; @@ -3721,96 +4973,55 @@ static void binder_deferred_release(struct binder_proc *proc) node = rb_entry(n, struct binder_node, rb_node); nodes++; + /* + * take a temporary ref on the node before + 
* calling binder_node_release() which will either + * kfree() the node or call binder_put_node() + */ + binder_inc_node_tmpref_ilocked(node); rb_erase(&node->rb_node, &proc->nodes); - incoming_refs = binder_node_release(node, - incoming_refs); + binder_inner_proc_unlock(proc); + incoming_refs = binder_node_release(node, incoming_refs); + binder_inner_proc_lock(proc); } + binder_inner_proc_unlock(proc); outgoing_refs = 0; + binder_proc_lock(proc); while ((n = rb_first(&proc->refs_by_desc))) { struct binder_ref *ref; ref = rb_entry(n, struct binder_ref, rb_node_desc); outgoing_refs++; - binder_delete_ref(ref); - } - - binder_release_work(&proc->todo); - binder_release_work(&proc->delivered_death); - - buffers = 0; - while ((n = rb_first(&proc->allocated_buffers))) { - struct binder_buffer *buffer; - - buffer = rb_entry(n, struct binder_buffer, rb_node); - - t = buffer->transaction; - if (t) { - t->buffer = NULL; - buffer->transaction = NULL; - pr_err("release proc %d, transaction %d, not freed\n", - proc->pid, t->debug_id); - /*BUG();*/ - } - - binder_free_buf(proc, buffer); - buffers++; + binder_cleanup_ref_olocked(ref); + binder_proc_unlock(proc); + binder_free_ref(ref); + binder_proc_lock(proc); } + binder_proc_unlock(proc); - binder_stats_deleted(BINDER_STAT_PROC); - - page_count = 0; - if (proc->pages) { - int i; - - for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { - void *page_addr; - - if (!proc->pages[i]) - continue; - - page_addr = proc->buffer + i * PAGE_SIZE; - binder_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%s: %d: page %d at %p not freed\n", - __func__, proc->pid, i, page_addr); - unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); - __free_page(proc->pages[i]); - page_count++; - } - kfree(proc->pages); - preempt_enable_no_resched(); - vfree(proc->buffer); - preempt_disable(); - } - - put_task_struct(proc->tsk); + binder_release_work(proc, &proc->todo); + binder_release_work(proc, &proc->delivered_death); binder_debug(BINDER_DEBUG_OPEN_CLOSE, - "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", + "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", __func__, proc->pid, threads, nodes, incoming_refs, - outgoing_refs, active_transactions, buffers, page_count); + outgoing_refs, active_transactions); - kfree(proc); + binder_proc_dec_tmpref(proc); } static void binder_deferred_func(struct work_struct *work) { struct binder_proc *proc; struct files_struct *files; - struct binder_context *context = - container_of(work, struct binder_context, deferred_work); int defer; do { - trace_binder_lock(__func__); - mutex_lock(&context->binder_main_lock); - trace_binder_locked(__func__); - - mutex_lock(&context->binder_deferred_lock); - preempt_disable(); - if (!hlist_empty(&context->binder_deferred_list)) { - proc = hlist_entry(context->binder_deferred_list.first, + mutex_lock(&binder_deferred_lock); + if (!hlist_empty(&binder_deferred_list)) { + proc = hlist_entry(binder_deferred_list.first, struct binder_proc, deferred_work_node); hlist_del_init(&proc->deferred_work_node); defer = proc->deferred_work; @@ -3819,7 +5030,7 @@ static void binder_deferred_func(struct work_struct *work) proc = NULL; defer = 0; } - mutex_unlock(&context->binder_deferred_lock); + mutex_unlock(&binder_deferred_lock); files = NULL; if (defer & BINDER_DEFERRED_PUT_FILES) { @@ -3834,63 +5045,72 @@ static void binder_deferred_func(struct work_struct *work) if (defer & BINDER_DEFERRED_RELEASE) binder_deferred_release(proc); /* frees proc */ - 
trace_binder_unlock(__func__); - mutex_unlock(&context->binder_main_lock); - preempt_enable_no_resched(); if (files) put_files_struct(files); } while (proc); } +static DECLARE_WORK(binder_deferred_work, binder_deferred_func); static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) { - mutex_lock(&proc->context->binder_deferred_lock); + mutex_lock(&binder_deferred_lock); proc->deferred_work |= defer; if (hlist_unhashed(&proc->deferred_work_node)) { hlist_add_head(&proc->deferred_work_node, - &proc->context->binder_deferred_list); - queue_work(proc->context->binder_deferred_workqueue, - &proc->context->deferred_work); + &binder_deferred_list); + queue_work(binder_deferred_workqueue, &binder_deferred_work); } - mutex_unlock(&proc->context->binder_deferred_lock); + mutex_unlock(&binder_deferred_lock); } -static void print_binder_transaction(struct seq_file *m, const char *prefix, - struct binder_transaction *t) +static void print_binder_transaction_ilocked(struct seq_file *m, + struct binder_proc *proc, + const char *prefix, + struct binder_transaction *t) { + struct binder_proc *to_proc; + struct binder_buffer *buffer = t->buffer; + + WARN_ON(!spin_is_locked(&proc->inner_lock)); + spin_lock(&t->lock); + to_proc = t->to_proc; seq_printf(m, - "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", + "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d", prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0, t->from ? t->from->pid : 0, - t->to_proc ? t->to_proc->pid : 0, + to_proc ? to_proc->pid : 0, t->to_thread ? t->to_thread->pid : 0, - t->code, t->flags, t->priority, t->need_reply); - if (t->buffer == NULL) { + t->code, t->flags, t->priority.sched_policy, + t->priority.prio, t->need_reply); + spin_unlock(&t->lock); + + if (proc != to_proc) { + /* + * Can only safely deref buffer if we are holding the + * correct proc inner lock for this node + */ + seq_puts(m, "\n"); + return; + } + + if (buffer == NULL) { seq_puts(m, " buffer free\n"); return; } - if (t->buffer->target_node) - seq_printf(m, " node %d", - t->buffer->target_node->debug_id); + if (buffer->target_node) + seq_printf(m, " node %d", buffer->target_node->debug_id); seq_printf(m, " size %zd:%zd data %p\n", - t->buffer->data_size, t->buffer->offsets_size, - t->buffer->data); -} - -static void print_binder_buffer(struct seq_file *m, const char *prefix, - struct binder_buffer *buffer) -{ - seq_printf(m, "%s %d: %p size %zd:%zd %s\n", - prefix, buffer->debug_id, buffer->data, buffer->data_size, buffer->offsets_size, - buffer->transaction ? 
"active" : "delivered"); + buffer->data); } -static void print_binder_work(struct seq_file *m, const char *prefix, - const char *transaction_prefix, - struct binder_work *w) +static void print_binder_work_ilocked(struct seq_file *m, + struct binder_proc *proc, + const char *prefix, + const char *transaction_prefix, + struct binder_work *w) { struct binder_node *node; struct binder_transaction *t; @@ -3898,8 +5118,16 @@ static void print_binder_work(struct seq_file *m, const char *prefix, switch (w->type) { case BINDER_WORK_TRANSACTION: t = container_of(w, struct binder_transaction, work); - print_binder_transaction(m, transaction_prefix, t); + print_binder_transaction_ilocked( + m, proc, transaction_prefix, t); break; + case BINDER_WORK_RETURN_ERROR: { + struct binder_error *e = container_of( + w, struct binder_error, work); + + seq_printf(m, "%stransaction error: %u\n", + prefix, e->cmd); + } break; case BINDER_WORK_TRANSACTION_COMPLETE: seq_printf(m, "%stransaction complete\n", prefix); break; @@ -3924,70 +5152,90 @@ static void print_binder_work(struct seq_file *m, const char *prefix, } } -static void print_binder_thread(struct seq_file *m, - struct binder_thread *thread, - int print_always) +static void print_binder_thread_ilocked(struct seq_file *m, + struct binder_thread *thread, + int print_always) { struct binder_transaction *t; struct binder_work *w; size_t start_pos = m->count; size_t header_pos; - seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); + WARN_ON(!spin_is_locked(&thread->proc->inner_lock)); + seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", + thread->pid, thread->looper, + thread->looper_need_return, + atomic_read(&thread->tmp_ref)); header_pos = m->count; t = thread->transaction_stack; while (t) { if (t->from == thread) { - print_binder_transaction(m, - " outgoing transaction", t); + print_binder_transaction_ilocked(m, thread->proc, + " outgoing transaction", t); t = t->from_parent; } else if (t->to_thread == thread) { - print_binder_transaction(m, + print_binder_transaction_ilocked(m, thread->proc, " incoming transaction", t); t = t->to_parent; } else { - print_binder_transaction(m, " bad transaction", t); + print_binder_transaction_ilocked(m, thread->proc, + " bad transaction", t); t = NULL; } } list_for_each_entry(w, &thread->todo, entry) { - print_binder_work(m, " ", " pending transaction", w); + print_binder_work_ilocked(m, thread->proc, " ", + " pending transaction", w); } if (!print_always && m->count == header_pos) m->count = start_pos; } -static void print_binder_node(struct seq_file *m, struct binder_node *node) +static void print_binder_node_nilocked(struct seq_file *m, + struct binder_node *node) { struct binder_ref *ref; struct binder_work *w; int count; + WARN_ON(!spin_is_locked(&node->lock)); + if (node->proc) + WARN_ON(!spin_is_locked(&node->proc->inner_lock)); + count = 0; hlist_for_each_entry(ref, &node->refs, node_entry) count++; - seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d", + seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d", node->debug_id, (u64)node->ptr, (u64)node->cookie, + node->sched_policy, node->min_priority, node->has_strong_ref, node->has_weak_ref, node->local_strong_refs, node->local_weak_refs, - node->internal_strong_refs, count); + node->internal_strong_refs, count, node->tmp_refs); if (count) { seq_puts(m, " proc"); hlist_for_each_entry(ref, &node->refs, node_entry) seq_printf(m, " %d", ref->proc->pid); } seq_puts(m, "\n"); - 
list_for_each_entry(w, &node->async_todo, entry) - print_binder_work(m, " ", - " pending async transaction", w); + if (node->proc) { + list_for_each_entry(w, &node->async_todo, entry) + print_binder_work_ilocked(m, node->proc, " ", + " pending async transaction", w); + } } -static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) +static void print_binder_ref_olocked(struct seq_file *m, + struct binder_ref *ref) { + WARN_ON(!spin_is_locked(&ref->proc->outer_lock)); + binder_node_lock(ref->node); seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", - ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", - ref->node->debug_id, ref->strong, ref->weak, ref->death); + ref->data.debug_id, ref->data.desc, + ref->node->proc ? "" : "dead ", + ref->node->debug_id, ref->data.strong, + ref->data.weak, ref->death); + binder_node_unlock(ref->node); } static void print_binder_proc(struct seq_file *m, @@ -3997,36 +5245,60 @@ static void print_binder_proc(struct seq_file *m, struct rb_node *n; size_t start_pos = m->count; size_t header_pos; + struct binder_node *last_node = NULL; seq_printf(m, "proc %d\n", proc->pid); seq_printf(m, "context %s\n", proc->context->name); header_pos = m->count; + binder_inner_proc_lock(proc); for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) - print_binder_thread(m, rb_entry(n, struct binder_thread, + print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, rb_node), print_all); + for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); - if (print_all || node->has_async_transaction) - print_binder_node(m, node); - } + /* + * take a temporary reference on the node so it + * survives and isn't removed from the tree + * while we print it. 
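
The node loops in these print paths use hand-over-hand temporary references, as the following hunks show: pin the current node with a tmp ref before dropping the inner lock, print it under its own lock, re-take the inner lock, and only then put the previous node. A simplified userspace sketch of the pattern; it assumes, as the driver guarantees via tmp_refs, that a pinned element is never freed or reused until its reference drops (the actual free is elided here):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct node {
	int id;
	atomic_int tmp_refs;
	struct node *next; /* stand-in for the rb-tree linkage */
};

static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *nodes;

static void node_put(struct node *n)
{
	atomic_fetch_sub(&n->tmp_refs, 1); /* freeing elided in this sketch */
}

static void print_all(void)
{
	struct node *n, *last = NULL;

	pthread_mutex_lock(&inner_lock);
	for (n = nodes; n; n = n->next) {
		atomic_fetch_add(&n->tmp_refs, 1); /* pin before unlock */
		pthread_mutex_unlock(&inner_lock);
		if (last)
			node_put(last); /* done touching the previous node */
		printf("node %d\n", n->id); /* may block; lock dropped */
		last = n;
		pthread_mutex_lock(&inner_lock); /* n pinned: ->next usable */
	}
	pthread_mutex_unlock(&inner_lock);
	if (last)
		node_put(last);
}
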
+ */ + binder_inc_node_tmpref_ilocked(node); + /* Need to drop inner lock to take node lock */ + binder_inner_proc_unlock(proc); + if (last_node) + binder_put_node(last_node); + binder_node_inner_lock(node); + print_binder_node_nilocked(m, node); + binder_node_inner_unlock(node); + last_node = node; + binder_inner_proc_lock(proc); + } + binder_inner_proc_unlock(proc); + if (last_node) + binder_put_node(last_node); + if (print_all) { + binder_proc_lock(proc); for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) - print_binder_ref(m, rb_entry(n, struct binder_ref, - rb_node_desc)); + print_binder_ref_olocked(m, rb_entry(n, + struct binder_ref, + rb_node_desc)); + binder_proc_unlock(proc); } - for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) - print_binder_buffer(m, " buffer", - rb_entry(n, struct binder_buffer, rb_node)); + binder_alloc_print_allocated(m, &proc->alloc); + binder_inner_proc_lock(proc); list_for_each_entry(w, &proc->todo, entry) - print_binder_work(m, " ", " pending transaction", w); + print_binder_work_ilocked(m, proc, " ", + " pending transaction", w); list_for_each_entry(w, &proc->delivered_death, entry) { seq_puts(m, " has delivered dead binder\n"); break; } + binder_inner_proc_unlock(proc); if (!print_all && m->count == header_pos) m->count = start_pos; } @@ -4084,54 +5356,45 @@ static const char * const binder_objstat_strings[] = { "transaction_complete" }; -static void add_binder_stats(struct binder_stats *from, struct binder_stats *to) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(to->bc); i++) - to->bc[i] += from->bc[i]; - - for (i = 0; i < ARRAY_SIZE(to->br); i++) - to->br[i] += from->br[i]; -} - static void print_binder_stats(struct seq_file *m, const char *prefix, - struct binder_stats *stats, - struct binder_obj_stats *obj_stats) + struct binder_stats *stats) { int i; BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != ARRAY_SIZE(binder_command_strings)); for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { - if (stats->bc[i]) + int temp = atomic_read(&stats->bc[i]); + + if (temp) seq_printf(m, "%s%s: %d\n", prefix, - binder_command_strings[i], stats->bc[i]); + binder_command_strings[i], temp); } BUILD_BUG_ON(ARRAY_SIZE(stats->br) != ARRAY_SIZE(binder_return_strings)); for (i = 0; i < ARRAY_SIZE(stats->br); i++) { - if (stats->br[i]) + int temp = atomic_read(&stats->br[i]); + + if (temp) seq_printf(m, "%s%s: %d\n", prefix, - binder_return_strings[i], stats->br[i]); + binder_return_strings[i], temp); } - if (!obj_stats) - return; - - BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) != + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(binder_objstat_strings)); - BUILD_BUG_ON(ARRAY_SIZE(obj_stats->obj_created) != - ARRAY_SIZE(obj_stats->obj_deleted)); - for (i = 0; i < ARRAY_SIZE(obj_stats->obj_created); i++) { - int obj_created = atomic_read(&obj_stats->obj_created[i]); - int obj_deleted = atomic_read(&obj_stats->obj_deleted[i]); - - if (obj_created || obj_deleted) - seq_printf(m, "%s%s: active %d total %d\n", prefix, - binder_objstat_strings[i], - obj_created - obj_deleted, obj_created); + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != + ARRAY_SIZE(stats->obj_deleted)); + for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { + int created = atomic_read(&stats->obj_created[i]); + int deleted = atomic_read(&stats->obj_deleted[i]); + + if (created || deleted) + seq_printf(m, "%s%s: active %d total %d\n", + prefix, + binder_objstat_strings[i], + created - deleted, + created); } } @@ -4139,226 +5402,193 @@ static void 
print_binder_proc_stats(struct seq_file *m, struct binder_proc *proc) { struct binder_work *w; + struct binder_thread *thread; struct rb_node *n; - int count, strong, weak; + int count, strong, weak, ready_threads; + size_t free_async_space = + binder_alloc_get_free_async_space(&proc->alloc); seq_printf(m, "proc %d\n", proc->pid); seq_printf(m, "context %s\n", proc->context->name); count = 0; + ready_threads = 0; + binder_inner_proc_lock(proc); for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) count++; + + list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) + ready_threads++; + seq_printf(m, " threads: %d\n", count); seq_printf(m, " requested threads: %d+%d/%d\n" " ready threads %d\n" " free async space %zd\n", proc->requested_threads, proc->requested_threads_started, proc->max_threads, - proc->ready_threads, proc->free_async_space); + ready_threads, + free_async_space); count = 0; for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) count++; + binder_inner_proc_unlock(proc); seq_printf(m, " nodes: %d\n", count); count = 0; strong = 0; weak = 0; + binder_proc_lock(proc); for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc); count++; - strong += ref->strong; - weak += ref->weak; + strong += ref->data.strong; + weak += ref->data.weak; } + binder_proc_unlock(proc); seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); - count = 0; - for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) - count++; + count = binder_alloc_get_allocated_count(&proc->alloc); seq_printf(m, " buffers: %d\n", count); count = 0; + binder_inner_proc_lock(proc); list_for_each_entry(w, &proc->todo, entry) { - switch (w->type) { - case BINDER_WORK_TRANSACTION: + if (w->type == BINDER_WORK_TRANSACTION) count++; - break; - default: - break; - } } + binder_inner_proc_unlock(proc); seq_printf(m, " pending transactions: %d\n", count); - print_binder_stats(m, " ", &proc->stats, NULL); + print_binder_stats(m, " ", &proc->stats); } static int binder_state_show(struct seq_file *m, void *unused) { - struct binder_device *device; - struct binder_context *context; struct binder_proc *proc; struct binder_node *node; - int do_lock = !binder_debug_no_lock; - bool wrote_dead_nodes_header = false; + struct binder_node *last_node = NULL; seq_puts(m, "binder state:\n"); - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - if (do_lock) - binder_lock(context, __func__); - if (!wrote_dead_nodes_header && - !hlist_empty(&context->binder_dead_nodes)) { - seq_puts(m, "dead nodes:\n"); - wrote_dead_nodes_header = true; - } - hlist_for_each_entry(node, &context->binder_dead_nodes, - dead_node) - print_binder_node(m, node); - - if (do_lock) - binder_unlock(context, __func__); - } - - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - if (do_lock) - binder_lock(context, __func__); + spin_lock(&binder_dead_nodes_lock); + if (!hlist_empty(&binder_dead_nodes)) + seq_puts(m, "dead nodes:\n"); + hlist_for_each_entry(node, &binder_dead_nodes, dead_node) { + /* + * take a temporary reference on the node so it + * survives and isn't removed from the list + * while we print it. 
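
The binder_stats rework above replaces plain integer counters (previously guarded by the global lock) with atomics: hot paths increment without locking, and the debugfs reader uses atomic_read(). Each counter is individually consistent while the set as a whole is only approximately so, which is acceptable for debug output. A sketch of the idea in C11 atomics, with a hypothetical command set:

#include <stdatomic.h>
#include <stdio.h>

enum { BC_TRANSACTION, BC_REPLY, BC_FREE_BUFFER, BC_MAX };

static atomic_int bc_stats[BC_MAX];
static const char * const bc_names[BC_MAX] = {
	"BC_TRANSACTION", "BC_REPLY", "BC_FREE_BUFFER",
};

/* hot path: lock-free increment */
static void stat_bc(int cmd)
{
	atomic_fetch_add(&bc_stats[cmd], 1);
}

/* debugfs-style reader: each counter read is atomic; the snapshot as
 * a whole is only approximately consistent */
static void print_stats(void)
{
	for (int i = 0; i < BC_MAX; i++) {
		int v = atomic_load(&bc_stats[i]);

		if (v)
			printf("%s: %d\n", bc_names[i], v);
	}
}
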
+ */ + node->tmp_refs++; + spin_unlock(&binder_dead_nodes_lock); + if (last_node) + binder_put_node(last_node); + binder_node_lock(node); + print_binder_node_nilocked(m, node); + binder_node_unlock(node); + last_node = node; + spin_lock(&binder_dead_nodes_lock); + } + spin_unlock(&binder_dead_nodes_lock); + if (last_node) + binder_put_node(last_node); + + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(proc, &binder_procs, proc_node) + print_binder_proc(m, proc, 1); + mutex_unlock(&binder_procs_lock); - hlist_for_each_entry(proc, &context->binder_procs, proc_node) - print_binder_proc(m, proc, 1); - if (do_lock) - binder_unlock(context, __func__); - } return 0; } static int binder_stats_show(struct seq_file *m, void *unused) { - struct binder_device *device; - struct binder_context *context; struct binder_proc *proc; - struct binder_stats total_binder_stats; - int do_lock = !binder_debug_no_lock; - - memset(&total_binder_stats, 0, sizeof(struct binder_stats)); - - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - if (do_lock) - binder_lock(context, __func__); - - add_binder_stats(&context->binder_stats, &total_binder_stats); - - if (do_lock) - binder_unlock(context, __func__); - } seq_puts(m, "binder stats:\n"); - print_binder_stats(m, "", &total_binder_stats, &binder_obj_stats); - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - if (do_lock) - binder_lock(context, __func__); + print_binder_stats(m, "", &binder_stats); + + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(proc, &binder_procs, proc_node) + print_binder_proc_stats(m, proc); + mutex_unlock(&binder_procs_lock); - hlist_for_each_entry(proc, &context->binder_procs, proc_node) - print_binder_proc_stats(m, proc); - if (do_lock) - binder_unlock(context, __func__); - } return 0; } static int binder_transactions_show(struct seq_file *m, void *unused) { - struct binder_device *device; - struct binder_context *context; struct binder_proc *proc; - int do_lock = !binder_debug_no_lock; seq_puts(m, "binder transactions:\n"); - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - if (do_lock) - binder_lock(context, __func__); - - hlist_for_each_entry(proc, &context->binder_procs, proc_node) - print_binder_proc(m, proc, 0); - if (do_lock) - binder_unlock(context, __func__); - } + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(proc, &binder_procs, proc_node) + print_binder_proc(m, proc, 0); + mutex_unlock(&binder_procs_lock); + return 0; } static int binder_proc_show(struct seq_file *m, void *unused) { - struct binder_device *device; - struct binder_context *context; struct binder_proc *itr; int pid = (unsigned long)m->private; - int do_lock = !binder_debug_no_lock; - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - if (do_lock) - binder_lock(context, __func__); - - hlist_for_each_entry(itr, &context->binder_procs, proc_node) { - if (itr->pid == pid) { - seq_puts(m, "binder proc state:\n"); - print_binder_proc(m, itr, 1); - } + mutex_lock(&binder_procs_lock); + hlist_for_each_entry(itr, &binder_procs, proc_node) { + if (itr->pid == pid) { + seq_puts(m, "binder proc state:\n"); + print_binder_proc(m, itr, 1); } - if (do_lock) - binder_unlock(context, __func__); } + mutex_unlock(&binder_procs_lock); + return 0; } static void print_binder_transaction_log_entry(struct seq_file *m, struct binder_transaction_log_entry *e) { + int debug_id = READ_ONCE(e->debug_id_done); + /* + * read 
barrier to guarantee debug_id_done read before + * we print the log values + */ + smp_rmb(); seq_printf(m, - "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n", + "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d", e->debug_id, (e->call_type == 2) ? "reply" : ((e->call_type == 1) ? "async" : "call "), e->from_proc, e->from_thread, e->to_proc, e->to_thread, e->context_name, - e->to_node, e->target_handle, e->data_size, e->offsets_size); -} - -static int print_binder_transaction_log(struct seq_file *m, - struct binder_transaction_log *log) -{ - int i; - if (log->full) { - for (i = log->next; i < ARRAY_SIZE(log->entry); i++) - print_binder_transaction_log_entry(m, &log->entry[i]); - } - for (i = 0; i < log->next; i++) - print_binder_transaction_log_entry(m, &log->entry[i]); - return 0; + e->to_node, e->target_handle, e->data_size, e->offsets_size, + e->return_error, e->return_error_param, + e->return_error_line); + /* + * read-barrier to guarantee read of debug_id_done after + * done printing the fields of the entry + */ + smp_rmb(); + seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? + "\n" : " (incomplete)\n"); } static int binder_transaction_log_show(struct seq_file *m, void *unused) { - struct binder_device *device; - struct binder_context *context; - - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - print_binder_transaction_log(m, &context->transaction_log); - } - return 0; -} + struct binder_transaction_log *log = m->private; + unsigned int log_cur = atomic_read(&log->cur); + unsigned int count; + unsigned int cur; + int i; -static int binder_failed_transaction_log_show(struct seq_file *m, void *unused) -{ - struct binder_device *device; - struct binder_context *context; + count = log_cur + 1; + cur = count < ARRAY_SIZE(log->entry) && !log->full ? 
+ 0 : count % ARRAY_SIZE(log->entry); + if (count > ARRAY_SIZE(log->entry) || log->full) + count = ARRAY_SIZE(log->entry); + for (i = 0; i < count; i++) { + unsigned int index = cur++ % ARRAY_SIZE(log->entry); - hlist_for_each_entry(device, &binder_devices, hlist) { - context = &device->context; - print_binder_transaction_log(m, - &context->transaction_log_failed); + print_binder_transaction_log_entry(m, &log->entry[index]); } return 0; } @@ -4378,20 +5608,11 @@ BINDER_DEBUG_ENTRY(state); BINDER_DEBUG_ENTRY(stats); BINDER_DEBUG_ENTRY(transactions); BINDER_DEBUG_ENTRY(transaction_log); -BINDER_DEBUG_ENTRY(failed_transaction_log); - -static void __init free_binder_device(struct binder_device *device) -{ - if (device->context.binder_deferred_workqueue) - destroy_workqueue(device->context.binder_deferred_workqueue); - kfree(device); -} static int __init init_binder_device(const char *name) { int ret; struct binder_device *binder_device; - struct binder_context *context; binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); if (!binder_device) @@ -4401,65 +5622,34 @@ static int __init init_binder_device(const char *name) binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; binder_device->miscdev.name = name; - context = &binder_device->context; - context->binder_context_mgr_uid = INVALID_UID; - context->name = name; - - mutex_init(&context->binder_main_lock); - mutex_init(&context->binder_deferred_lock); - mutex_init(&context->binder_mmap_lock); - - context->binder_deferred_workqueue = - create_singlethread_workqueue(name); - - if (!context->binder_deferred_workqueue) { - ret = -ENOMEM; - goto err_create_singlethread_workqueue_failed; - } - - INIT_HLIST_HEAD(&context->binder_procs); - INIT_HLIST_HEAD(&context->binder_dead_nodes); - INIT_HLIST_HEAD(&context->binder_deferred_list); - INIT_WORK(&context->deferred_work, binder_deferred_func); + binder_device->context.binder_context_mgr_uid = INVALID_UID; + binder_device->context.name = name; + mutex_init(&binder_device->context.context_mgr_node_lock); ret = misc_register(&binder_device->miscdev); if (ret < 0) { - goto err_misc_register_failed; + kfree(binder_device); + return ret; } hlist_add_head(&binder_device->hlist, &binder_devices); - return ret; - -err_create_singlethread_workqueue_failed: -err_misc_register_failed: - free_binder_device(binder_device); return ret; } static int __init binder_init(void) { - int ret = 0; + int ret; char *device_name, *device_names; struct binder_device *device; struct hlist_node *tmp; - /* - * Copy the module_parameter string, because we don't want to - * tokenize it in-place. 
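
The transaction log is now a lock-free ring: writers atomically bump log->cur to claim a slot, and the reader above derives the valid window from it. If the log has wrapped, it prints ARRAY_SIZE(log->entry) entries starting at (cur + 1) % ARRAY_SIZE(log->entry); otherwise it prints from slot 0. The debug_id_done read barriers then flag entries a writer was still filling. A simplified sketch of the claim/read arithmetic (names hypothetical; the smp_rmb()/incomplete detection is elided):

#include <stdatomic.h>
#include <stdio.h>

#define LOG_SIZE 32

struct log {
	atomic_uint cur; /* init to ~0U so the first claim lands in slot 0 */
	int full;
	int entry[LOG_SIZE];
};

static void log_add(struct log *log, int value)
{
	unsigned int cur = atomic_fetch_add(&log->cur, 1) + 1;

	if (cur >= LOG_SIZE)
		log->full = 1;
	log->entry[cur % LOG_SIZE] = value;
}

static void log_dump(struct log *log)
{
	unsigned int log_cur = atomic_load(&log->cur);
	unsigned int count = log_cur + 1;
	unsigned int cur = count < LOG_SIZE && !log->full ?
				0 : count % LOG_SIZE;

	if (count > LOG_SIZE || log->full)
		count = LOG_SIZE;
	for (unsigned int i = 0; i < count; i++)
		printf("entry %d\n", log->entry[cur++ % LOG_SIZE]);
}
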
- */ - device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); - if (!device_names) + atomic_set(&binder_transaction_log.cur, ~0U); + atomic_set(&binder_transaction_log_failed.cur, ~0U); + binder_deferred_workqueue = create_singlethread_workqueue("binder"); + if (!binder_deferred_workqueue) return -ENOMEM; - strcpy(device_names, binder_devices_param); - - while ((device_name = strsep(&device_names, ","))) { - ret = init_binder_device(device_name); - if (ret) - goto err_init_binder_device_failed; - } - binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); if (binder_debugfs_dir_entry_root) binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", @@ -4484,13 +5674,30 @@ static int __init binder_init(void) debugfs_create_file("transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, - NULL, + &binder_transaction_log, &binder_transaction_log_fops); debugfs_create_file("failed_transaction_log", S_IRUGO, binder_debugfs_dir_entry_root, - NULL, - &binder_failed_transaction_log_fops); + &binder_transaction_log_failed, + &binder_transaction_log_fops); + } + + /* + * Copy the module_parameter string, because we don't want to + * tokenize it in-place. + */ + device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL); + if (!device_names) { + ret = -ENOMEM; + goto err_alloc_device_names_failed; + } + strcpy(device_names, binder_devices_param); + + while ((device_name = strsep(&device_names, ","))) { + ret = init_binder_device(device_name); + if (ret) + goto err_init_binder_device_failed; } return ret; @@ -4499,8 +5706,12 @@ err_init_binder_device_failed: hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { misc_deregister(&device->miscdev); hlist_del(&device->hlist); - free_binder_device(device); + kfree(device); } +err_alloc_device_names_failed: + debugfs_remove_recursive(binder_debugfs_dir_entry_root); + + destroy_workqueue(binder_deferred_workqueue); return ret; } diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c new file mode 100644 index 000000000000..aabfebac6e57 --- /dev/null +++ b/drivers/android/binder_alloc.c @@ -0,0 +1,802 @@ +/* binder_alloc.c + * + * Android IPC Subsystem + * + * Copyright (C) 2007-2017 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <asm/cacheflush.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/rtmutex.h> +#include <linux/rbtree.h> +#include <linux/seq_file.h> +#include <linux/vmalloc.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include "binder_alloc.h" +#include "binder_trace.h" + +static DEFINE_MUTEX(binder_alloc_mmap_lock); + +enum { + BINDER_DEBUG_OPEN_CLOSE = 1U << 1, + BINDER_DEBUG_BUFFER_ALLOC = 1U << 2, + BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3, +}; +static uint32_t binder_alloc_debug_mask; + +module_param_named(debug_mask, binder_alloc_debug_mask, + uint, S_IWUSR | S_IRUGO); + +#define binder_alloc_debug(mask, x...) 
\ + do { \ + if (binder_alloc_debug_mask & mask) \ + pr_info(x); \ + } while (0) + +static size_t binder_alloc_buffer_size(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + if (list_is_last(&buffer->entry, &alloc->buffers)) + return alloc->buffer + + alloc->buffer_size - (void *)buffer->data; + return (size_t)list_entry(buffer->entry.next, + struct binder_buffer, entry) - (size_t)buffer->data; +} + +static void binder_insert_free_buffer(struct binder_alloc *alloc, + struct binder_buffer *new_buffer) +{ + struct rb_node **p = &alloc->free_buffers.rb_node; + struct rb_node *parent = NULL; + struct binder_buffer *buffer; + size_t buffer_size; + size_t new_buffer_size; + + BUG_ON(!new_buffer->free); + + new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: add free buffer, size %zd, at %pK\n", + alloc->pid, new_buffer_size, new_buffer); + + while (*p) { + parent = *p; + buffer = rb_entry(parent, struct binder_buffer, rb_node); + BUG_ON(!buffer->free); + + buffer_size = binder_alloc_buffer_size(alloc, buffer); + + if (new_buffer_size < buffer_size) + p = &parent->rb_left; + else + p = &parent->rb_right; + } + rb_link_node(&new_buffer->rb_node, parent, p); + rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); +} + +static void binder_insert_allocated_buffer_locked( + struct binder_alloc *alloc, struct binder_buffer *new_buffer) +{ + struct rb_node **p = &alloc->allocated_buffers.rb_node; + struct rb_node *parent = NULL; + struct binder_buffer *buffer; + + BUG_ON(new_buffer->free); + + while (*p) { + parent = *p; + buffer = rb_entry(parent, struct binder_buffer, rb_node); + BUG_ON(buffer->free); + + if (new_buffer < buffer) + p = &parent->rb_left; + else if (new_buffer > buffer) + p = &parent->rb_right; + else + BUG(); + } + rb_link_node(&new_buffer->rb_node, parent, p); + rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); +} + +static struct binder_buffer *binder_alloc_prepare_to_free_locked( + struct binder_alloc *alloc, + uintptr_t user_ptr) +{ + struct rb_node *n = alloc->allocated_buffers.rb_node; + struct binder_buffer *buffer; + struct binder_buffer *kern_ptr; + + kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset + - offsetof(struct binder_buffer, data)); + + while (n) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + BUG_ON(buffer->free); + + if (kern_ptr < buffer) + n = n->rb_left; + else if (kern_ptr > buffer) + n = n->rb_right; + else { + /* + * Guard against user threads attempting to + * free the buffer twice + */ + if (buffer->free_in_progress) { + pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n", + alloc->pid, current->pid, (u64)user_ptr); + return NULL; + } + buffer->free_in_progress = 1; + return buffer; + } + } + return NULL; +} + +/** + * binder_alloc_buffer_lookup() - get buffer given user ptr + * @alloc: binder_alloc for this proc + * @user_ptr: User pointer to buffer data + * + * Validate userspace pointer to buffer data and return buffer corresponding to + * that user pointer. Search the rb tree for buffer that matches user data + * pointer. 
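
binder_alloc_prepare_to_free_locked() above recovers the kernel-side binder_buffer header from a userspace data pointer in two steps: subtract user_buffer_offset to translate the user address into the kernel mapping, then subtract offsetof(struct binder_buffer, data) to step back from the payload to its header. The second step is ordinary container_of()-style arithmetic; a runnable userspace illustration with a hypothetical header layout:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct buffer {
	size_t data_size;
	int free;
	char data[]; /* payload follows the header, as in binder */
};

/* Recover the header from a pointer to the payload: the same
 * arithmetic the driver applies after translating the user address
 * into the kernel mapping. */
static struct buffer *buffer_from_data(void *data_ptr)
{
	return (struct buffer *)((char *)data_ptr -
				 offsetof(struct buffer, data));
}

int main(void)
{
	struct buffer *b = calloc(1, sizeof(*b) + 64);

	b->data_size = 64;
	printf("recovered header ok: %d\n", buffer_from_data(b->data) == b);
	free(b);
	return 0;
}
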
+ * + * Return: Pointer to buffer or NULL + */ +struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, + uintptr_t user_ptr) +{ + struct binder_buffer *buffer; + + mutex_lock(&alloc->mutex); + buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); + mutex_unlock(&alloc->mutex); + return buffer; +} + +static int binder_update_page_range(struct binder_alloc *alloc, int allocate, + void *start, void *end, + struct vm_area_struct *vma) +{ + void *page_addr; + unsigned long user_page_addr; + struct page **page; + struct mm_struct *mm; + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: %s pages %pK-%pK\n", alloc->pid, + allocate ? "allocate" : "free", start, end); + + if (end <= start) + return 0; + + trace_binder_update_page_range(alloc, allocate, start, end); + + if (vma) + mm = NULL; + else + mm = get_task_mm(alloc->tsk); + + if (mm) { + down_write(&mm->mmap_sem); + vma = alloc->vma; + if (vma && mm != alloc->vma_vm_mm) { + pr_err("%d: vma mm and task mm mismatch\n", + alloc->pid); + vma = NULL; + } + } + + if (allocate == 0) + goto free_range; + + if (vma == NULL) { + pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", + alloc->pid); + goto err_no_vma; + } + + for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { + int ret; + + page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; + + BUG_ON(*page); + *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); + if (*page == NULL) { + pr_err("%d: binder_alloc_buf failed for page at %pK\n", + alloc->pid, page_addr); + goto err_alloc_page_failed; + } + ret = map_kernel_range_noflush((unsigned long)page_addr, + PAGE_SIZE, PAGE_KERNEL, page); + flush_cache_vmap((unsigned long)page_addr, + (unsigned long)page_addr + PAGE_SIZE); + if (ret != 1) { + pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n", + alloc->pid, page_addr); + goto err_map_kernel_failed; + } + user_page_addr = + (uintptr_t)page_addr + alloc->user_buffer_offset; + ret = vm_insert_page(vma, user_page_addr, page[0]); + if (ret) { + pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", + alloc->pid, user_page_addr); + goto err_vm_insert_page_failed; + } + /* vm_insert_page does not seem to increment the refcount */ + } + if (mm) { + up_write(&mm->mmap_sem); + mmput(mm); + } + return 0; + +free_range: + for (page_addr = end - PAGE_SIZE; page_addr >= start; + page_addr -= PAGE_SIZE) { + page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; + if (vma) + zap_page_range(vma, (uintptr_t)page_addr + + alloc->user_buffer_offset, PAGE_SIZE, NULL); +err_vm_insert_page_failed: + unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); +err_map_kernel_failed: + __free_page(*page); + *page = NULL; +err_alloc_page_failed: + ; + } +err_no_vma: + if (mm) { + up_write(&mm->mmap_sem); + mmput(mm); + } + return vma ? 
-ENOMEM : -ESRCH; +} + +struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async) +{ + struct rb_node *n = alloc->free_buffers.rb_node; + struct binder_buffer *buffer; + size_t buffer_size; + struct rb_node *best_fit = NULL; + void *has_page_addr; + void *end_page_addr; + size_t size, data_offsets_size; + int ret; + + if (alloc->vma == NULL) { + pr_err("%d: binder_alloc_buf, no vma\n", + alloc->pid); + return ERR_PTR(-ESRCH); + } + + data_offsets_size = ALIGN(data_size, sizeof(void *)) + + ALIGN(offsets_size, sizeof(void *)); + + if (data_offsets_size < data_size || data_offsets_size < offsets_size) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: got transaction with invalid size %zd-%zd\n", + alloc->pid, data_size, offsets_size); + return ERR_PTR(-EINVAL); + } + size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); + if (size < data_offsets_size || size < extra_buffers_size) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: got transaction with invalid extra_buffers_size %zd\n", + alloc->pid, extra_buffers_size); + return ERR_PTR(-EINVAL); + } + if (is_async && + alloc->free_async_space < size + sizeof(struct binder_buffer)) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: binder_alloc_buf size %zd failed, no async space left\n", + alloc->pid, size); + return ERR_PTR(-ENOSPC); + } + + while (n) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + BUG_ON(!buffer->free); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + + if (size < buffer_size) { + best_fit = n; + n = n->rb_left; + } else if (size > buffer_size) + n = n->rb_right; + else { + best_fit = n; + break; + } + } + if (best_fit == NULL) { + size_t allocated_buffers = 0; + size_t largest_alloc_size = 0; + size_t total_alloc_size = 0; + size_t free_buffers = 0; + size_t largest_free_size = 0; + size_t total_free_size = 0; + + for (n = rb_first(&alloc->allocated_buffers); n != NULL; + n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + allocated_buffers++; + total_alloc_size += buffer_size; + if (buffer_size > largest_alloc_size) + largest_alloc_size = buffer_size; + } + for (n = rb_first(&alloc->free_buffers); n != NULL; + n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + free_buffers++; + total_free_size += buffer_size; + if (buffer_size > largest_free_size) + largest_free_size = buffer_size; + } + pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", + alloc->pid, size); + pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", + total_alloc_size, allocated_buffers, largest_alloc_size, + total_free_size, free_buffers, largest_free_size); + return ERR_PTR(-ENOSPC); + } + if (n == NULL) { + buffer = rb_entry(best_fit, struct binder_buffer, rb_node); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + } + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", + alloc->pid, size, buffer, buffer_size); + + has_page_addr = + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); + if (n == NULL) { + if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) + buffer_size = size; /* no room for other buffers */ + else + buffer_size = size + sizeof(struct binder_buffer); + } + end_page_addr = + (void 
*)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+	if (end_page_addr > has_page_addr)
+		end_page_addr = has_page_addr;
+	ret = binder_update_page_range(alloc, 1,
+	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+	if (ret)
+		return ERR_PTR(ret);
+
+	rb_erase(best_fit, &alloc->free_buffers);
+	buffer->free = 0;
+	buffer->free_in_progress = 0;
+	binder_insert_allocated_buffer_locked(alloc, buffer);
+	if (buffer_size != size) {
+		struct binder_buffer *new_buffer = (void *)buffer->data + size;
+
+		list_add(&new_buffer->entry, &buffer->entry);
+		new_buffer->free = 1;
+		binder_insert_free_buffer(alloc, new_buffer);
+	}
+	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+		     "%d: binder_alloc_buf size %zd got %pK\n",
+		      alloc->pid, size, buffer);
+	buffer->data_size = data_size;
+	buffer->offsets_size = offsets_size;
+	buffer->async_transaction = is_async;
+	buffer->extra_buffers_size = extra_buffers_size;
+	if (is_async) {
+		alloc->free_async_space -= size + sizeof(struct binder_buffer);
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+			     "%d: binder_alloc_buf size %zd async free %zd\n",
+			      alloc->pid, size, alloc->free_async_space);
+	}
+	return buffer;
+}
+
+/**
+ * binder_alloc_new_buf() - Allocate a new binder buffer
+ * @alloc:              binder_alloc for this proc
+ * @data_size:          size of user data buffer
+ * @offsets_size:       user specified buffer offset
+ * @extra_buffers_size: size of extra space for meta-data (eg, security context)
+ * @is_async:           buffer for async transaction
+ *
+ * Allocate a new buffer given the requested sizes. Returns
+ * the kernel version of the buffer pointer. The size allocated
+ * is the sum of the three given sizes (each rounded up to
+ * pointer-sized boundary)
+ *
+ * Return: The allocated buffer or an ERR_PTR-encoded negative errno
+ * on failure
+ */
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+					   size_t data_size,
+					   size_t offsets_size,
+					   size_t extra_buffers_size,
+					   int is_async)
+{
+	struct binder_buffer *buffer;
+
+	mutex_lock(&alloc->mutex);
+	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
+					     extra_buffers_size, is_async);
+	mutex_unlock(&alloc->mutex);
+	return buffer;
+}
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+	return (void *)((uintptr_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
+static void binder_delete_free_buffer(struct binder_alloc *alloc,
+				      struct binder_buffer *buffer)
+{
+	struct binder_buffer *prev, *next = NULL;
+	int free_page_end = 1;
+	int free_page_start = 1;
+
+	BUG_ON(alloc->buffers.next == &buffer->entry);
+	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+	BUG_ON(!prev->free);
+	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+		free_page_start = 0;
+		if (buffer_end_page(prev) == buffer_end_page(buffer))
+			free_page_end = 0;
+		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+			     "%d: merge free, buffer %pK share page with %pK\n",
+			      alloc->pid, buffer, prev);
+	}
+
+	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+		next = list_entry(buffer->entry.next,
+				  struct binder_buffer, entry);
+		if (buffer_start_page(next) == buffer_end_page(buffer)) {
+			free_page_end = 0;
+			if (buffer_start_page(next) ==
+			    buffer_start_page(buffer))
+				free_page_start = 0;
+			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+				     "%d: merge free, buffer %pK share page with %pK\n",
+				      alloc->pid, buffer, next);
+		}
+	}
+	list_del(&buffer->entry);
+	if (free_page_start ||
free_page_end) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n", + alloc->pid, buffer, free_page_start ? "" : " end", + free_page_end ? "" : " start", prev, next); + binder_update_page_range(alloc, 0, free_page_start ? + buffer_start_page(buffer) : buffer_end_page(buffer), + (free_page_end ? buffer_end_page(buffer) : + buffer_start_page(buffer)) + PAGE_SIZE, NULL); + } +} + +static void binder_free_buf_locked(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + size_t size, buffer_size; + + buffer_size = binder_alloc_buffer_size(alloc, buffer); + + size = ALIGN(buffer->data_size, sizeof(void *)) + + ALIGN(buffer->offsets_size, sizeof(void *)) + + ALIGN(buffer->extra_buffers_size, sizeof(void *)); + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: binder_free_buf %pK size %zd buffer_size %zd\n", + alloc->pid, buffer, size, buffer_size); + + BUG_ON(buffer->free); + BUG_ON(size > buffer_size); + BUG_ON(buffer->transaction != NULL); + BUG_ON((void *)buffer < alloc->buffer); + BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size); + + if (buffer->async_transaction) { + alloc->free_async_space += size + sizeof(struct binder_buffer); + + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, + "%d: binder_free_buf size %zd async free %zd\n", + alloc->pid, size, alloc->free_async_space); + } + + binder_update_page_range(alloc, 0, + (void *)PAGE_ALIGN((uintptr_t)buffer->data), + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), + NULL); + + rb_erase(&buffer->rb_node, &alloc->allocated_buffers); + buffer->free = 1; + if (!list_is_last(&buffer->entry, &alloc->buffers)) { + struct binder_buffer *next = list_entry(buffer->entry.next, + struct binder_buffer, entry); + + if (next->free) { + rb_erase(&next->rb_node, &alloc->free_buffers); + binder_delete_free_buffer(alloc, next); + } + } + if (alloc->buffers.next != &buffer->entry) { + struct binder_buffer *prev = list_entry(buffer->entry.prev, + struct binder_buffer, entry); + + if (prev->free) { + binder_delete_free_buffer(alloc, buffer); + rb_erase(&prev->rb_node, &alloc->free_buffers); + buffer = prev; + } + } + binder_insert_free_buffer(alloc, buffer); +} + +/** + * binder_alloc_free_buf() - free a binder buffer + * @alloc: binder_alloc for this proc + * @buffer: kernel pointer to buffer + * + * Free the buffer allocated via binder_alloc_new_buffer() + */ +void binder_alloc_free_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer) +{ + mutex_lock(&alloc->mutex); + binder_free_buf_locked(alloc, buffer); + mutex_unlock(&alloc->mutex); +} + +/** + * binder_alloc_mmap_handler() - map virtual address space for proc + * @alloc: alloc structure for this proc + * @vma: vma passed to mmap() + * + * Called by binder_mmap() to initialize the space specified in + * vma for allocating binder buffers + * + * Return: + * 0 = success + * -EBUSY = address space already mapped + * -ENOMEM = failed to map memory to given address space + */ +int binder_alloc_mmap_handler(struct binder_alloc *alloc, + struct vm_area_struct *vma) +{ + int ret; + struct vm_struct *area; + const char *failure_string; + struct binder_buffer *buffer; + + mutex_lock(&binder_alloc_mmap_lock); + if (alloc->buffer) { + ret = -EBUSY; + failure_string = "already mapped"; + goto err_already_mapped; + } + + area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); + if (area == NULL) { + ret = -ENOMEM; + failure_string = "get_vm_area"; + goto err_get_vm_area_failed; + } + 
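[Editor's aside, not part of the patch: the locking convention this file follows, shown schematically. Public entry points are thin wrappers that take alloc->mutex around a *_locked() helper (binder_alloc_free_buf() and binder_free_buf_locked() above are one pair), so internal callers that already hold the lock can reuse the helper. do_work is a hypothetical name.]

static void binder_alloc_do_work_locked(struct binder_alloc *alloc)
{
	/* caller holds alloc->mutex; safe to touch lists and rb-trees */
}

void binder_alloc_do_work(struct binder_alloc *alloc)
{
	mutex_lock(&alloc->mutex);
	binder_alloc_do_work_locked(alloc);
	mutex_unlock(&alloc->mutex);
}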
alloc->buffer = area->addr; + alloc->user_buffer_offset = + vma->vm_start - (uintptr_t)alloc->buffer; + mutex_unlock(&binder_alloc_mmap_lock); + +#ifdef CONFIG_CPU_CACHE_VIPT + if (cache_is_vipt_aliasing()) { + while (CACHE_COLOUR( + (vma->vm_start ^ (uint32_t)alloc->buffer))) { + pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n", + alloc->pid, vma->vm_start, vma->vm_end, + alloc->buffer); + vma->vm_start += PAGE_SIZE; + } + } +#endif + alloc->pages = kzalloc(sizeof(alloc->pages[0]) * + ((vma->vm_end - vma->vm_start) / PAGE_SIZE), + GFP_KERNEL); + if (alloc->pages == NULL) { + ret = -ENOMEM; + failure_string = "alloc page array"; + goto err_alloc_pages_failed; + } + alloc->buffer_size = vma->vm_end - vma->vm_start; + + if (binder_update_page_range(alloc, 1, alloc->buffer, + alloc->buffer + PAGE_SIZE, vma)) { + ret = -ENOMEM; + failure_string = "alloc small buf"; + goto err_alloc_small_buf_failed; + } + buffer = alloc->buffer; + INIT_LIST_HEAD(&alloc->buffers); + list_add(&buffer->entry, &alloc->buffers); + buffer->free = 1; + binder_insert_free_buffer(alloc, buffer); + alloc->free_async_space = alloc->buffer_size / 2; + barrier(); + alloc->vma = vma; + alloc->vma_vm_mm = vma->vm_mm; + + return 0; + +err_alloc_small_buf_failed: + kfree(alloc->pages); + alloc->pages = NULL; +err_alloc_pages_failed: + mutex_lock(&binder_alloc_mmap_lock); + vfree(alloc->buffer); + alloc->buffer = NULL; +err_get_vm_area_failed: +err_already_mapped: + mutex_unlock(&binder_alloc_mmap_lock); + pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, + alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret); + return ret; +} + + +void binder_alloc_deferred_release(struct binder_alloc *alloc) +{ + struct rb_node *n; + int buffers, page_count; + + BUG_ON(alloc->vma); + + buffers = 0; + mutex_lock(&alloc->mutex); + while ((n = rb_first(&alloc->allocated_buffers))) { + struct binder_buffer *buffer; + + buffer = rb_entry(n, struct binder_buffer, rb_node); + + /* Transaction should already have been freed */ + BUG_ON(buffer->transaction); + + binder_free_buf_locked(alloc, buffer); + buffers++; + } + + page_count = 0; + if (alloc->pages) { + int i; + + for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { + void *page_addr; + + if (!alloc->pages[i]) + continue; + + page_addr = alloc->buffer + i * PAGE_SIZE; + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%s: %d: page %d at %pK not freed\n", + __func__, alloc->pid, i, page_addr); + unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); + __free_page(alloc->pages[i]); + page_count++; + } + kfree(alloc->pages); + vfree(alloc->buffer); + } + mutex_unlock(&alloc->mutex); + + binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, + "%s: %d buffers %d, pages %d\n", + __func__, alloc->pid, buffers, page_count); +} + +static void print_binder_buffer(struct seq_file *m, const char *prefix, + struct binder_buffer *buffer) +{ + seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n", + prefix, buffer->debug_id, buffer->data, + buffer->data_size, buffer->offsets_size, + buffer->extra_buffers_size, + buffer->transaction ? 
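[Editor's aside, not part of the patch: the unwind shape used by binder_alloc_mmap_handler() above, as a schematic. Each failure jumps to the label that releases exactly what was acquired before it, in reverse order; alloc_a(), alloc_b() and free_a() are hypothetical.]

static int setup_two(void)
{
	void *a, *b;
	int ret;

	a = alloc_a();
	if (!a)
		return -ENOMEM;

	b = alloc_b();
	if (!b) {
		ret = -ENOMEM;
		goto err_free_a;	/* undo only what succeeded */
	}
	return 0;

err_free_a:
	free_a(a);
	return ret;
}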
"active" : "delivered"); +} + +/** + * binder_alloc_print_allocated() - print buffer info + * @m: seq_file for output via seq_printf() + * @alloc: binder_alloc for this proc + * + * Prints information about every buffer associated with + * the binder_alloc state to the given seq_file + */ +void binder_alloc_print_allocated(struct seq_file *m, + struct binder_alloc *alloc) +{ + struct rb_node *n; + + mutex_lock(&alloc->mutex); + for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) + print_binder_buffer(m, " buffer", + rb_entry(n, struct binder_buffer, rb_node)); + mutex_unlock(&alloc->mutex); +} + +/** + * binder_alloc_get_allocated_count() - return count of buffers + * @alloc: binder_alloc for this proc + * + * Return: count of allocated buffers + */ +int binder_alloc_get_allocated_count(struct binder_alloc *alloc) +{ + struct rb_node *n; + int count = 0; + + mutex_lock(&alloc->mutex); + for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) + count++; + mutex_unlock(&alloc->mutex); + return count; +} + + +/** + * binder_alloc_vma_close() - invalidate address space + * @alloc: binder_alloc for this proc + * + * Called from binder_vma_close() when releasing address space. + * Clears alloc->vma to prevent new incoming transactions from + * allocating more buffers. + */ +void binder_alloc_vma_close(struct binder_alloc *alloc) +{ + WRITE_ONCE(alloc->vma, NULL); + WRITE_ONCE(alloc->vma_vm_mm, NULL); +} + +/** + * binder_alloc_init() - called by binder_open() for per-proc initialization + * @alloc: binder_alloc for this proc + * + * Called from binder_open() to initialize binder_alloc fields for + * new binder proc + */ +void binder_alloc_init(struct binder_alloc *alloc) +{ + alloc->tsk = current->group_leader; + alloc->pid = current->group_leader->pid; + mutex_init(&alloc->mutex); +} + diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h new file mode 100644 index 000000000000..088e4ffc6230 --- /dev/null +++ b/drivers/android/binder_alloc.h @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2017 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ */
+
+#ifndef _LINUX_BINDER_ALLOC_H
+#define _LINUX_BINDER_ALLOC_H
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+struct binder_transaction;
+
+/**
+ * struct binder_buffer - buffer used for binder transactions
+ * @entry:              entry in alloc->buffers list
+ * @rb_node:            node for allocated_buffers/free_buffers rb trees
+ * @free:               true if buffer is free
+ * @allow_user_free:    true if userspace may free the buffer
+ * @async_transaction:  true if buffer is in use for an async txn
+ * @free_in_progress:   true if a free of this buffer is in progress
+ * @debug_id:           unique ID for debugging
+ * @transaction:        pointer to associated struct binder_transaction
+ * @target_node:        struct binder_node associated with this buffer
+ * @data_size:          size of @transaction data
+ * @offsets_size:       size of array of offsets
+ * @extra_buffers_size: size of space for other objects (like sg lists)
+ * @data:               start of the buffer's data area
+ *
+ * Bookkeeping structure for binder transaction buffers
+ */
+struct binder_buffer {
+	struct list_head entry; /* free and allocated entries by address */
+	struct rb_node rb_node; /* free entry by size or allocated entry */
+				/* by address */
+	unsigned free:1;
+	unsigned allow_user_free:1;
+	unsigned async_transaction:1;
+	unsigned free_in_progress:1;
+	unsigned debug_id:28;
+
+	struct binder_transaction *transaction;
+
+	struct binder_node *target_node;
+	size_t data_size;
+	size_t offsets_size;
+	size_t extra_buffers_size;
+	uint8_t data[0];
+};
+
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @mutex:              protects the binder_alloc fields
+ * @vma:                vm_area_struct passed to mmap_handler
+ *                      (invariant after mmap)
+ * @tsk:                task_struct of the task that called init for this
+ *                      proc (invariant after init)
+ * @vma_vm_mm:          copy of vma->vm_mm (invariant after mmap)
+ * @buffer:             base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers:            list of all buffers for this proc
+ * @free_buffers:       rb tree of buffers available for allocation
+ *                      sorted by size
+ * @allocated_buffers:  rb tree of allocated buffers sorted by address
+ * @free_async_space:   VA space available for async buffers. This is
+ *                      initialized at mmap time to 1/2 the full VA space
+ * @pages:              array of physical page addresses for each
+ *                      page of mmap'd space
+ * @buffer_size:        size of address space specified via mmap
+ * @pid:                pid for associated binder_proc (invariant after init)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_init() and binder_mmap()
+ * calls.
The address space is used for both user-visible buffers and for + * struct binder_buffer objects used to track the user buffers + */ +struct binder_alloc { + struct mutex mutex; + struct task_struct *tsk; + struct vm_area_struct *vma; + struct mm_struct *vma_vm_mm; + void *buffer; + ptrdiff_t user_buffer_offset; + struct list_head buffers; + struct rb_root free_buffers; + struct rb_root allocated_buffers; + size_t free_async_space; + struct page **pages; + size_t buffer_size; + uint32_t buffer_free; + int pid; +}; + +extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async); +extern void binder_alloc_init(struct binder_alloc *alloc); +extern void binder_alloc_vma_close(struct binder_alloc *alloc); +extern struct binder_buffer * +binder_alloc_prepare_to_free(struct binder_alloc *alloc, + uintptr_t user_ptr); +extern void binder_alloc_free_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer); +extern int binder_alloc_mmap_handler(struct binder_alloc *alloc, + struct vm_area_struct *vma); +extern void binder_alloc_deferred_release(struct binder_alloc *alloc); +extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc); +extern void binder_alloc_print_allocated(struct seq_file *m, + struct binder_alloc *alloc); + +/** + * binder_alloc_get_free_async_space() - get free space available for async + * @alloc: binder_alloc for this proc + * + * Return: the bytes remaining in the address-space for async transactions + */ +static inline size_t +binder_alloc_get_free_async_space(struct binder_alloc *alloc) +{ + size_t free_async_space; + + mutex_lock(&alloc->mutex); + free_async_space = alloc->free_async_space; + mutex_unlock(&alloc->mutex); + return free_async_space; +} + +/** + * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs + * @alloc: binder_alloc for this proc + * + * Return: the offset between kernel and user-space addresses to use for + * virtual address conversion + */ +static inline ptrdiff_t +binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc) +{ + /* + * user_buffer_offset is constant if vma is set and + * undefined if vma is not set. It is possible to + * get here with !alloc->vma if the target process + * is dying while a transaction is being initiated. + * Returning the old value is ok in this case and + * the transaction will fail. 
+ */ + return alloc->user_buffer_offset; +} + +#endif /* _LINUX_BINDER_ALLOC_H */ + diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h index 7f20f3dc8369..7967db16ba5a 100644 --- a/drivers/android/binder_trace.h +++ b/drivers/android/binder_trace.h @@ -23,7 +23,8 @@ struct binder_buffer; struct binder_node; struct binder_proc; -struct binder_ref; +struct binder_alloc; +struct binder_ref_data; struct binder_thread; struct binder_transaction; @@ -146,8 +147,8 @@ TRACE_EVENT(binder_transaction_received, TRACE_EVENT(binder_transaction_node_to_ref, TP_PROTO(struct binder_transaction *t, struct binder_node *node, - struct binder_ref *ref), - TP_ARGS(t, node, ref), + struct binder_ref_data *rdata), + TP_ARGS(t, node, rdata), TP_STRUCT__entry( __field(int, debug_id) @@ -160,8 +161,8 @@ TRACE_EVENT(binder_transaction_node_to_ref, __entry->debug_id = t->debug_id; __entry->node_debug_id = node->debug_id; __entry->node_ptr = node->ptr; - __entry->ref_debug_id = ref->debug_id; - __entry->ref_desc = ref->desc; + __entry->ref_debug_id = rdata->debug_id; + __entry->ref_desc = rdata->desc; ), TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d", __entry->debug_id, __entry->node_debug_id, @@ -170,8 +171,9 @@ TRACE_EVENT(binder_transaction_node_to_ref, ); TRACE_EVENT(binder_transaction_ref_to_node, - TP_PROTO(struct binder_transaction *t, struct binder_ref *ref), - TP_ARGS(t, ref), + TP_PROTO(struct binder_transaction *t, struct binder_node *node, + struct binder_ref_data *rdata), + TP_ARGS(t, node, rdata), TP_STRUCT__entry( __field(int, debug_id) @@ -182,10 +184,10 @@ TRACE_EVENT(binder_transaction_ref_to_node, ), TP_fast_assign( __entry->debug_id = t->debug_id; - __entry->ref_debug_id = ref->debug_id; - __entry->ref_desc = ref->desc; - __entry->node_debug_id = ref->node->debug_id; - __entry->node_ptr = ref->node->ptr; + __entry->ref_debug_id = rdata->debug_id; + __entry->ref_desc = rdata->desc; + __entry->node_debug_id = node->debug_id; + __entry->node_ptr = node->ptr; ), TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx", __entry->debug_id, __entry->node_debug_id, @@ -194,9 +196,10 @@ TRACE_EVENT(binder_transaction_ref_to_node, ); TRACE_EVENT(binder_transaction_ref_to_ref, - TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref, - struct binder_ref *dest_ref), - TP_ARGS(t, src_ref, dest_ref), + TP_PROTO(struct binder_transaction *t, struct binder_node *node, + struct binder_ref_data *src_ref, + struct binder_ref_data *dest_ref), + TP_ARGS(t, node, src_ref, dest_ref), TP_STRUCT__entry( __field(int, debug_id) @@ -208,7 +211,7 @@ TRACE_EVENT(binder_transaction_ref_to_ref, ), TP_fast_assign( __entry->debug_id = t->debug_id; - __entry->node_debug_id = src_ref->node->debug_id; + __entry->node_debug_id = node->debug_id; __entry->src_ref_debug_id = src_ref->debug_id; __entry->src_ref_desc = src_ref->desc; __entry->dest_ref_debug_id = dest_ref->debug_id; @@ -268,9 +271,9 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release, TP_ARGS(buffer)); TRACE_EVENT(binder_update_page_range, - TP_PROTO(struct binder_proc *proc, bool allocate, + TP_PROTO(struct binder_alloc *alloc, bool allocate, void *start, void *end), - TP_ARGS(proc, allocate, start, end), + TP_ARGS(alloc, allocate, start, end), TP_STRUCT__entry( __field(int, proc) __field(bool, allocate) @@ -278,9 +281,9 @@ TRACE_EVENT(binder_update_page_range, __field(size_t, size) ), TP_fast_assign( - __entry->proc = proc->pid; + __entry->proc = 
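[Editor's aside, not part of the patch: how a caller of binder_alloc_get_user_buffer_offset(), defined in the header above, converts the kernel view of a payload into the user VA seen by the target process. buffer and alloc are assumed to be in scope.]

	void *kptr = buffer->data;	/* kernel VA of the payload */
	uintptr_t uptr = (uintptr_t)kptr +
			 binder_alloc_get_user_buffer_offset(alloc);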
alloc->pid; __entry->allocate = allocate; - __entry->offset = start - proc->buffer; + __entry->offset = start - alloc->buffer; __entry->size = end - start; ), TP_printk("proc=%d allocate=%d offset=%zu size=%zu", diff --git a/drivers/base/core.c b/drivers/base/core.c index 3fa9096b27c2..5a56a8e9f006 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2105,7 +2105,11 @@ void device_shutdown(void) pm_runtime_get_noresume(dev); pm_runtime_barrier(dev); - if (dev->bus && dev->bus->shutdown) { + if (dev->class && dev->class->shutdown) { + if (initcall_debug) + dev_info(dev, "shutdown\n"); + dev->class->shutdown(dev); + } else if (dev->bus && dev->bus->shutdown) { if (initcall_debug) dev_info(dev, "shutdown\n"); dev->bus->shutdown(dev); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 9920916a6220..ae7f3ce90bd2 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -827,7 +827,7 @@ static ssize_t driver_override_store(struct device *dev, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); - char *driver_override, *old = pdev->driver_override, *cp; + char *driver_override, *old, *cp; if (count > PATH_MAX) return -EINVAL; @@ -840,12 +840,15 @@ static ssize_t driver_override_store(struct device *dev, if (cp) *cp = '\0'; + device_lock(dev); + old = pdev->driver_override; if (strlen(driver_override)) { pdev->driver_override = driver_override; } else { kfree(driver_override); pdev->driver_override = NULL; } + device_unlock(dev); kfree(old); @@ -856,8 +859,12 @@ static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); + ssize_t len; - return sprintf(buf, "%s\n", pdev->driver_override); + device_lock(dev); + len = sprintf(buf, "%s\n", pdev->driver_override); + device_unlock(dev); + return len; } static DEVICE_ATTR_RW(driver_override); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index a7b46798c81d..39efa7e6c0c0 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -268,6 +268,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev, value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) value = PM_QOS_LATENCY_ANY; + else + return -EINVAL; } ret = dev_pm_qos_update_user_latency_tolerance(dev, value); return ret < 0 ? 
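[Editor's aside, not part of the patch: the race closed by the platform.c hunk above, restated. store() swaps the string and show() reads it under the same device_lock(), so the old string is freed only after no reader can still observe it.]

	device_lock(dev);
	old = pdev->driver_override;
	pdev->driver_override = driver_override;	/* publish under lock */
	device_unlock(dev);
	kfree(old);		/* safe: show() takes the lock to read */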
ret : n; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 0e494108c20c..7af116e12e53 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -61,6 +61,8 @@ static LIST_HEAD(wakeup_sources); static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue); +DEFINE_STATIC_SRCU(wakeup_srcu); + static struct wakeup_source deleted_ws = { .name = "deleted", .lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock), @@ -199,7 +201,7 @@ void wakeup_source_remove(struct wakeup_source *ws) spin_lock_irqsave(&events_lock, flags); list_del_rcu(&ws->entry); spin_unlock_irqrestore(&events_lock, flags); - synchronize_rcu(); + synchronize_srcu(&wakeup_srcu); } EXPORT_SYMBOL_GPL(wakeup_source_remove); @@ -331,13 +333,14 @@ void device_wakeup_detach_irq(struct device *dev) void device_wakeup_arm_wake_irqs(void) { struct wakeup_source *ws; + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { if (ws->wakeirq) dev_pm_arm_wake_irq(ws->wakeirq); } - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } /** @@ -348,13 +351,14 @@ void device_wakeup_arm_wake_irqs(void) void device_wakeup_disarm_wake_irqs(void) { struct wakeup_source *ws; + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { if (ws->wakeirq) dev_pm_disarm_wake_irq(ws->wakeirq); } - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } /** @@ -839,10 +843,10 @@ EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources); void pm_print_active_wakeup_sources(void) { struct wakeup_source *ws; - int active = 0; + int srcuidx, active = 0; struct wakeup_source *last_activity_ws = NULL; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { if (ws->active) { pr_info("active wakeup source: %s\n", ws->name); @@ -858,7 +862,7 @@ void pm_print_active_wakeup_sources(void) if (!active && last_activity_ws) pr_info("last active wakeup source: %s\n", last_activity_ws->name); - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources); @@ -985,8 +989,9 @@ void pm_wakep_autosleep_enabled(bool set) { struct wakeup_source *ws; ktime_t now = ktime_get(); + int srcuidx; - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) { spin_lock_irq(&ws->lock); if (ws->autosleep_enabled != set) { @@ -1000,7 +1005,7 @@ void pm_wakep_autosleep_enabled(bool set) } spin_unlock_irq(&ws->lock); } - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); } #endif /* CONFIG_PM_AUTOSLEEP */ @@ -1061,15 +1066,16 @@ static int print_wakeup_source_stats(struct seq_file *m, static int wakeup_sources_stats_show(struct seq_file *m, void *unused) { struct wakeup_source *ws; + int srcuidx; seq_puts(m, "name\t\t\t\t\tactive_count\tevent_count\twakeup_count\t" "expire_count\tactive_since\ttotal_time\tmax_time\t" "last_change\tprevent_suspend_time\n"); - rcu_read_lock(); + srcuidx = srcu_read_lock(&wakeup_srcu); list_for_each_entry_rcu(ws, &wakeup_sources, entry) print_wakeup_source_stats(m, ws); - rcu_read_unlock(); + srcu_read_unlock(&wakeup_srcu, srcuidx); print_wakeup_source_stats(m, &deleted_ws); diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 781fa96726e2..8560a2b731b5 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -2500,6 +2500,9 @@ static int fastrpc_channel_open(struct fastrpc_file *fl) 
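[Editor's aside, not part of the patch: the read-side pattern wakeup.c adopts above. srcu_read_lock() returns an index that must be passed to the matching unlock, and unlike plain RCU the reader may sleep; visit() is a hypothetical placeholder.]

	struct wakeup_source *ws;
	int idx;

	idx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
		visit(ws);		/* may sleep under SRCU */
	srcu_read_unlock(&wakeup_srcu, idx);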
kref_init(&me->channel[cid].kref); pr_info("'opened /dev/%s c %d %d'\n", gcinfo[cid].name, MAJOR(me->dev_no), cid); + err = glink_queue_rx_intent(me->channel[cid].chan, NULL, 64); + if (err) + pr_info("adsprpc: initial intent failed for %d\n", cid); if (cid == 0 && me->channel[cid].ssrcount != me->channel[cid].prevssrcount) { if (fastrpc_mmap_remove_ssr(fl)) diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 252142524ff2..6d56877b2e0a 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -36,10 +36,60 @@ static DEFINE_SPINLOCK(driver_lock); struct class *tpm_class; dev_t tpm_devt; -/* - * tpm_chip_find_get - return tpm_chip for a given chip number - * @chip_num the device number for the chip +/** + * tpm_try_get_ops() - Get a ref to the tpm_chip + * @chip: Chip to ref + * + * The caller must already have some kind of locking to ensure that chip is + * valid. This function will lock the chip so that the ops member can be + * accessed safely. The locking prevents tpm_chip_unregister from + * completing, so it should not be held for long periods. + * + * Returns -ERRNO if the chip could not be got. + */ +int tpm_try_get_ops(struct tpm_chip *chip) +{ + int rc = -EIO; + + get_device(&chip->dev); + + down_read(&chip->ops_sem); + if (!chip->ops) + goto out_lock; + + if (!try_module_get(chip->dev.parent->driver->owner)) + goto out_lock; + + return 0; +out_lock: + up_read(&chip->ops_sem); + put_device(&chip->dev); + return rc; +} +EXPORT_SYMBOL_GPL(tpm_try_get_ops); + +/** + * tpm_put_ops() - Release a ref to the tpm_chip + * @chip: Chip to put + * + * This is the opposite pair to tpm_try_get_ops(). After this returns chip may + * be kfree'd. */ +void tpm_put_ops(struct tpm_chip *chip) +{ + module_put(chip->dev.parent->driver->owner); + up_read(&chip->ops_sem); + put_device(&chip->dev); +} +EXPORT_SYMBOL_GPL(tpm_put_ops); + +/** + * tpm_chip_find_get() - return tpm_chip for a given chip number + * @chip_num: id to find + * + * The return'd chip has been tpm_try_get_ops'd and must be released via + * tpm_put_ops + */ struct tpm_chip *tpm_chip_find_get(int chip_num) { struct tpm_chip *pos, *chip = NULL; @@ -49,10 +99,10 @@ struct tpm_chip *tpm_chip_find_get(int chip_num) if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num) continue; - if (try_module_get(pos->pdev->driver->owner)) { + /* rcu prevents chip from being free'd */ + if (!tpm_try_get_ops(pos)) chip = pos; - break; - } + break; } rcu_read_unlock(); return chip; @@ -74,6 +124,41 @@ static void tpm_dev_release(struct device *dev) kfree(chip); } + +/** + * tpm_class_shutdown() - prepare the TPM device for loss of power. + * @dev: device to which the chip is associated. + * + * Issues a TPM2_Shutdown command prior to loss of power, as required by the + * TPM 2.0 spec. + * Then, calls bus- and device- specific shutdown code. + * + * XXX: This codepath relies on the fact that sysfs is not enabled for + * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2 + * has sysfs support enabled before TPM sysfs's implicit locking is fixed. + */ +static int tpm_class_shutdown(struct device *dev) +{ + struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + down_write(&chip->ops_sem); + tpm2_shutdown(chip, TPM2_SU_CLEAR); + chip->ops = NULL; + up_write(&chip->ops_sem); + } + /* Allow bus- and device-specific code to run. Note: since chip->ops + * is NULL, more-specific shutdown code will not be able to issue TPM + * commands. 
+ */ + if (dev->bus && dev->bus->shutdown) + dev->bus->shutdown(dev); + else if (dev->driver && dev->driver->shutdown) + dev->driver->shutdown(dev); + return 0; +} + + /** * tpmm_chip_alloc() - allocate a new struct tpm_chip instance * @dev: device to which the chip is associated @@ -94,6 +179,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, return ERR_PTR(-ENOMEM); mutex_init(&chip->tpm_mutex); + init_rwsem(&chip->ops_sem); INIT_LIST_HEAD(&chip->list); chip->ops = ops; @@ -112,13 +198,12 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num); - chip->pdev = dev; - dev_set_drvdata(dev, chip); chip->dev.class = tpm_class; + chip->dev.class->shutdown = tpm_class_shutdown; chip->dev.release = tpm_dev_release; - chip->dev.parent = chip->pdev; + chip->dev.parent = dev; #ifdef CONFIG_ACPI chip->dev.groups = chip->groups; #endif @@ -133,7 +218,7 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev, device_initialize(&chip->dev); cdev_init(&chip->cdev, &tpm_fops); - chip->cdev.owner = chip->pdev->driver->owner; + chip->cdev.owner = dev->driver->owner; chip->cdev.kobj.parent = &chip->dev.kobj; devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev); @@ -173,6 +258,12 @@ static int tpm_add_char_device(struct tpm_chip *chip) static void tpm_del_char_device(struct tpm_chip *chip) { cdev_del(&chip->cdev); + + /* Make the driver uncallable. */ + down_write(&chip->ops_sem); + chip->ops = NULL; + up_write(&chip->ops_sem); + device_del(&chip->dev); } @@ -236,9 +327,8 @@ int tpm_chip_register(struct tpm_chip *chip) chip->flags |= TPM_CHIP_FLAG_REGISTERED; if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { - rc = __compat_only_sysfs_link_entry_to_kobj(&chip->pdev->kobj, - &chip->dev.kobj, - "ppi"); + rc = __compat_only_sysfs_link_entry_to_kobj( + &chip->dev.parent->kobj, &chip->dev.kobj, "ppi"); if (rc && rc != -ENOENT) { tpm_chip_unregister(chip); return rc; @@ -259,6 +349,9 @@ EXPORT_SYMBOL_GPL(tpm_chip_register); * Takes the chip first away from the list of available TPM chips and then * cleans up all the resources reserved by tpm_chip_register(). * + * Once this function returns the driver call backs in 'op's will not be + * running and will no longer start. + * * NOTE: This function should be only called before deinitializing chip * resources. */ @@ -273,7 +366,7 @@ void tpm_chip_unregister(struct tpm_chip *chip) synchronize_rcu(); if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) - sysfs_remove_link(&chip->pdev->kobj, "ppi"); + sysfs_remove_link(&chip->dev.parent->kobj, "ppi"); tpm1_chip_unregister(chip); tpm_del_char_device(chip); diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c index 4f3137d9a35e..912ad30be585 100644 --- a/drivers/char/tpm/tpm-dev.c +++ b/drivers/char/tpm/tpm-dev.c @@ -61,7 +61,7 @@ static int tpm_open(struct inode *inode, struct file *file) * by the check of is_open variable, which is protected * by driver_lock. */ if (test_and_set_bit(0, &chip->is_open)) { - dev_dbg(chip->pdev, "Another process owns this TPM\n"); + dev_dbg(&chip->dev, "Another process owns this TPM\n"); return -EBUSY; } @@ -79,7 +79,6 @@ static int tpm_open(struct inode *inode, struct file *file) INIT_WORK(&priv->work, timeout_work); file->private_data = priv; - get_device(chip->pdev); return 0; } @@ -137,9 +136,18 @@ static ssize_t tpm_write(struct file *file, const char __user *buf, return -EFAULT; } - /* atomic tpm command send and result receive */ + /* atomic tpm command send and result receive. 
We only hold the ops + * lock during this period so that the tpm can be unregistered even if + * the char dev is held open. + */ + if (tpm_try_get_ops(priv->chip)) { + mutex_unlock(&priv->buffer_mutex); + return -EPIPE; + } out_size = tpm_transmit(priv->chip, priv->data_buffer, sizeof(priv->data_buffer), 0); + + tpm_put_ops(priv->chip); if (out_size < 0) { mutex_unlock(&priv->buffer_mutex); return out_size; @@ -166,7 +174,6 @@ static int tpm_release(struct inode *inode, struct file *file) file->private_data = NULL; atomic_set(&priv->data_pending, 0); clear_bit(0, &priv->chip->is_open); - put_device(priv->chip->pdev); kfree(priv); return 0; } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index 17abe52e6365..8588f2e4b9af 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -343,7 +343,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, if (count == 0) return -ENODATA; if (count > bufsiz) { - dev_err(chip->pdev, + dev_err(&chip->dev, "invalid count value %x %zx\n", count, bufsiz); return -E2BIG; } @@ -353,7 +353,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, rc = chip->ops->send(chip, (u8 *) buf, count); if (rc < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "tpm_transmit: tpm_send: error %zd\n", rc); goto out; } @@ -372,7 +372,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, goto out_recv; if (chip->ops->req_canceled(chip, status)) { - dev_err(chip->pdev, "Operation Canceled\n"); + dev_err(&chip->dev, "Operation Canceled\n"); rc = -ECANCELED; goto out; } @@ -382,14 +382,14 @@ ssize_t tpm_transmit(struct tpm_chip *chip, const u8 *buf, size_t bufsiz, } while (time_before(jiffies, stop)); chip->ops->cancel(chip); - dev_err(chip->pdev, "Operation Timed out\n"); + dev_err(&chip->dev, "Operation Timed out\n"); rc = -ETIME; goto out; out_recv: rc = chip->ops->recv(chip, (u8 *) buf, bufsiz); if (rc < 0) - dev_err(chip->pdev, + dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %zd\n", rc); out: if (!(flags & TPM_TRANSMIT_UNLOCKED)) @@ -416,7 +416,7 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd, err = be32_to_cpu(header->return_code); if (err != 0 && desc) - dev_err(chip->pdev, "A TPM error (%d) occurred %s\n", err, + dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err, desc); return err; @@ -514,7 +514,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) if (rc == TPM_ERR_INVALID_POSTINIT) { /* The TPM is not started, we are the first to talk to it. Execute a startup command. 
*/ - dev_info(chip->pdev, "Issuing TPM_STARTUP"); + dev_info(&chip->dev, "Issuing TPM_STARTUP"); if (tpm_startup(chip, TPM_ST_CLEAR)) return rc; @@ -526,7 +526,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) 0, NULL); } if (rc) { - dev_err(chip->pdev, + dev_err(&chip->dev, "A TPM error (%zd) occurred attempting to determine the timeouts\n", rc); goto duration; @@ -565,7 +565,7 @@ int tpm_get_timeouts(struct tpm_chip *chip) /* Report adjusted timeouts */ if (chip->vendor.timeout_adjusted) { - dev_info(chip->pdev, + dev_info(&chip->dev, HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n", old_timeout[0], new_timeout[0], old_timeout[1], new_timeout[1], @@ -612,7 +612,7 @@ duration: chip->vendor.duration[TPM_MEDIUM] *= 1000; chip->vendor.duration[TPM_LONG] *= 1000; chip->vendor.duration_adjusted = true; - dev_info(chip->pdev, "Adjusting TPM timeout parameters."); + dev_info(&chip->dev, "Adjusting TPM timeout parameters."); } return 0; } @@ -687,7 +687,7 @@ int tpm_is_tpm2(u32 chip_num) rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0; - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } @@ -716,7 +716,7 @@ int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) rc = tpm2_pcr_read(chip, pcr_idx, res_buf); else rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_pcr_read); @@ -751,7 +751,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) if (chip->flags & TPM_CHIP_FLAG_TPM2) { rc = tpm2_pcr_extend(chip, pcr_idx, hash); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } @@ -761,7 +761,7 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0, "attempting extend a PCR value"); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_pcr_extend); @@ -802,7 +802,9 @@ int tpm_do_selftest(struct tpm_chip *chip) * around 300ms while the self test is ongoing, keep trying * until the self test duration expires. 
*/ if (rc == -ETIME) { - dev_info(chip->pdev, HW_ERR "TPM command timed out during continue self test"); + dev_info( + &chip->dev, HW_ERR + "TPM command timed out during continue self test"); msleep(delay_msec); continue; } @@ -812,7 +814,7 @@ int tpm_do_selftest(struct tpm_chip *chip) rc = be32_to_cpu(cmd.header.out.return_code); if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) { - dev_info(chip->pdev, + dev_info(&chip->dev, "TPM is disabled/deactivated (0x%X)\n", rc); /* TPM is disabled and/or deactivated; driver can * proceed and TPM does handle commands for @@ -840,7 +842,7 @@ int tpm_send(u32 chip_num, void *cmd, size_t buflen) rc = tpm_transmit_cmd(chip, cmd, buflen, 0, "attempting tpm_cmd"); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_send); @@ -966,10 +968,10 @@ int tpm_pm_suspend(struct device *dev) } if (rc) - dev_err(chip->pdev, + dev_err(&chip->dev, "Error (%d) sending savestate before suspend\n", rc); else if (try > 0) - dev_warn(chip->pdev, "TPM savestate took %dms\n", + dev_warn(&chip->dev, "TPM savestate took %dms\n", try * TPM_TIMEOUT_RETRY); return rc; @@ -1023,7 +1025,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) if (chip->flags & TPM_CHIP_FLAG_TPM2) { err = tpm2_get_random(chip, out, max); - tpm_chip_put(chip); + tpm_put_ops(chip); return err; } @@ -1045,7 +1047,7 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) num_bytes -= recd; } while (retries-- && total < max); - tpm_chip_put(chip); + tpm_put_ops(chip); return total ? total : -EIO; } EXPORT_SYMBOL_GPL(tpm_get_random); @@ -1071,7 +1073,7 @@ int tpm_seal_trusted(u32 chip_num, struct trusted_key_payload *payload, rc = tpm2_seal_trusted(chip, payload, options); - tpm_chip_put(chip); + tpm_put_ops(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_seal_trusted); @@ -1097,7 +1099,8 @@ int tpm_unseal_trusted(u32 chip_num, struct trusted_key_payload *payload, rc = tpm2_unseal_trusted(chip, payload, options); - tpm_chip_put(chip); + tpm_put_ops(chip); + return rc; } EXPORT_SYMBOL_GPL(tpm_unseal_trusted); diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index f880856aa75e..6a4056a3f7ee 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -284,16 +284,28 @@ static const struct attribute_group tpm_dev_group = { int tpm_sysfs_add_device(struct tpm_chip *chip) { int err; - err = sysfs_create_group(&chip->pdev->kobj, + + /* XXX: If you wish to remove this restriction, you must first update + * tpm_sysfs to explicitly lock chip->ops. 
+ */ + if (chip->flags & TPM_CHIP_FLAG_TPM2) + return 0; + + err = sysfs_create_group(&chip->dev.parent->kobj, &tpm_dev_group); if (err) - dev_err(chip->pdev, + dev_err(&chip->dev, "failed to create sysfs attributes, %d\n", err); return err; } void tpm_sysfs_del_device(struct tpm_chip *chip) { - sysfs_remove_group(&chip->pdev->kobj, &tpm_dev_group); + /* The sysfs routines rely on an implicit tpm_try_get_ops, this + * function is called before ops is null'd and the sysfs core + * synchronizes this removal so that no callbacks are running or can + * run again + */ + sysfs_remove_group(&chip->dev.parent->kobj, &tpm_dev_group); } diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 2216861f89f1..e21e2c599e66 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -171,11 +171,16 @@ enum tpm_chip_flags { }; struct tpm_chip { - struct device *pdev; /* Device stuff */ struct device dev; struct cdev cdev; + /* A driver callback under ops cannot be run unless ops_sem is held + * (sometimes implicitly, eg for the sysfs code). ops becomes null + * when the driver is unregistered, see tpm_try_get_ops. + */ + struct rw_semaphore ops_sem; const struct tpm_class_ops *ops; + unsigned int flags; int dev_num; /* /dev/tpm# */ @@ -201,11 +206,6 @@ struct tpm_chip { #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) -static inline void tpm_chip_put(struct tpm_chip *chip) -{ - module_put(chip->pdev->driver->owner); -} - static inline int tpm_read_index(int base, int index) { outb(index, base); @@ -517,6 +517,9 @@ extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long, wait_queue_head_t *, bool); struct tpm_chip *tpm_chip_find_get(int chip_num); +__must_check int tpm_try_get_ops(struct tpm_chip *chip); +void tpm_put_ops(struct tpm_chip *chip); + extern struct tpm_chip *tpmm_chip_alloc(struct device *dev, const struct tpm_class_ops *ops); extern int tpm_chip_register(struct tpm_chip *chip); diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index cb7e4f6b70ba..286bd090a488 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -570,7 +570,7 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle, rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT); if (rc) { - dev_warn(chip->pdev, "0x%08x was not flushed, out of memory\n", + dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n", handle); return; } @@ -580,7 +580,7 @@ static void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle, rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags, "flushing context"); if (rc) - dev_warn(chip->pdev, "0x%08x was not flushed, rc=%d\n", handle, + dev_warn(&chip->dev, "0x%08x was not flushed, rc=%d\n", handle, rc); tpm_buf_destroy(&buf); @@ -753,7 +753,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type) * except print the error code on a system failure. */ if (rc < 0) - dev_warn(chip->pdev, "transmit returned %d while stopping the TPM", + dev_warn(&chip->dev, "transmit returned %d while stopping the TPM", rc); } EXPORT_SYMBOL_GPL(tpm2_shutdown); @@ -820,7 +820,7 @@ static int tpm2_start_selftest(struct tpm_chip *chip, bool full) * immediately. This is a workaround for that. 
*/ if (rc == TPM2_RC_TESTING) { - dev_warn(chip->pdev, "Got RC_TESTING, ignoring\n"); + dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n"); rc = 0; } diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index dfadad0916a1..a48a878f791d 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c @@ -49,7 +49,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) for (i = 0; i < 6; i++) { status = ioread8(chip->vendor.iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { - dev_err(chip->pdev, "error reading header\n"); + dev_err(&chip->dev, "error reading header\n"); return -EIO; } *buf++ = ioread8(chip->vendor.iobase); @@ -60,12 +60,12 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) size = be32_to_cpu(*native_size); if (count < size) { - dev_err(chip->pdev, + dev_err(&chip->dev, "Recv size(%d) less than available space\n", size); for (; i < size; i++) { /* clear the waiting data anyway */ status = ioread8(chip->vendor.iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { - dev_err(chip->pdev, "error reading data\n"); + dev_err(&chip->dev, "error reading data\n"); return -EIO; } } @@ -76,7 +76,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) for (; i < size; i++) { status = ioread8(chip->vendor.iobase + 1); if ((status & ATML_STATUS_DATA_AVAIL) == 0) { - dev_err(chip->pdev, "error reading data\n"); + dev_err(&chip->dev, "error reading data\n"); return -EIO; } *buf++ = ioread8(chip->vendor.iobase); @@ -86,7 +86,7 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count) status = ioread8(chip->vendor.iobase + 1); if (status & ATML_STATUS_DATA_AVAIL) { - dev_err(chip->pdev, "data available is stuck\n"); + dev_err(&chip->dev, "data available is stuck\n"); return -EIO; } @@ -97,9 +97,9 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) { int i; - dev_dbg(chip->pdev, "tpm_atml_send:\n"); + dev_dbg(&chip->dev, "tpm_atml_send:\n"); for (i = 0; i < count; i++) { - dev_dbg(chip->pdev, "%d 0x%x(%d)\n", i, buf[i], buf[i]); + dev_dbg(&chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]); iowrite8(buf[i], chip->vendor.iobase); } diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c index 8dfb88b9739c..dd8f0eb3170a 100644 --- a/drivers/char/tpm/tpm_i2c_atmel.c +++ b/drivers/char/tpm/tpm_i2c_atmel.c @@ -52,7 +52,7 @@ struct priv_data { static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) { struct priv_data *priv = chip->vendor.priv; - struct i2c_client *client = to_i2c_client(chip->pdev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); s32 status; priv->len = 0; @@ -62,7 +62,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) status = i2c_master_send(client, buf, len); - dev_dbg(chip->pdev, + dev_dbg(&chip->dev, "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__, (int)min_t(size_t, 64, len), buf, len, status); return status; @@ -71,7 +71,7 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct priv_data *priv = chip->vendor.priv; - struct i2c_client *client = to_i2c_client(chip->pdev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); struct tpm_output_header *hdr = (struct tpm_output_header *)priv->buffer; u32 expected_len; @@ -88,7 +88,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) return -ENOMEM; if (priv->len >= expected_len) { - 
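[Editor's aside, not part of the patch: the get/put discipline the TPM changes introduce (see the tpm-dev.c hunk above). The ops read-lock is held only for the duration of one command, so tpm_chip_unregister() can NULL chip->ops and complete even while the char device remains open.]

	if (tpm_try_get_ops(chip))
		return -EPIPE;		/* ops already torn down */

	rc = tpm_transmit(chip, buf, bufsiz, 0);

	tpm_put_ops(chip);		/* drops ops_sem and the device ref */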
dev_dbg(chip->pdev, + dev_dbg(&chip->dev, "%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__, (int)min_t(size_t, 64, expected_len), buf, count, expected_len); @@ -97,7 +97,7 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) } rc = i2c_master_recv(client, buf, expected_len); - dev_dbg(chip->pdev, + dev_dbg(&chip->dev, "%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__, (int)min_t(size_t, 64, expected_len), buf, count, expected_len); @@ -106,13 +106,13 @@ static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) static void i2c_atmel_cancel(struct tpm_chip *chip) { - dev_err(chip->pdev, "TPM operation cancellation was requested, but is not supported"); + dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported"); } static u8 i2c_atmel_read_status(struct tpm_chip *chip) { struct priv_data *priv = chip->vendor.priv; - struct i2c_client *client = to_i2c_client(chip->pdev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); int rc; /* The TPM fails the I2C read until it is ready, so we do the entire @@ -125,7 +125,7 @@ static u8 i2c_atmel_read_status(struct tpm_chip *chip) /* Once the TPM has completed the command the command remains readable * until another command is issued. */ rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer)); - dev_dbg(chip->pdev, + dev_dbg(&chip->dev, "%s: sts=%d", __func__, rc); if (rc <= 0) return 0; diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index 63d5d22e9e60..f2aa99e34b4b 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -446,7 +446,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) /* read first 10 bytes, including tag, paramsize, and result */ size = recv_data(chip, buf, TPM_HEADER_SIZE); if (size < TPM_HEADER_SIZE) { - dev_err(chip->pdev, "Unable to read header\n"); + dev_err(&chip->dev, "Unable to read header\n"); goto out; } @@ -459,14 +459,14 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) size += recv_data(chip, &buf[TPM_HEADER_SIZE], expected - TPM_HEADER_SIZE); if (size < expected) { - dev_err(chip->pdev, "Unable to read remainder of result\n"); + dev_err(&chip->dev, "Unable to read remainder of result\n"); size = -ETIME; goto out; } wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status); if (status & TPM_STS_DATA_AVAIL) { /* retry? 
*/ - dev_err(chip->pdev, "Error left over data\n"); + dev_err(&chip->dev, "Error left over data\n"); size = -EIO; goto out; } diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index 847f1597fe9b..a1e1474dda30 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -96,13 +96,13 @@ static s32 i2c_nuvoton_write_buf(struct i2c_client *client, u8 offset, u8 size, /* read TPM_STS register */ static u8 i2c_nuvoton_read_status(struct tpm_chip *chip) { - struct i2c_client *client = to_i2c_client(chip->pdev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); s32 status; u8 data; status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data); if (status <= 0) { - dev_err(chip->pdev, "%s() error return %d\n", __func__, + dev_err(&chip->dev, "%s() error return %d\n", __func__, status); data = TPM_STS_ERR_VAL; } @@ -127,13 +127,13 @@ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data) /* write commandReady to TPM_STS register */ static void i2c_nuvoton_ready(struct tpm_chip *chip) { - struct i2c_client *client = to_i2c_client(chip->pdev); + struct i2c_client *client = to_i2c_client(chip->dev.parent); s32 status; /* this causes the current command to be aborted */ status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY); if (status < 0) - dev_err(chip->pdev, + dev_err(&chip->dev, "%s() fail to write TPM_STS.commandReady\n", __func__); } @@ -212,7 +212,7 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, return 0; } while (time_before(jiffies, stop)); } - dev_err(chip->pdev, "%s(%02x, %02x) -> timeout\n", __func__, mask, + dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask, value); return -ETIMEDOUT; } @@ -240,7 +240,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client, &chip->vendor.read_queue) == 0) { burst_count = i2c_nuvoton_get_burstcount(client, chip); if (burst_count < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "%s() fail to read burstCount=%d\n", __func__, burst_count); return -EIO; @@ -249,12 +249,12 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client, rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R, bytes2read, &buf[size]); if (rc < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "%s() fail on i2c_nuvoton_read_buf()=%d\n", __func__, rc); return -EIO; } - dev_dbg(chip->pdev, "%s(%d):", __func__, bytes2read); + dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read); size += bytes2read; } @@ -264,7 +264,7 @@ static int i2c_nuvoton_recv_data(struct i2c_client *client, /* Read TPM command results */ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) { - struct device *dev = chip->pdev; + struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); s32 rc; int expected, status, burst_count, retries, size = 0; @@ -334,7 +334,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) break; } i2c_nuvoton_ready(chip); - dev_dbg(chip->pdev, "%s() -> %d\n", __func__, size); + dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size); return size; } @@ -347,7 +347,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) */ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) { - struct device *dev = chip->pdev; + struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); u32 ordinal; size_t count = 0; diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index 
6c488e635fdd..e3cf9f3545c5 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -195,9 +195,9 @@ static int wait(struct tpm_chip *chip, int wait_for_bit) } if (i == TPM_MAX_TRIES) { /* timeout occurs */ if (wait_for_bit == STAT_XFE) - dev_err(chip->pdev, "Timeout in wait(STAT_XFE)\n"); + dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n"); if (wait_for_bit == STAT_RDA) - dev_err(chip->pdev, "Timeout in wait(STAT_RDA)\n"); + dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n"); return -EIO; } return 0; @@ -220,7 +220,7 @@ static void wait_and_send(struct tpm_chip *chip, u8 sendbyte) static void tpm_wtx(struct tpm_chip *chip) { number_of_wtx++; - dev_info(chip->pdev, "Granting WTX (%02d / %02d)\n", + dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n", number_of_wtx, TPM_MAX_WTX_PACKAGES); wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_CTRL_WTX); @@ -231,7 +231,7 @@ static void tpm_wtx(struct tpm_chip *chip) static void tpm_wtx_abort(struct tpm_chip *chip) { - dev_info(chip->pdev, "Aborting WTX\n"); + dev_info(&chip->dev, "Aborting WTX\n"); wait_and_send(chip, TPM_VL_VER); wait_and_send(chip, TPM_CTRL_WTX_ABORT); wait_and_send(chip, 0x00); @@ -257,7 +257,7 @@ recv_begin: } if (buf[0] != TPM_VL_VER) { - dev_err(chip->pdev, + dev_err(&chip->dev, "Wrong transport protocol implementation!\n"); return -EIO; } @@ -272,7 +272,7 @@ recv_begin: } if ((size == 0x6D00) && (buf[1] == 0x80)) { - dev_err(chip->pdev, "Error handling on vendor layer!\n"); + dev_err(&chip->dev, "Error handling on vendor layer!\n"); return -EIO; } @@ -284,7 +284,7 @@ recv_begin: } if (buf[1] == TPM_CTRL_WTX) { - dev_info(chip->pdev, "WTX-package received\n"); + dev_info(&chip->dev, "WTX-package received\n"); if (number_of_wtx < TPM_MAX_WTX_PACKAGES) { tpm_wtx(chip); goto recv_begin; @@ -295,14 +295,14 @@ recv_begin: } if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) { - dev_info(chip->pdev, "WTX-abort acknowledged\n"); + dev_info(&chip->dev, "WTX-abort acknowledged\n"); return size; } if (buf[1] == TPM_CTRL_ERROR) { - dev_err(chip->pdev, "ERROR-package received:\n"); + dev_err(&chip->dev, "ERROR-package received:\n"); if (buf[4] == TPM_INF_NAK) - dev_err(chip->pdev, + dev_err(&chip->dev, "-> Negative acknowledgement" " - retransmit command!\n"); return -EIO; @@ -321,7 +321,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count) ret = empty_fifo(chip, 1); if (ret) { - dev_err(chip->pdev, "Timeout while clearing FIFO\n"); + dev_err(&chip->dev, "Timeout while clearing FIFO\n"); return -EIO; } diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 289389ecef84..766370bed60c 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c @@ -113,7 +113,7 @@ static int nsc_wait_for_ready(struct tpm_chip *chip) } while (time_before(jiffies, stop)); - dev_info(chip->pdev, "wait for ready failed\n"); + dev_info(&chip->dev, "wait for ready failed\n"); return -EBUSY; } @@ -129,12 +129,12 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) return -EIO; if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) { - dev_err(chip->pdev, "F0 timeout\n"); + dev_err(&chip->dev, "F0 timeout\n"); return -EIO; } if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) { - dev_err(chip->pdev, "not in normal mode (0x%x)\n", + dev_err(&chip->dev, "not in normal mode (0x%x)\n", data); return -EIO; } @@ -143,7 +143,7 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) for (p = buffer; p < &buffer[count]; p++) { 
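[Editor's aside, not part of the patch: the poll-until-deadline idiom these TPM drivers share; nsc_wait_for_ready() above is one instance. read_status() and the mask/want parameters are hypothetical.]

static int wait_for_bits(struct tpm_chip *chip, u8 mask, u8 want,
			 unsigned long timeout)
{
	unsigned long stop = jiffies + timeout;

	do {
		u8 status = read_status(chip);	/* hypothetical accessor */

		if ((status & mask) == want)
			return 0;
		msleep(TPM_TIMEOUT);
	} while (time_before(jiffies, stop));

	return -EBUSY;	/* timed out, as nsc_wait_for_ready() reports */
}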
if (wait_for_stat (chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "OBF timeout (while reading data)\n"); return -EIO; } @@ -154,11 +154,11 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count) if ((data & NSC_STATUS_F0) == 0 && (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) { - dev_err(chip->pdev, "F0 not set\n"); + dev_err(&chip->dev, "F0 not set\n"); return -EIO; } if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) { - dev_err(chip->pdev, + dev_err(&chip->dev, "expected end of command(0x%x)\n", data); return -EIO; } @@ -189,19 +189,19 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) return -EIO; if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { - dev_err(chip->pdev, "IBF timeout\n"); + dev_err(&chip->dev, "IBF timeout\n"); return -EIO; } outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND); if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) { - dev_err(chip->pdev, "IBR timeout\n"); + dev_err(&chip->dev, "IBR timeout\n"); return -EIO; } for (i = 0; i < count; i++) { if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { - dev_err(chip->pdev, + dev_err(&chip->dev, "IBF timeout (while writing data)\n"); return -EIO; } @@ -209,7 +209,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) } if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) { - dev_err(chip->pdev, "IBF timeout\n"); + dev_err(&chip->dev, "IBF timeout\n"); return -EIO; } outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND); diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index f10a107614b4..7f13221aeb30 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -293,7 +293,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) /* read first 10 bytes, including tag, paramsize, and result */ if ((size = recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) { - dev_err(chip->pdev, "Unable to read header\n"); + dev_err(&chip->dev, "Unable to read header\n"); goto out; } @@ -306,7 +306,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) if ((size += recv_data(chip, &buf[TPM_HEADER_SIZE], expected - TPM_HEADER_SIZE)) < expected) { - dev_err(chip->pdev, "Unable to read remainder of result\n"); + dev_err(&chip->dev, "Unable to read remainder of result\n"); size = -ETIME; goto out; } @@ -315,7 +315,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) &chip->vendor.int_queue, false); status = tpm_tis_status(chip); if (status & TPM_STS_DATA_AVAIL) { /* retry? 
*/ - dev_err(chip->pdev, "Error left over data\n"); + dev_err(&chip->dev, "Error left over data\n"); size = -EIO; goto out; } @@ -401,7 +401,7 @@ static void disable_interrupts(struct tpm_chip *chip) iowrite32(intmask, chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality)); - devm_free_irq(chip->pdev, chip->vendor.irq, chip); + devm_free_irq(&chip->dev, chip->vendor.irq, chip); chip->vendor.irq = 0; } @@ -463,7 +463,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len) msleep(1); if (!priv->irq_tested) { disable_interrupts(chip); - dev_err(chip->pdev, + dev_err(&chip->dev, FW_BUG "TPM interrupt not working, polling instead\n"); } priv->irq_tested = true; @@ -533,7 +533,7 @@ static int probe_itpm(struct tpm_chip *chip) rc = tpm_tis_send_data(chip, cmd_getticks, len); if (rc == 0) { - dev_info(chip->pdev, "Detected an iTPM.\n"); + dev_info(&chip->dev, "Detected an iTPM.\n"); rc = 1; } else rc = -EFAULT; @@ -766,7 +766,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info, if (devm_request_irq (dev, i, tis_int_probe, IRQF_SHARED, chip->devname, chip) != 0) { - dev_info(chip->pdev, + dev_info(&chip->dev, "Unable to request irq: %d for probe\n", i); continue; @@ -818,7 +818,7 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info, if (devm_request_irq (dev, chip->vendor.irq, tis_int_handler, IRQF_SHARED, chip->devname, chip) != 0) { - dev_info(chip->pdev, + dev_info(&chip->dev, "Unable to request irq: %d for use\n", chip->vendor.irq); chip->vendor.irq = 0; diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c index b91e115462ae..abbee61c99c8 100644 --- a/drivers/cpufreq/cpufreq_interactive.c +++ b/drivers/cpufreq/cpufreq_interactive.c @@ -479,6 +479,7 @@ static void cpufreq_interactive_timer(unsigned long data) bool skip_hispeed_logic, skip_min_sample_time; bool jump_to_max_no_ts = false; bool jump_to_max = false; + bool start_hyst = true; if (!down_read_trylock(&ppol->enable_sem)) return; @@ -588,8 +589,12 @@ static void cpufreq_interactive_timer(unsigned long data) } if (now - ppol->max_freq_hyst_start_time < - tunables->max_freq_hysteresis) + tunables->max_freq_hysteresis) { + if (new_freq < ppol->policy->max && + ppol->policy->max <= tunables->hispeed_freq) + start_hyst = false; new_freq = max(tunables->hispeed_freq, new_freq); + } if (!skip_hispeed_logic && ppol->target_freq >= tunables->hispeed_freq && @@ -646,7 +651,7 @@ static void cpufreq_interactive_timer(unsigned long data) ppol->floor_validate_time = now; } - if (new_freq >= ppol->policy->max && !jump_to_max_no_ts) + if (start_hyst && new_freq >= ppol->policy->max && !jump_to_max_no_ts) ppol->max_freq_hyst_start_time = now; if (ppol->target_freq == new_freq && diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 0dadb6332f0e..7abe908427df 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -963,7 +963,9 @@ static int atmel_sha_finup(struct ahash_request *req) ctx->flags |= SHA_FLAGS_FINUP; err1 = atmel_sha_update(req); - if (err1 == -EINPROGRESS || err1 == -EBUSY) + if (err1 == -EINPROGRESS || + (err1 == -EBUSY && (ahash_request_flags(req) & + CRYPTO_TFM_REQ_MAY_BACKLOG))) return err1; /* diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 99d5e11db194..e06cc5df30be 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -498,7 +498,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, ret = 
caam_jr_enqueue(jrdev, desc, split_key_done, &result); if (!ret) { /* in progress */ - wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); ret = result.err; #ifdef DEBUG print_hex_dump(KERN_ERR, diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index e1eaf4ff9762..3ce1d5cdcbd2 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c @@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); if (!ret) { /* in progress */ - wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); ret = result.err; #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c index 49165daa807f..490f8d9ddb9f 100644 --- a/drivers/crypto/msm/ice.c +++ b/drivers/crypto/msm/ice.c @@ -975,7 +975,8 @@ static int qcom_ice_secure_ice_init(struct ice_device *ice_dev) static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev) { - int ret = 0, scm_ret = 0; + int ret = 0; + u64 scm_ret = 0; /* scm command buffer structure */ struct qcom_scm_cmd_buf { @@ -1001,7 +1002,7 @@ static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev) cbuf.device_id = ICE_TZ_DEV_ID; ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret); if (ret || scm_ret) { - pr_err("%s: failed, ret %d scm_ret %d\n", + pr_err("%s: failed, ret %d scm_ret %llu\n", __func__, ret, scm_ret); if (!ret) ret = scm_ret; diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c index 4ab8ca143f6c..b44f926a6ba0 100644 --- a/drivers/crypto/msm/qce50.c +++ b/drivers/crypto/msm/qce50.c @@ -2155,6 +2155,10 @@ static int _sha_complete(struct qce_device *pce_dev, int req_info) pce_sps_data = &preq_info->ce_sps; qce_callback = preq_info->qce_cb; areq = (struct ahash_request *) preq_info->areq; + if (!areq) { + pr_err("sha operation error. areq is NULL\n"); + return -ENXIO; + } qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents, DMA_TO_DEVICE); memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]), @@ -2970,7 +2974,7 @@ static inline int qce_alloc_req_info(struct qce_device *pce_dev) request_index++; if (request_index >= MAX_QCE_BAM_REQ) request_index = 0; - if (xchg(&pce_dev->ce_request_info[request_index]. + if (atomic_xchg(&pce_dev->ce_request_info[request_index]. in_use, true) == false) { pce_dev->ce_request_index = request_index; return request_index; @@ -2986,7 +2990,8 @@ static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info, bool is_complete) { pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST; - if (xchg(&pce_dev->ce_request_info[req_info].in_use, false) == true) { + if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use, + false) == true) { if (req_info < MAX_QCE_BAM_REQ && is_complete) atomic_dec(&pce_dev->no_of_queued_req); } else @@ -4610,7 +4615,7 @@ static int qce_dummy_req(struct qce_device *pce_dev) { int ret = 0; - if (!(xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX]. + if (!(atomic_xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX]. 
in_use, true) == false)) return -EBUSY; ret = qce_process_sha_req(pce_dev, NULL); @@ -5969,7 +5974,7 @@ void *qce_open(struct platform_device *pdev, int *rc) } for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) - pce_dev->ce_request_info[i].in_use = false; + atomic_set(&pce_dev->ce_request_info[i].in_use, false); pce_dev->ce_request_index = 0; pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ; @@ -6133,12 +6138,13 @@ EXPORT_SYMBOL(qce_hw_support); void qce_dump_req(void *handle) { int i; + bool req_in_use; struct qce_device *pce_dev = (struct qce_device *)handle; for (i = 0; i < MAX_QCE_BAM_REQ; i++) { - pr_info("qce_dump_req %d %d\n", i, - pce_dev->ce_request_info[i].in_use); - if (pce_dev->ce_request_info[i].in_use == true) + req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use); + pr_info("qce_dump_req %d %d\n", i, req_in_use); + if (req_in_use == true) _qce_dump_descr_fifos(pce_dev, i); } } diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h index 6dba3664ff08..ab0d21da72c5 100644 --- a/drivers/crypto/msm/qce50.h +++ b/drivers/crypto/msm/qce50.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -214,7 +214,7 @@ struct ce_sps_data { }; struct ce_request_info { - bool in_use; + atomic_t in_use; bool in_prog; enum qce_xfer_type_enum xfer_type; struct ce_sps_data ce_sps; diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c index 5b364f053b1b..f38fc422b35e 100644 --- a/drivers/crypto/msm/qcrypto.c +++ b/drivers/crypto/msm/qcrypto.c @@ -3972,6 +3972,7 @@ static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int len) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base); + int ret = 0; memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE); if (len <= SHA1_BLOCK_SIZE) { memcpy(&sha_ctx->authkey[0], key, len); @@ -3979,16 +3980,19 @@ static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, } else { sha_ctx->alg = QCE_HASH_SHA1; sha_ctx->diglen = SHA1_DIGEST_SIZE; - _sha_hmac_setkey(tfm, key, len); + ret = _sha_hmac_setkey(tfm, key, len); + if (ret) + pr_err("SHA1 hmac setkey failed\n"); sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE; } - return 0; + return ret; } static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int len) { struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base); + int ret = 0; memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE); if (len <= SHA256_BLOCK_SIZE) { @@ -3997,11 +4001,13 @@ static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, } else { sha_ctx->alg = QCE_HASH_SHA256; sha_ctx->diglen = SHA256_DIGEST_SIZE; - _sha_hmac_setkey(tfm, key, len); + ret = _sha_hmac_setkey(tfm, key, len); + if (ret) + pr_err("SHA256 hmac setkey failed\n"); sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE; } - return 0; + return ret; } static int _sha_hmac_init_ihash(struct ahash_request *req, diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 9a8a18aafd5c..6a60936b46e0 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -804,7 +804,7 @@ static void talitos_unregister_rng(struct device *dev) * crypto alg */ #define TALITOS_CRA_PRIORITY 3000 -#define TALITOS_MAX_KEY_SIZE 96 +#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE) #define TALITOS_MAX_IV_LENGTH 16 /* 
max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ struct talitos_ctx { @@ -1388,6 +1388,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, { struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); + if (keylen > TALITOS_MAX_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + } + memcpy(&ctx->key, key, keylen); ctx->keylen = keylen; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 39b8e171cad5..47c1747e7ae3 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2716,6 +2716,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, #define VENDOR_BLOCK 0x03 #define SPEAKER_BLOCK 0x04 #define HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK 0x06 +#define COLORIMETRY_EXTENDED_DATA_BLOCK 0x05 #define EXTENDED_TAG 0x07 #define VIDEO_CAPABILITY_BLOCK 0x07 #define Y420_VIDEO_DATA_BLOCK 0x0E @@ -3526,11 +3527,17 @@ drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db) (db[2] & (BIT(3) | BIT(2))) >> 2; connector->ce_scan_info = db[2] & (BIT(1) | BIT(0)); + connector->rgb_qs = + db[2] & BIT(6); + connector->yuv_qs = + db[2] & BIT(7); DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)", (int) connector->pt_scan_info, (int) connector->it_scan_info, (int) connector->ce_scan_info); + DRM_DEBUG_KMS("rgb_quant_range_select %d", connector->rgb_qs); + DRM_DEBUG_KMS("ycc_quant_range_select %d", connector->yuv_qs); } static bool drm_edid_is_luminance_value_present( @@ -3589,6 +3596,50 @@ drm_extract_hdr_db(struct drm_connector *connector, const u8 *db) } /* + * drm_extract_colorimetry_db - Parse the HDMI colorimetry extended block + * @connector: connector corresponding to the HDMI sink + * @db: start of the HDMI colorimetry extended block + * + * Parses the HDMI colorimetry block to extract sink info for @connector. 
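/*
 * The VCDB and colorimetry hunks in the drm_edid.c change both reduce to the
 * same idiom: test single bits of one CEA extended-block byte and record the
 * result. A self-contained sketch of the quantization-range bits (QS bits 6
 * and 7 of the third VCDB byte, as used above); struct and helper names here
 * are hypothetical, not the DRM API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

struct sink_caps {
	bool rgb_qs;	/* sink accepts an RGB quantization-range override */
	bool yuv_qs;	/* sink accepts a YCC quantization-range override */
};

static void parse_vcdb(struct sink_caps *caps, const uint8_t *db)
{
	/* db[2] is the third byte of the video capability data block */
	caps->rgb_qs = !!(db[2] & BIT(6));
	caps->yuv_qs = !!(db[2] & BIT(7));
}

int main(void)
{
	const uint8_t vcdb[] = { 0xe0, 0x00, 0xc0 };	/* both QS bits set */
	struct sink_caps caps;

	parse_vcdb(&caps, vcdb);
	printf("rgb_qs=%d yuv_qs=%d\n", caps.rgb_qs, caps.yuv_qs);
	return 0;
}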
+ */ +static void +drm_extract_colorimetry_db(struct drm_connector *connector, const u8 *db) +{ + + if (!db) { + DRM_ERROR("invalid db\n"); + return; + } + + /* Bit 0: xvYCC_601 */ + if (db[2] & BIT(0)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_xvYCC_601; + /* Bit 1: xvYCC_709 */ + if (db[2] & BIT(1)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_xvYCC_709; + /* Bit 2: sYCC_601 */ + if (db[2] & BIT(2)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_sYCC_601; + /* Bit 3: ADBYCC_601 */ + if (db[2] & BIT(3)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_ADBYCC_601; + /* Bit 4: ADB_RGB */ + if (db[2] & BIT(4)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_ADB_RGB; + /* Bit 5: BT2020_CYCC */ + if (db[2] & BIT(5)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_CYCC; + /* Bit 6: BT2020_YCC */ + if (db[2] & BIT(6)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_YCC; + /* Bit 7: BT2020_RGB */ + if (db[2] & BIT(7)) + connector->color_enc_fmt |= DRM_EDID_COLORIMETRY_BT2020_RGB; + + DRM_DEBUG_KMS("colorimetry fmt 0x%x\n", connector->color_enc_fmt); +} + +/* * drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks * @connector: connector corresponding to the HDMI sink * @edid: handle to the EDID structure @@ -3620,6 +3671,9 @@ struct edid *edid) case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK: drm_extract_hdr_db(connector, db); break; + case COLORIMETRY_EXTENDED_DATA_BLOCK: + drm_extract_colorimetry_db(connector, db); + break; default: break; } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index e637237fa811..c30b65785ab6 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -70,6 +70,8 @@ struct a5xx_gpu { * PREEMPT_NONE - no preemption in progress. Next state START. * PREEMPT_START - The trigger is evaluating if preemption is possible. Next * states: TRIGGERED, NONE + * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next + * state: NONE. * PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next * states: FAULTED, PENDING * PREEMPT_FAULTED: A preemption timed out (never completed). 
This will trigger @@ -81,6 +83,7 @@ struct a5xx_gpu { enum preempt_state { PREEMPT_NONE = 0, PREEMPT_START, + PREEMPT_ABORT, PREEMPT_TRIGGERED, PREEMPT_FAULTED, PREEMPT_PENDING, @@ -184,7 +187,10 @@ int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot); /* Return true if we are in a preempt state */ static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu) { - return !(atomic_read(&a5xx_gpu->preempt_state) == PREEMPT_NONE); + int preempt_state = atomic_read(&a5xx_gpu->preempt_state); + + return !(preempt_state == PREEMPT_NONE || + preempt_state == PREEMPT_ABORT); } int a5xx_counters_init(struct adreno_gpu *adreno_gpu); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index 6ab3ba076c2f..44d4ca35fa09 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -128,9 +128,20 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu) * one do nothing except to update the wptr to the latest and greatest */ if (!ring || (a5xx_gpu->cur_ring == ring)) { - update_wptr(gpu, ring); - - /* Set the state back to NONE */ + /* + * It's possible that while a preemption request is in progress + * from an irq context, a user context trying to submit might + * fail to update the write pointer, because it determines + * that the preempt state is not PREEMPT_NONE. + * + * Close the race by introducing an intermediate + * state PREEMPT_ABORT to let the submit path + * know that the ringbuffer is not going to change + * and that it can safely update the write pointer. + */ + + set_preempt_state(a5xx_gpu, PREEMPT_ABORT); + update_wptr(gpu, a5xx_gpu->cur_ring); set_preempt_state(a5xx_gpu, PREEMPT_NONE); return; } diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c index e4eb531c12aa..17ef86f1ba4f 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c @@ -351,6 +351,7 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi, scrambler_on = true; tmds_clock_ratio = 1; } else { + tmds_clock_ratio = 0; scrambler_on = connector->supports_scramble; } @@ -396,6 +397,14 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi, rc = _sde_hdmi_bridge_setup_ddc_timers(hdmi, HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS, timeout_hsync); } else { + /* reset tmds clock ratio */ + rc = sde_hdmi_scdc_write(hdmi, + HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE, + tmds_clock_ratio); + /* scdc write can fail if sink doesn't support SCDC */ + if (rc && connector->scdc_present) + SDE_ERROR("SCDC present, TMDS clk ratio err\n"); + sde_hdmi_scdc_write(hdmi, HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x0); reg_val = hdmi_read(hdmi, REG_HDMI_CTRL); reg_val &= ~BIT(28); /* Unset SCRAMBLER_EN bit */ diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 705ec2d0dfa2..a84d65195363 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -1260,12 +1260,6 @@ static int sde_kms_hw_init(struct msm_kms *kms) goto power_error; } - rc = sde_splash_parse_dt(dev); - if (rc) { - SDE_ERROR("parse dt for splash info failed: %d\n", rc); - goto power_error; - } - /* * Read the DISP_INTF_SEL register to check * whether early display is enabled in LK. @@ -1277,15 +1271,23 @@ static int sde_kms_hw_init(struct msm_kms *kms) } /* - * when LK has enabled early display, sde_splash_init should be - * called first. 
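/*
 * A sketch of the abort handshake described in the a5xx preemption comment
 * above: the irq path parks the state machine in an intermediate ABORT state
 * while it rewinds the write pointer, and the submit path treats both NONE
 * and ABORT as "safe to write". This is a user-space model built on C11
 * atomics with shortened, hypothetical names; it is not the msm driver code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum preempt_state { P_NONE, P_START, P_ABORT, P_TRIGGERED };

static _Atomic enum preempt_state state = P_NONE;

/* submit path: mirrors a5xx_in_preempt() after the fix */
static bool in_preempt(void)
{
	enum preempt_state s = atomic_load(&state);

	return !(s == P_NONE || s == P_ABORT);
}

/* irq path: abort a trigger that targets the current ring */
static void abort_trigger(void)
{
	atomic_store(&state, P_ABORT);
	/* ... update_wptr(cur_ring) would run here ... */
	atomic_store(&state, P_NONE);
}

int main(void)
{
	atomic_store(&state, P_START);
	printf("START: in_preempt=%d\n", in_preempt());	/* 1: wptr blocked */
	atomic_store(&state, P_ABORT);
	printf("ABORT: in_preempt=%d\n", in_preempt());	/* 0: wptr allowed */
	abort_trigger();
	return 0;
}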
This function will first do bandwidth voting job - because display hardware is accessing AHB data bus, otherwise - device reboot will happen. Second is to check if the memory is - reserved. + * When LK has enabled early display, both sde_splash_parse_dt and + * sde_splash_init must be called. The former parses the mandatory + * memory node used by the splash function. The latter first votes for + * AHB data bus bandwidth, since the display hardware is already + * accessing the bus and the device would reboot without the vote, and + * then checks that the memory is reserved. */ sinfo = &sde_kms->splash_info; - if (sinfo->handoff) + if (sinfo->handoff) { + rc = sde_splash_parse_dt(dev); + if (rc) { + SDE_ERROR("parse dt for splash info failed: %d\n", rc); + goto power_error; + } + sde_splash_init(&priv->phandle, kms); + } for (i = 0; i < sde_kms->catalog->vbif_count; i++) { u32 vbif_idx = sde_kms->catalog->vbif[i].id; diff --git a/drivers/gpu/drm/msm/sde/sde_splash.c b/drivers/gpu/drm/msm/sde/sde_splash.c index 8b2894163eae..79989f3ac1e9 100644 --- a/drivers/gpu/drm/msm/sde/sde_splash.c +++ b/drivers/gpu/drm/msm/sde/sde_splash.c @@ -529,7 +529,8 @@ int sde_splash_clean_up_free_resource(struct msm_kms *kms, /* When both hdmi's and dsi's resource are freed, * 1. Destroy splash node objects. - * 2. Decrease ref count in bandwidth voting function. + * 2. Release the memory which LK's stack is running on. + * 3. Withdraw AHB data bus bandwidth voting. */ if (sinfo->hdmi_connector_cnt == 0 && sinfo->dsi_connector_cnt == 0) { @@ -539,8 +540,12 @@ int sde_splash_clean_up_free_resource(struct msm_kms *kms, _sde_splash_destroy_splash_node(sinfo); + _sde_splash_free_bootup_memory_to_system(sinfo->lk_pool_paddr, + sinfo->lk_pool_size); + sde_power_data_bus_bandwidth_ctrl(phandle, sde_kms->core_client, false); + return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index f300eba95bb1..1244cdf52859 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -81,8 +81,10 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, return -ENOMEM; size = roundup(size, PAGE_SIZE); ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size); - if (ret != 0) + if (ret != 0) { + kfree(bo); return ret; + } bo->dumb = false; virtio_gpu_init_ttm_placement(bo, pinned); diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 6521ec01413e..afa71116c691 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -1309,6 +1309,10 @@ static int _adreno_start(struct adreno_device *adreno_dev) /* make sure ADRENO_DEVICE_STARTED is not set here */ BUG_ON(test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv)); + /* disallow l2pc during wake up to improve GPU wake up time */ + kgsl_pwrctrl_update_l2pc(&adreno_dev->dev, + KGSL_L2PC_WAKEUP_TIMEOUT); + pm_qos_update_request(&device->pwrctrl.pm_qos_req_dma, pmqos_wakeup_vote); diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 218d08e6dfc3..4a0acdcf8844 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -568,6 +568,8 @@ enum adreno_regs { ADRENO_REG_RBBM_RBBM_CTL, ADRENO_REG_UCHE_INVALIDATE0, ADRENO_REG_UCHE_INVALIDATE1, + ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO, + ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO, ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI, ADRENO_REG_RBBM_SECVID_TRUST_CONTROL, @@ -1508,21 +1510,60 @@ static inline void adreno_ringbuffer_set_pagetable(struct adreno_ringbuffer *rb, 
spin_unlock_irqrestore(&rb->preempt_lock, flags); } +static inline bool is_power_counter_overflow(struct adreno_device *adreno_dev, + unsigned int reg, unsigned int prev_val, unsigned int *perfctr_pwr_hi) +{ + unsigned int val; + bool ret = false; + + /* + * If prev_val is zero, this is the first read after a perf counter + * reset, so zero the cached perfctr_pwr_hi value. + */ + if (prev_val == 0) { + *perfctr_pwr_hi = 0; + return ret; + } + adreno_readreg(adreno_dev, ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, &val); + if (val != *perfctr_pwr_hi) { + *perfctr_pwr_hi = val; + ret = true; + } + return ret; +} + static inline unsigned int counter_delta(struct kgsl_device *device, unsigned int reg, unsigned int *counter) { + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); unsigned int val; unsigned int ret = 0; + bool overflow = true; + static unsigned int perfctr_pwr_hi; /* Read the value */ kgsl_regread(device, reg, &val); + if (adreno_is_a5xx(adreno_dev) && reg == adreno_getreg + (adreno_dev, ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO)) + overflow = is_power_counter_overflow(adreno_dev, reg, + *counter, &perfctr_pwr_hi); + /* Return 0 for the first read */ if (*counter != 0) { + if (val >= *counter) { ret = val - *counter; + } else if (overflow == true) { + ret = (0xFFFFFFFF - *counter) + val; + } else { + /* + * KGSL read an abnormal value from the counter, + * so drop the sample instead of accumulating it. + */ + pr_warn_once("KGSL: Abnormal value :0x%x (0x%x) from perf counter : 0x%x\n", + val, *counter, reg); + return 0; + } } *counter = val; diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c index 423071811b43..0e3e5b64bdc7 100644 --- a/drivers/gpu/msm/adreno_a3xx.c +++ b/drivers/gpu/msm/adreno_a3xx.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1530,6 +1530,10 @@ static unsigned int a3xx_register_offsets[ADRENO_REG_REGISTER_MAX] = { A3XX_UCHE_CACHE_INVALIDATE0_REG), ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE1, A3XX_UCHE_CACHE_INVALIDATE1_REG), + ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO, + A3XX_RBBM_PERFCTR_RBBM_0_LO), + ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, + A3XX_RBBM_PERFCTR_RBBM_0_HI), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO, A3XX_RBBM_PERFCTR_LOAD_VALUE_LO), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI, diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c index 5ca04e522270..6170cc263e4a 100644 --- a/drivers/gpu/msm/adreno_a4xx.c +++ b/drivers/gpu/msm/adreno_a4xx.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -806,6 +806,10 @@ static unsigned int a4xx_register_offsets[ADRENO_REG_REGISTER_MAX] = { ADRENO_REG_DEFINE(ADRENO_REG_RBBM_SW_RESET_CMD, A4XX_RBBM_SW_RESET_CMD), ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0, A4XX_UCHE_INVALIDATE0), ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE1, A4XX_UCHE_INVALIDATE1), + ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO, + A4XX_RBBM_PERFCTR_RBBM_0_LO), + ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, + A4XX_RBBM_PERFCTR_RBBM_0_HI), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO, A4XX_RBBM_PERFCTR_LOAD_VALUE_LO), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI, diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 0715022be6e3..3fb13c7a0814 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -715,6 +715,10 @@ static int _load_gpmu_firmware(struct adreno_device *adreno_dev) if (ret) goto err; + /* Integer overflow check for cmd_size */ + if (data[2] > (data[0] - 2)) + goto err; + cmds = data + data[2] + 3; cmd_size = data[0] - data[2] - 2; @@ -2069,6 +2073,9 @@ static void a5xx_start(struct adreno_device *adreno_dev) } + /* Disable All flat shading optimization */ + kgsl_regrmw(device, A5XX_VPC_DBG_ECO_CNTL, 0, 0x1 << 10); + /* * VPC corner case with local memory load kill leads to corrupt * internal state. Normal Disable does not work for all a5x chips. @@ -3069,6 +3076,10 @@ static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = { ADRENO_REG_DEFINE(ADRENO_REG_RBBM_BLOCK_SW_RESET_CMD2, A5XX_RBBM_BLOCK_SW_RESET_CMD2), ADRENO_REG_DEFINE(ADRENO_REG_UCHE_INVALIDATE0, A5XX_UCHE_INVALIDATE0), + ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_LO, + A5XX_RBBM_PERFCTR_RBBM_0_LO), + ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_RBBM_0_HI, + A5XX_RBBM_PERFCTR_RBBM_0_HI), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_LO, A5XX_RBBM_PERFCTR_LOAD_VALUE_LO), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_LOAD_VALUE_HI, diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c index 0e56731b16e2..883a9810fbf4 100644 --- a/drivers/gpu/msm/adreno_a5xx_preempt.c +++ b/drivers/gpu/msm/adreno_a5xx_preempt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -537,13 +537,42 @@ static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev) KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED, "smmu_info"); } + +static void a5xx_preemption_iommu_close(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device); + + kgsl_free_global(device, &iommu->smmu_info); +} + #else static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev) { return -ENODEV; } + +static void a5xx_preemption_iommu_close(struct adreno_device *adreno_dev) +{ +} #endif +static void a5xx_preemption_close(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_preemption *preempt = &adreno_dev->preempt; + struct adreno_ringbuffer *rb; + unsigned int i; + + del_timer(&preempt->timer); + kgsl_free_global(device, &preempt->counters); + a5xx_preemption_iommu_close(adreno_dev); + + FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { + kgsl_free_global(device, &rb->preemption_desc); + } +} + int a5xx_preemption_init(struct adreno_device *adreno_dev) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); @@ -568,7 +597,7 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev) A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0, "preemption_counters"); if (ret) - return ret; + goto err; addr = preempt->counters.gpuaddr; @@ -576,10 +605,16 @@ int a5xx_preemption_init(struct adreno_device *adreno_dev) FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { ret = a5xx_preemption_ringbuffer_init(adreno_dev, rb, addr); if (ret) - return ret; + goto err; addr += A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE; } - return a5xx_preemption_iommu_init(adreno_dev); + ret = a5xx_preemption_iommu_init(adreno_dev); + +err: + if (ret) + a5xx_preemption_close(device); + + return ret; } diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c index 5306303b8d15..2027ac66f737 100644 --- a/drivers/gpu/msm/adreno_debugfs.c +++ b/drivers/gpu/msm/adreno_debugfs.c @@ -131,6 +131,8 @@ typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i, static void sync_event_print(struct seq_file *s, struct kgsl_drawobj_sync_event *sync_event) { + unsigned long flags; + switch (sync_event->type) { case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: { seq_printf(s, "sync: ctx: %d ts: %d", @@ -138,9 +140,13 @@ static void sync_event_print(struct seq_file *s, break; } case KGSL_CMD_SYNCPOINT_TYPE_FENCE: + spin_lock_irqsave(&sync_event->handle_lock, flags); + seq_printf(s, "sync: [%pK] %s", sync_event->handle, (sync_event->handle && sync_event->handle->fence) ? 
sync_event->handle->fence->name : "NULL"); + + spin_unlock_irqrestore(&sync_event->handle_lock, flags); break; default: seq_printf(s, "sync: type: %d", sync_event->type); diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c index 8902c3175c79..1a94e71f5c1d 100644 --- a/drivers/gpu/msm/adreno_dispatch.c +++ b/drivers/gpu/msm/adreno_dispatch.c @@ -1460,7 +1460,9 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv, spin_unlock(&drawctxt->lock); - kgsl_pwrctrl_update_l2pc(&adreno_dev->dev); + if (device->pwrctrl.l2pc_update_queue) + kgsl_pwrctrl_update_l2pc(&adreno_dev->dev, + KGSL_L2PC_QUEUE_TIMEOUT); /* Add the context to the dispatcher pending list */ dispatcher_queue_context(adreno_dev, drawctxt); diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index d79d9613043f..ddc53edce3c1 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -26,6 +26,7 @@ #include "adreno_iommu.h" #include "adreno_pm4types.h" #include "adreno_ringbuffer.h" +#include "adreno_trace.h" #include "a3xx_reg.h" #include "adreno_a5xx.h" @@ -58,6 +59,7 @@ static void _cff_write_ringbuffer(struct adreno_ringbuffer *rb) } static void adreno_get_submit_time(struct adreno_device *adreno_dev, + struct adreno_ringbuffer *rb, struct adreno_submit_time *time) { unsigned long flags; @@ -87,6 +89,9 @@ static void adreno_get_submit_time(struct adreno_device *adreno_dev, } else time->ticks = 0; + /* Trace the GPU time to create a mapping to ftrace time */ + trace_adreno_cmdbatch_sync(rb->drawctxt_active, time->ticks); + /* Get the kernel clock for time since boot */ time->ktime = local_clock(); @@ -128,7 +133,7 @@ void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb, _cff_write_ringbuffer(rb); if (time != NULL) - adreno_get_submit_time(adreno_dev, time); + adreno_get_submit_time(adreno_dev, rb, time); adreno_ringbuffer_wptr(adreno_dev, rb); } diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h index 16ca0980cfbe..74c4c4e6e1fa 100644 --- a/drivers/gpu/msm/adreno_trace.h +++ b/drivers/gpu/msm/adreno_trace.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -148,6 +148,29 @@ TRACE_EVENT(adreno_cmdbatch_retired, ) ); +TRACE_EVENT(adreno_cmdbatch_sync, + TP_PROTO(struct adreno_context *drawctxt, + uint64_t ticks), + TP_ARGS(drawctxt, ticks), + TP_STRUCT__entry( + __field(unsigned int, id) + __field(unsigned int, timestamp) + __field(uint64_t, ticks) + __field(int, prio) + ), + TP_fast_assign( + __entry->id = drawctxt->base.id; + __entry->timestamp = drawctxt->timestamp; + __entry->ticks = ticks; + __entry->prio = drawctxt->base.priority; + ), + TP_printk( + "ctx=%u ctx_prio=%d ts=%u ticks=%lld", + __entry->id, __entry->prio, __entry->timestamp, + __entry->ticks + ) +); + TRACE_EVENT(adreno_cmdbatch_fault, TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int fault), TP_ARGS(cmdobj, fault), diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c index 8dc2ebd26cf9..fba18231cb72 100644 --- a/drivers/gpu/msm/kgsl_drawobj.c +++ b/drivers/gpu/msm/kgsl_drawobj.c @@ -79,6 +79,7 @@ void kgsl_dump_syncpoints(struct kgsl_device *device, { struct kgsl_drawobj_sync_event *event; unsigned int i; + unsigned long flags; for (i = 0; i < syncobj->numsyncs; i++) { event = &syncobj->synclist[i]; @@ -101,12 +102,16 @@ void kgsl_dump_syncpoints(struct kgsl_device *device, break; } case KGSL_CMD_SYNCPOINT_TYPE_FENCE: + spin_lock_irqsave(&event->handle_lock, flags); + if (event->handle) dev_err(device->dev, " fence: [%pK] %s\n", event->handle->fence, event->handle->name); else dev_err(device->dev, " fence: invalid\n"); + + spin_unlock_irqrestore(&event->handle_lock, flags); break; } } @@ -119,6 +124,7 @@ static void syncobj_timer(unsigned long data) struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); struct kgsl_drawobj_sync_event *event; unsigned int i; + unsigned long flags; if (syncobj == NULL || drawobj->context == NULL) return; @@ -147,12 +153,16 @@ static void syncobj_timer(unsigned long data) i, event->context->id, event->timestamp); break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: + spin_lock_irqsave(&event->handle_lock, flags); + if (event->handle != NULL) { dev_err(device->dev, " [%d] FENCE %s\n", i, event->handle->fence ? event->handle->fence->name : "NULL"); kgsl_sync_fence_log(event->handle->fence); } + + spin_unlock_irqrestore(&event->handle_lock, flags); break; } } @@ -231,7 +241,7 @@ static void drawobj_destroy_sparse(struct kgsl_drawobj *drawobj) static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj) { struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj); - unsigned long pending; + unsigned long pending, flags; unsigned int i; /* Zap the canary timer */ @@ -262,8 +272,17 @@ static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj) drawobj_sync_func, event); break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: - if (kgsl_sync_fence_async_cancel(event->handle)) + spin_lock_irqsave(&event->handle_lock, flags); + + if (kgsl_sync_fence_async_cancel(event->handle)) { + event->handle = NULL; + spin_unlock_irqrestore( + &event->handle_lock, flags); drawobj_put(drawobj); + } else { + spin_unlock_irqrestore( + &event->handle_lock, flags); + } break; } } @@ -325,12 +344,23 @@ EXPORT_SYMBOL(kgsl_drawobj_destroy); static void drawobj_sync_fence_func(void *priv) { + unsigned long flags; struct kgsl_drawobj_sync_event *event = priv; + drawobj_sync_expire(event->device, event); + trace_syncpoint_fence_expire(event->syncobj, event->handle ? 
event->handle->name : "unknown"); - drawobj_sync_expire(event->device, event); + spin_lock_irqsave(&event->handle_lock, flags); + + /* + * Setting event->handle to NULL here makes sure that + * other functions do not dereference an invalid pointer. + */ + event->handle = NULL; + + spin_unlock_irqrestore(&event->handle_lock, flags); drawobj_put(&event->syncobj->base); } @@ -350,6 +380,7 @@ static int drawobj_add_sync_fence(struct kgsl_device *device, struct kgsl_drawobj_sync_event *event; struct sync_fence *fence = NULL; unsigned int id; + unsigned long flags; int ret = 0; fence = sync_fence_fdget(sync->fd); @@ -368,18 +399,23 @@ static int drawobj_add_sync_fence(struct kgsl_device *device, event->device = device; event->context = NULL; + spin_lock_init(&event->handle_lock); set_bit(event->id, &syncobj->pending); trace_syncpoint_fence(syncobj, fence->name); + spin_lock_irqsave(&event->handle_lock, flags); + event->handle = kgsl_sync_fence_async_wait(sync->fd, drawobj_sync_fence_func, event); if (IS_ERR_OR_NULL(event->handle)) { ret = PTR_ERR(event->handle); - clear_bit(event->id, &syncobj->pending); event->handle = NULL; + spin_unlock_irqrestore(&event->handle_lock, flags); + + clear_bit(event->id, &syncobj->pending); drawobj_put(drawobj); @@ -390,6 +426,8 @@ static int drawobj_add_sync_fence(struct kgsl_device *device, */ trace_syncpoint_fence_expire(syncobj, (ret < 0) ? "error" : fence->name); + } else { + spin_unlock_irqrestore(&event->handle_lock, flags); } sync_fence_put(fence); diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h index fd9d2bc93f41..b208870e4c42 100644 --- a/drivers/gpu/msm/kgsl_drawobj.h +++ b/drivers/gpu/msm/kgsl_drawobj.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -114,6 +114,7 @@ struct kgsl_drawobj_sync { * register this event * @timestamp: Pending timestamp for the event * @handle: Pointer to a sync fence handle + * @handle_lock: Spin lock to protect handle * @device: Pointer to the KGSL device */ struct kgsl_drawobj_sync_event { @@ -123,6 +124,7 @@ struct kgsl_drawobj_sync_event { struct kgsl_context *context; unsigned int timestamp; struct kgsl_sync_fence_waiter *handle; + spinlock_t handle_lock; struct kgsl_device *device; }; diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c index c31a85b07447..685ce3ea968b 100644 --- a/drivers/gpu/msm/kgsl_pool.c +++ b/drivers/gpu/msm/kgsl_pool.c @@ -65,26 +65,19 @@ _kgsl_get_pool_from_order(unsigned int order) /* Map the page into kernel and zero it out */ static void -_kgsl_pool_zero_page(struct page *p, unsigned int pool_order) +_kgsl_pool_zero_page(struct page *p) { - int i; - - for (i = 0; i < (1 << pool_order); i++) { - struct page *page = nth_page(p, i); - void *addr = kmap_atomic(page); + void *addr = kmap_atomic(p); - memset(addr, 0, PAGE_SIZE); - dmac_flush_range(addr, addr + PAGE_SIZE); - kunmap_atomic(addr); - } + memset(addr, 0, PAGE_SIZE); + dmac_flush_range(addr, addr + PAGE_SIZE); + kunmap_atomic(addr); } /* Add a page to specified pool */ static void _kgsl_pool_add_page(struct kgsl_page_pool *pool, struct page *p) { - _kgsl_pool_zero_page(p, pool->pool_order); - spin_lock(&pool->list_lock); list_add_tail(&p->lru, &pool->page_list); pool->page_count++; @@ -329,7 +322,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages, } else return -ENOMEM; } - _kgsl_pool_zero_page(page, order); goto done; } @@ -349,7 +341,6 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages, page = alloc_pages(gfp_mask, order); if (page == NULL) return -ENOMEM; - _kgsl_pool_zero_page(page, order); goto done; } } @@ -379,13 +370,12 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages, } else return -ENOMEM; } - - _kgsl_pool_zero_page(page, order); } done: for (j = 0; j < (*page_size >> PAGE_SHIFT); j++) { p = nth_page(page, j); + _kgsl_pool_zero_page(p); pages[pcount] = p; pcount++; } diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index ad8b0131bb46..8c998a5d791b 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -43,13 +43,6 @@ #define DEFAULT_BUS_P 25 -/* - * The effective duration of qos request in usecs. After - * timeout, qos request is cancelled automatically. - * Kept 80ms default, inline with default GPU idle time. - */ -#define KGSL_L2PC_CPU_TIMEOUT (80 * 1000) - /* Order deeply matters here because reasons. New entries go on the end */ static const char * const clocks[] = { "src_clk", @@ -520,12 +513,14 @@ EXPORT_SYMBOL(kgsl_pwrctrl_set_constraint); /** * kgsl_pwrctrl_update_l2pc() - Update existing qos request * @device: Pointer to the kgsl_device struct + * @timeout_us: The effective duration of the qos request, in usecs. * * Updates an existing qos request to avoid L2PC on the * CPUs (which are selected through dtsi) on which the GPU * thread is running. This helps performance. 
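/*
 * The l2pc hunks above replace a single hard-coded 80 ms qos window with a
 * caller-supplied timeout: 80 ms when commands are queued, 10 ms on GPU
 * wakeup. A user-space sketch of the same parameterization; the request
 * struct and update function are stand-ins for the pm_qos API, not the real
 * kernel interface.
 */
#include <stdio.h>

#define L2PC_QUEUE_TIMEOUT_US  (80 * 1000)
#define L2PC_WAKEUP_TIMEOUT_US (10 * 1000)

struct qos_request {
	int latency_us;
};

/* stand-in for pm_qos_update_request_timeout() */
static void qos_update_timeout(struct qos_request *req, int latency_us,
			       unsigned long timeout_us)
{
	req->latency_us = latency_us;
	printf("qos: latency %d us for the next %lu us\n",
	       latency_us, timeout_us);
}

static void update_l2pc(struct qos_request *req, unsigned long timeout_us)
{
	/* the timeout now comes from the call site, not a #define */
	qos_update_timeout(req, 100, timeout_us);
}

int main(void)
{
	struct qos_request req = { 0 };

	update_l2pc(&req, L2PC_QUEUE_TIMEOUT_US);	/* queue path */
	update_l2pc(&req, L2PC_WAKEUP_TIMEOUT_US);	/* wakeup path */
	return 0;
}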
*/ -void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device) +void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device, + unsigned long timeout_us) { int cpu; @@ -539,7 +534,7 @@ void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device) pm_qos_update_request_timeout( &device->pwrctrl.l2pc_cpus_qos, device->pwrctrl.pm_qos_cpu_mask_latency, - KGSL_L2PC_CPU_TIMEOUT); + timeout_us); } } EXPORT_SYMBOL(kgsl_pwrctrl_update_l2pc); @@ -2201,6 +2196,10 @@ int kgsl_pwrctrl_init(struct kgsl_device *device) kgsl_property_read_u32(device, "qcom,l2pc-cpu-mask", &pwr->l2pc_cpus_mask); + pwr->l2pc_update_queue = of_property_read_bool( + device->pdev->dev.of_node, + "qcom,l2pc-update-queue"); + pm_runtime_enable(&pdev->dev); ocmem_bus_node = of_find_node_by_name( diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h index 42f918b80fcd..5c0071544f60 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.h +++ b/drivers/gpu/msm/kgsl_pwrctrl.h @@ -51,6 +51,19 @@ #define KGSL_PWR_DEL_LIMIT 1 #define KGSL_PWR_SET_LIMIT 2 +/* + * The effective duration of qos request in usecs at queue time. + * After timeout, qos request is cancelled automatically. + * Kept 80ms default, inline with default GPU idle time. + */ +#define KGSL_L2PC_QUEUE_TIMEOUT (80 * 1000) + +/* + * The effective duration of qos request in usecs at wakeup time. + * After timeout, qos request is cancelled automatically. + */ +#define KGSL_L2PC_WAKEUP_TIMEOUT (10 * 1000) + enum kgsl_pwrctrl_timer_type { KGSL_PWR_IDLE_TIMER, }; @@ -128,6 +141,7 @@ struct kgsl_regulator { * @irq_name - resource name for the IRQ * @clk_stats - structure of clock statistics * @l2pc_cpus_mask - mask to avoid L2PC on masked CPUs + * @l2pc_update_queue - Boolean flag to avoid L2PC on masked CPUs at queue time * @l2pc_cpus_qos - qos structure to avoid L2PC on CPUs * @pm_qos_req_dma - the power management quality of service structure * @pm_qos_active_latency - allowed CPU latency in microseconds when active @@ -183,6 +197,7 @@ struct kgsl_pwrctrl { const char *irq_name; struct kgsl_clk_stats clk_stats; unsigned int l2pc_cpus_mask; + bool l2pc_update_queue; struct pm_qos_request l2pc_cpus_qos; struct pm_qos_request pm_qos_req_dma; unsigned int pm_qos_active_latency; @@ -249,5 +264,6 @@ int kgsl_active_count_wait(struct kgsl_device *device, int count); void kgsl_pwrctrl_busy_time(struct kgsl_device *device, u64 time, u64 busy); void kgsl_pwrctrl_set_constraint(struct kgsl_device *device, struct kgsl_pwr_constraint *pwrc, uint32_t id); -void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device); +void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device, + unsigned long timeout_us); #endif /* __KGSL_PWRCTRL_H */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 1c02deab068f..9eca4b41fa0a 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2287,6 +2287,10 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; + if (cmd.port_num < rdma_start_port(ib_dev) || + cmd.port_num > rdma_end_port(ib_dev)) + return -EINVAL; + INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, out_len); @@ -2827,6 +2831,10 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; + if (cmd.attr.port_num < rdma_start_port(ib_dev) || + cmd.attr.port_num > rdma_end_port(ib_dev)) + return -EINVAL; + uobj = kmalloc(sizeof *uobj, GFP_KERNEL); if (!uobj) return -ENOMEM; diff --git 
a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c index 56f2732334db..30da797a85dc 100644 --- a/drivers/input/misc/hbtp_input.c +++ b/drivers/input/misc/hbtp_input.c @@ -249,6 +249,10 @@ static int hbtp_input_release(struct inode *inode, struct file *file) return -ENOTTY; } hbtp->count--; + if (!completion_done(&hbtp->power_suspend_sig)) + complete(&hbtp->power_suspend_sig); + if (!completion_done(&hbtp->power_resume_sig)) + complete(&hbtp->power_resume_sig); if (hbtp->power_sig_enabled) hbtp->power_sig_enabled = false; mutex_unlock(&hbtp->mutex); diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c index 78bdd24af28b..08bfb83a9447 100644 --- a/drivers/input/touchscreen/st/fts.c +++ b/drivers/input/touchscreen/st/fts.c @@ -1003,7 +1003,10 @@ static unsigned char *fts_status_event_handler( case FTS_WATER_MODE_ON: case FTS_WATER_MODE_OFF: default: - logError(1, "%s %s Received unhandled status event = %02X %02X %02X %02X %02X %02X %02X %02X\n", tag, __func__, event[0], event[1], event[2], event[3], event[4], event[5], event[6], event[7]); + logError(0, + "%s %s Received unhandled status event = %02X %02X %02X %02X %02X %02X %02X %02X\n", + tag, __func__, event[0], event[1], event[2], + event[3], event[4], event[5], event[6], event[7]); break; } @@ -1755,8 +1758,6 @@ static int fts_fb_state_chg_callback(struct notifier_block *nb, unsigned long va info->resume_bit = 1; - fts_system_reset(); - fts_mode_handler(info, 0); info->sensor_sleep = false; @@ -1959,9 +1960,9 @@ static int parse_dt(struct device *dev, struct fts_i2c_platform_data *bdata) bdata->bus_reg_name = name; logError(0, "%s bus_reg_name = %s\n", tag, name); - if (of_property_read_bool(np, "st, reset-gpio")) { + if (of_property_read_bool(np, "st,reset-gpio")) { bdata->reset_gpio = of_get_named_gpio_flags(np, - "st, reset-gpio", 0, NULL); + "st,reset-gpio", 0, NULL); logError(0, "%s reset_gpio =%d\n", tag, bdata->reset_gpio); } else { bdata->reset_gpio = GPIO_NOT_DEFINED; @@ -2210,7 +2211,13 @@ static int fts_probe(struct i2c_client *client, } #endif - queue_delayed_work(info->fwu_workqueue, &info->fwu_work, msecs_to_jiffies(EXP_FN_WORK_DELAY_MS)); + /* + * To auto-update the firmware at probe time, uncomment the + * queue_delayed_work() call below. + */ + + /* queue_delayed_work(info->fwu_workqueue, &info->fwu_work, + * msecs_to_jiffies(EXP_FN_WORK_DELAY_MS)); + */ logError(1, "%s Probe Finished!\n", tag); return OK; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index c6f74b149706..ce18a512b76a 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1727,7 +1727,8 @@ static void arm_smmu_pgtbl_unlock(struct arm_smmu_domain *smmu_domain, static int arm_smmu_restore_sec_cfg(struct arm_smmu_device *smmu) { - int ret, scm_ret; + int ret; + u64 scm_ret; if (!arm_smmu_is_static_cb(smmu)) return 0; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 2e0f61a2dc3f..9e96d81bc5cd 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -793,6 +793,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, int enabled; u64 val; + if (cpu >= nr_cpu_ids) + return -EINVAL; + if (gic_irq_in_rdist(d)) return -EINVAL; diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c index 564d20079715..15c931bbbf65 100644 --- a/drivers/leds/leds-qpnp-flash-v2.c +++ b/drivers/leds/leds-qpnp-flash-v2.c @@ -158,6 +158,11 @@ #define FLASH_LED_DISABLE 0x00 #define FLASH_LED_SAFETY_TMR_DISABLED 
0x13 #define FLASH_LED_MAX_TOTAL_CURRENT_MA 3750 +#define FLASH_LED_IRES5P0_MAX_CURR_MA 640 +#define FLASH_LED_IRES7P5_MAX_CURR_MA 960 +#define FLASH_LED_IRES10P0_MAX_CURR_MA 1280 +#define FLASH_LED_IRES12P5_MAX_CURR_MA 1600 +#define MAX_IRES_LEVELS 4 /* notifier call chain for flash-led irqs */ static ATOMIC_NOTIFIER_HEAD(irq_notifier_list); @@ -196,13 +201,15 @@ struct flash_node_data { struct pinctrl_state *hw_strobe_state_suspend; int hw_strobe_gpio; int ires_ua; + int default_ires_ua; int max_current; int current_ma; int prev_current_ma; u8 duration; u8 id; u8 type; - u8 ires; + u8 ires_idx; + u8 default_ires_idx; u8 hdrm_val; u8 current_reg_val; u8 strobe_ctrl; @@ -305,6 +312,11 @@ static int otst3_threshold_table[] = { 125, 119, 113, 107, 149, 143, 137, 131, }; +static int max_ires_curr_ma_table[MAX_IRES_LEVELS] = { + FLASH_LED_IRES12P5_MAX_CURR_MA, FLASH_LED_IRES10P0_MAX_CURR_MA, + FLASH_LED_IRES7P5_MAX_CURR_MA, FLASH_LED_IRES5P0_MAX_CURR_MA +}; + static int qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data) { int rc; @@ -935,6 +947,7 @@ static void qpnp_flash_led_aggregate_max_current(struct flash_node_data *fnode) static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value) { + int i = 0; int prgm_current_ma = value; int min_ma = fnode->ires_ua / 1000; struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev); @@ -944,7 +957,22 @@ static void qpnp_flash_led_node_set(struct flash_node_data *fnode, int value) else if (value < min_ma) prgm_current_ma = min_ma; + fnode->ires_idx = fnode->default_ires_idx; + fnode->ires_ua = fnode->default_ires_ua; + prgm_current_ma = min(prgm_current_ma, fnode->max_current); + if (prgm_current_ma > max_ires_curr_ma_table[fnode->ires_idx]) { + /* find the matching ires */ + for (i = MAX_IRES_LEVELS - 1; i >= 0; i--) { + if (prgm_current_ma <= max_ires_curr_ma_table[i]) { + fnode->ires_idx = i; + fnode->ires_ua = FLASH_LED_IRES_MIN_UA + + (FLASH_LED_IRES_BASE - fnode->ires_idx) * + FLASH_LED_IRES_DIVISOR; + break; + } + } + } fnode->current_ma = prgm_current_ma; fnode->cdev.brightness = prgm_current_ma; fnode->current_reg_val = CURRENT_MA_TO_REG_VAL(prgm_current_ma, @@ -1062,7 +1090,7 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on) val = 0; for (i = 0; i < led->num_fnodes; i++) if (snode->led_mask & BIT(led->fnode[i].id)) - val |= led->fnode[i].ires << (led->fnode[i].id * 2); + val |= led->fnode[i].ires_idx << (led->fnode[i].id * 2); rc = qpnp_flash_led_masked_write(led, FLASH_LED_REG_IRES(led->base), FLASH_LED_CURRENT_MASK, val); @@ -1434,13 +1462,14 @@ static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led, return rc; } - fnode->ires_ua = FLASH_LED_IRES_DEFAULT_UA; - fnode->ires = FLASH_LED_IRES_DEFAULT_VAL; + fnode->default_ires_ua = fnode->ires_ua = FLASH_LED_IRES_DEFAULT_UA; + fnode->default_ires_idx = fnode->ires_idx = FLASH_LED_IRES_DEFAULT_VAL; rc = of_property_read_u32(node, "qcom,ires-ua", &val); if (!rc) { - fnode->ires_ua = val; - fnode->ires = FLASH_LED_IRES_BASE - - (val - FLASH_LED_IRES_MIN_UA) / FLASH_LED_IRES_DIVISOR; + fnode->default_ires_ua = fnode->ires_ua = val; + fnode->default_ires_idx = fnode->ires_idx = + FLASH_LED_IRES_BASE - (val - FLASH_LED_IRES_MIN_UA) / + FLASH_LED_IRES_DIVISOR; } else if (rc != -EINVAL) { pr_err("Unable to read current resolution rc=%d\n", rc); return rc; diff --git a/drivers/md/md.c b/drivers/md/md.c index eff554a12fb4..0a856cb181e9 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1866,7 +1866,7 @@ 
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) } sb = page_address(rdev->sb_page); sb->data_size = cpu_to_le64(num_sectors); - sb->super_offset = rdev->sb_start; + sb->super_offset = cpu_to_le64(rdev->sb_start); sb->sb_csum = calc_sb_1_csum(sb); md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); @@ -2273,7 +2273,7 @@ static bool does_sb_need_changing(struct mddev *mddev) /* Check if any mddev parameters have changed */ if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || - (mddev->layout != le64_to_cpu(sb->layout)) || + (mddev->layout != le32_to_cpu(sb->layout)) || (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) return true; diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c index 8ef6399d794f..bc957528f69f 100644 --- a/drivers/media/pci/saa7134/saa7134-i2c.c +++ b/drivers/media/pci/saa7134/saa7134-i2c.c @@ -355,12 +355,43 @@ static struct i2c_client saa7134_client_template = { /* ----------------------------------------------------------- */ +/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */ +static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev) +{ + u8 subaddr = 0x7, dmdregval; + u8 data[2]; + int ret; + struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0, + .buf = &subaddr, .len = 1}, + {.addr = 0x08, + .flags = I2C_M_RD, + .buf = &dmdregval, .len = 1} + }; + struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0, + .buf = data, .len = 2} }; + + ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2); + if ((ret == 2) && (dmdregval & 0x2)) { + pr_debug("%s: DVB-T demod i2c gate was left closed\n", + dev->name); + + data[0] = subaddr; + data[1] = (dmdregval & ~0x2); + if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1) + pr_err("%s: EEPROM i2c gate open failure\n", + dev->name); + } +} + static int saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len) { unsigned char buf; int i,err; + if (dev->board == SAA7134_BOARD_MD7134) + saa7134_i2c_eeprom_md7134_gate(dev); + dev->i2c_client.addr = 0xa0 >> 1; buf = 0; if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) { diff --git a/drivers/media/platform/msm/ais/fd/msm_fd_dev.c b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c index 420083f019cf..4024748e6afa 100644 --- a/drivers/media/platform/msm/ais/fd/msm_fd_dev.c +++ b/drivers/media/platform/msm/ais/fd/msm_fd_dev.c @@ -1053,14 +1053,18 @@ static int msm_fd_s_ctrl(struct file *file, void *fh, struct v4l2_control *a) a->value = ctx->format.size->work_size; break; case V4L2_CID_FD_WORK_MEMORY_FD: + mutex_lock(&ctx->fd_device->recovery_lock); if (ctx->work_buf.fd != -1) msm_fd_hw_unmap_buffer(&ctx->work_buf); if (a->value >= 0) { ret = msm_fd_hw_map_buffer(&ctx->mem_pool, a->value, &ctx->work_buf); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&ctx->fd_device->recovery_lock); return ret; + } } + mutex_unlock(&ctx->fd_device->recovery_lock); break; default: return -EINVAL; diff --git a/drivers/media/platform/msm/ais/isp/msm_isp.h b/drivers/media/platform/msm/ais/isp/msm_isp.h index 72a76d178aa8..86974eeb4a32 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp.h +++ b/drivers/media/platform/msm/ais/isp/msm_isp.h @@ -355,6 +355,7 @@ struct msm_vfe_hardware_info { uint32_t dmi_reg_offset; uint32_t min_ab; uint32_t min_ib; + uint32_t regulator_num; const char *regulator_names[]; }; diff --git 
a/drivers/media/platform/msm/ais/isp/msm_isp47.c b/drivers/media/platform/msm/ais/isp/msm_isp47.c index d63282f80aca..d33dc758aef9 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp47.c +++ b/drivers/media/platform/msm/ais/isp/msm_isp47.c @@ -2537,8 +2537,7 @@ int msm_vfe47_get_regulators(struct vfe_device *vfe_dev) int rc = 0; int i; - vfe_dev->vfe_num_regulators = - sizeof(*vfe_dev->hw_info->regulator_names) / sizeof(char *); + vfe_dev->vfe_num_regulators = vfe_dev->hw_info->regulator_num; vfe_dev->regulator_info = kzalloc(sizeof(struct msm_cam_regulator) * vfe_dev->vfe_num_regulators, GFP_KERNEL); @@ -2811,6 +2810,7 @@ struct msm_vfe_hardware_info vfe47_hw_info = { .dmi_reg_offset = 0xC2C, .axi_hw_info = &msm_vfe47_axi_hw_info, .stats_hw_info = &msm_vfe47_stats_hw_info, + .regulator_num = 3, .regulator_names = {"vdd", "camss-vdd", "mmagic-vdd"}, }; EXPORT_SYMBOL(vfe47_hw_info); diff --git a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c index 6e89544161ee..0d08cffda25c 100644 --- a/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c +++ b/drivers/media/platform/msm/ais/isp/msm_isp_stats_util.c @@ -891,6 +891,12 @@ int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg) struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL; struct msm_isp_sw_framskip *sw_skip_info = NULL; + if (update_cmd->num_streams > MSM_ISP_STATS_MAX) { + pr_err("%s: Invalid num_streams %d\n", + __func__, update_cmd->num_streams); + return -EINVAL; + } + /* validate request */ for (i = 0; i < update_cmd->num_streams; i++) { update_info = (struct msm_vfe_axi_stream_cfg_update_info *) diff --git a/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c index bfb15846e73c..84266691f9b8 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c +++ b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c @@ -818,6 +818,10 @@ static long msm_ois_subdev_do_ioctl( parg = &ois_data; break; } + break; + case VIDIOC_MSM_OIS_CFG: + pr_err("%s: invalid cmd 0x%x received\n", __func__, cmd); + return -EINVAL; } rc = msm_ois_subdev_ioctl(sd, cmd, parg); diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index 7bc5b5ad1122..cf897947fff2 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -8434,9 +8434,10 @@ out: */ static int qseecom_check_whitelist_feature(void) { - int version = scm_get_feat_version(FEATURE_ID_WHITELIST); + u64 version = 0; + int ret = scm_get_feat_version(FEATURE_ID_WHITELIST, &version); - return version >= MAKE_WHITELIST_VERSION(1, 0, 0); + return (ret == 0) && (version >= MAKE_WHITELIST_VERSION(1, 0, 0)); } static int qseecom_probe(struct platform_device *pdev) diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 01e5502917f7..d39b4056c169 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1173,7 +1173,7 @@ idata_free: cmd_done: mmc_blk_put(md); - if (card->cmdq_init) + if (card && card->cmdq_init) wake_up(&card->host->cmdq_ctx.wait); return err; } @@ -4623,6 +4623,10 @@ static int mmc_blk_probe(struct mmc_card *card) dev_set_drvdata(&card->dev, md); +#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME + mmc_set_bus_resume_policy(card->host, 1); +#endif + if (mmc_add_disk(md)) goto out; @@ -4666,6 +4670,9 @@ static void mmc_blk_remove(struct mmc_card *card) pm_runtime_put_noidle(&card->dev); mmc_blk_remove_req(md); dev_set_drvdata(&card->dev, NULL); +#ifdef 
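The qseecom hunk above tracks an scm_get_feat_version() rework that separates the call status from the version, which is now returned through a u64 out-parameter. A minimal sketch of the resulting call pattern follows; the SCM call is stubbed out, and the MAKE_WHITELIST_VERSION bit layout is an assumed encoding for illustration, not taken from this patch.

#include <stdbool.h>
#include <stdint.h>

/* Stub standing in for the reworked scm_get_feat_version(): fills
 * *version and returns 0 on success, a negative errno otherwise. */
static int scm_get_feat_version_stub(uint32_t feat_id, uint64_t *version)
{
        (void)feat_id;
        *version = (uint64_t)1 << 16; /* pretend firmware reports 1.0.0 */
        return 0;
}

/* Assumed major/minor/patch packing, for illustration only. */
#define MAKE_WHITELIST_VERSION(maj, min, pat) \
        (((uint64_t)(maj) << 16) | ((uint64_t)(min) << 8) | (uint64_t)(pat))

static bool whitelist_supported(void)
{
        uint64_t version = 0;
        int ret = scm_get_feat_version_stub(0 /* FEATURE_ID_WHITELIST */,
                                            &version);

        /* Trust the out-parameter only when the call succeeded; the
         * old code compared the raw return value, so an error code
         * could be misread as a valid version. */
        return ret == 0 && version >= MAKE_WHITELIST_VERSION(1, 0, 0);
}

int main(void)
{
        return whitelist_supported() ? 0 : 1;
}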
CONFIG_MMC_BLOCK_DEFERRED_RESUME + mmc_set_bus_resume_policy(card->host, 0); +#endif } static int _mmc_blk_suspend(struct mmc_card *card, bool wait) diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 70da30095b89..a5e4b4b93d1b 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1583,6 +1583,11 @@ static int bgmac_probe(struct bcma_device *core) dev_warn(&core->dev, "Using random MAC: %pM\n", mac); } + /* This (reset &) enable is not preset in specs or reference driver but + * Broadcom does it in arch PCI code when enabling fake PCI device. + */ + bcma_core_enable(core, 0); + /* Allocation and references */ net_dev = alloc_etherdev(sizeof(*bgmac)); if (!net_dev) diff --git a/drivers/net/ethernet/msm/msm_rmnet_mhi.c b/drivers/net/ethernet/msm/msm_rmnet_mhi.c index 60b7a64c2edb..de14dcc6f4ed 100644 --- a/drivers/net/ethernet/msm/msm_rmnet_mhi.c +++ b/drivers/net/ethernet/msm/msm_rmnet_mhi.c @@ -942,10 +942,13 @@ net_dev_reg_fail: netif_napi_del(&(rmnet_mhi_ptr->napi)); free_netdev(rmnet_mhi_ptr->dev); net_dev_alloc_fail: - mhi_close_channel(rmnet_mhi_ptr->rx_client_handle); - rmnet_mhi_ptr->dev = NULL; + if (rmnet_mhi_ptr->rx_client_handle) { + mhi_close_channel(rmnet_mhi_ptr->rx_client_handle); + rmnet_mhi_ptr->dev = NULL; + } mhi_rx_chan_start_fail: - mhi_close_channel(rmnet_mhi_ptr->tx_client_handle); + if (rmnet_mhi_ptr->tx_client_handle) + mhi_close_channel(rmnet_mhi_ptr->tx_client_handle); mhi_tx_chan_start_fail: rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exited ret %d.\n", ret); return ret; diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 84b9cca152eb..e83acc608678 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -907,7 +907,7 @@ static void decode_txts(struct dp83640_private *dp83640, if (overflow) { pr_debug("tx timestamp queue overflow, count %d\n", overflow); while (skb) { - skb_complete_tx_timestamp(skb, NULL); + kfree_skb(skb); skb = skb_dequeue(&dp83640->tx_queue); } return; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index e13ad6cdcc22..c8b85f1069ff 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -539,6 +539,8 @@ static int ksz9031_read_status(struct phy_device *phydev) if ((regval & 0xFF) == 0xFF) { phy_init_hw(phydev); phydev->link = 0; + if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) + phydev->drv->config_intr(phydev); } return 0; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 349aecbc210a..ac945f8781ac 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -733,15 +733,15 @@ static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev) static void vrf_dev_uninit(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); - struct slave_queue *queue = &vrf->queue; - struct list_head *head = &queue->all_slaves; - struct slave *slave, *next; +// struct slave_queue *queue = &vrf->queue; +// struct list_head *head = &queue->all_slaves; +// struct slave *slave, *next; vrf_rtable_destroy(vrf); vrf_rt6_destroy(vrf); - list_for_each_entry_safe(slave, next, head, list) - vrf_del_slave(dev, slave->dev); +// list_for_each_entry_safe(slave, next, head, list) +// vrf_del_slave(dev, slave->dev); free_percpu(dev->dstats); dev->dstats = NULL; @@ -914,6 +914,14 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[]) static void vrf_dellink(struct net_device *dev, struct list_head *head) { + struct net_vrf *vrf = netdev_priv(dev); + struct 
slave_queue *queue = &vrf->queue; + struct list_head *all_slaves = &queue->all_slaves; + struct slave *slave, *next; + + list_for_each_entry_safe(slave, next, all_slaves, list) + vrf_del_slave(dev, slave->dev); + unregister_netdevice_queue(dev, head); } diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 9e607b2fa2d4..ca77861c4320 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3139,7 +3139,7 @@ int ath10k_pci_setup_resource(struct ath10k *ar) setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, (unsigned long)ar); - if (QCA_REV_6174(ar)) + if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) ath10k_pci_override_ce_config(ar); ret = ath10k_pci_alloc_pipes(ar); diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 75e81991913a..0c60a7baae82 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1274,19 +1274,19 @@ static int ath10k_snoc_probe(struct platform_device *pdev) ret = ath10k_snoc_claim(ar); if (ret) { ath10k_err(ar, "failed to claim device: %d\n", ret); - goto err_core_destroy; + goto err_stop_qmi_service; } ret = ath10k_snoc_bus_configure(ar); if (ret) { ath10k_err(ar, "failed to configure bus: %d\n", ret); - goto err_core_destroy; + goto err_stop_qmi_service; } ret = ath10k_snoc_alloc_pipes(ar); if (ret) { ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", ret); - goto err_core_destroy; + goto err_stop_qmi_service; } netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll, @@ -1319,6 +1319,9 @@ err_free_irq: err_free_pipes: ath10k_snoc_free_pipes(ar); +err_stop_qmi_service: + ath10k_snoc_stop_qmi_service(ar); + err_core_destroy: ath10k_core_destroy(ar); diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 63bb7686b811..94861020af12 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -960,7 +960,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true); - if (len < sizeof(struct ieee80211_mgmt)) + if (len < sizeof(struct ieee80211_hdr_3addr)) return -EINVAL; cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c index 70a6985334d5..da5826d788d6 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c @@ -4472,6 +4472,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_KERNEL); } else if (ieee80211_is_action(mgmt->frame_control)) { + if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) { + brcmf_err("invalid action frame length\n"); + err = -EINVAL; + goto exit; + } af_params = kzalloc(sizeof(*af_params), GFP_KERNEL); if (af_params == NULL) { brcmf_err("unable to allocate frame\n"); diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c index 22c59d8c3c45..af92f00ca56e 100644 --- a/drivers/net/wireless/cnss/cnss_pci.c +++ b/drivers/net/wireless/cnss/cnss_pci.c @@ -83,6 +83,7 @@ #define QCA6180_DEVICE_ID (0x0041) #define QCA6180_REV_ID_OFFSET (0x08) +#define WLAN_EN_VREG_NAME "vdd-wlan-en" #define WLAN_VREG_NAME "vdd-wlan" #define WLAN_VREG_IO_NAME "vdd-wlan-io" #define 
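The wil6210 and brcmfmac hunks above apply the same hardening idea: validate a caller-supplied management frame length against a lower bound (a 3-address 802.11 header is 24 bytes) and an upper bound before any allocation or copy. A generic sketch, assuming a stand-in MAX_PAYLOAD bound rather than the real BRCMF_FIL_ACTION_FRAME_SIZE arithmetic:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IEEE80211_3ADDR_HDR_LEN 24 /* sizeof(struct ieee80211_hdr_3addr) */
#define MAX_PAYLOAD 1024           /* stand-in firmware buffer bound */

/* Reject a caller-controlled length before any allocation or copy. */
static int tx_mgmt(const uint8_t *buf, size_t len)
{
        static uint8_t frame[MAX_PAYLOAD];

        if (len < IEEE80211_3ADDR_HDR_LEN)
                return -1;       /* shorter than a minimal header */
        if (len > sizeof(frame))
                return -1;       /* would overflow the target buffer */

        memcpy(frame, buf, len); /* now provably in bounds */
        return 0;
}

int main(void)
{
        uint8_t hdr[IEEE80211_3ADDR_HDR_LEN] = { 0 };

        printf("%d\n", tx_mgmt(hdr, sizeof(hdr))); /* 0: accepted */
        printf("%d\n", tx_mgmt(hdr, 4));           /* -1: too short */
        return 0;
}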
WLAN_VREG_XTAL_NAME "vdd-wlan-xtal" @@ -114,6 +115,8 @@ #define WLAN_VREG_SP2T_MIN 2700000 #define POWER_ON_DELAY 2 +#define WLAN_VREG_IO_DELAY_MIN 100 +#define WLAN_VREG_IO_DELAY_MAX 1000 #define WLAN_ENABLE_DELAY 10 #define WLAN_RECOVERY_DELAY 1 #define PCIE_ENABLE_DELAY 100 @@ -134,7 +137,6 @@ static DEFINE_SPINLOCK(pci_link_down_lock); #define FW_IMAGE_MISSION (0x02) #define FW_IMAGE_BDATA (0x03) #define FW_IMAGE_PRINT (0x04) -#define FW_SETUP_DELAY 2000 #define SEG_METADATA (0x01) #define SEG_NON_PAGED (0x02) @@ -155,6 +157,7 @@ struct cnss_wlan_gpio_info { }; struct cnss_wlan_vreg_info { + struct regulator *wlan_en_reg; struct regulator *wlan_reg; struct regulator *soc_swreg; struct regulator *ant_switch; @@ -237,6 +240,7 @@ static struct cnss_data { dma_addr_t smmu_iova_start; size_t smmu_iova_len; struct cnss_wlan_vreg_info vreg_info; + bool wlan_en_vreg_support; struct cnss_wlan_gpio_info gpio_info; bool pcie_link_state; bool pcie_link_down_ind; @@ -274,10 +278,8 @@ static struct cnss_data { u32 fw_dma_size; u32 fw_seg_count; struct segment_memory fw_seg_mem[MAX_NUM_OF_SEGMENTS]; - atomic_t fw_store_in_progress; /* Firmware setup complete lock */ struct mutex fw_setup_stat_lock; - struct completion fw_setup_complete; void *bdata_cpu; dma_addr_t bdata_dma; u32 bdata_dma_size; @@ -294,6 +296,17 @@ module_param(pcie_link_down_panic, uint, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(pcie_link_down_panic, "Trigger kernel panic when PCIe link down is detected"); +static void cnss_put_wlan_enable_gpio(void) +{ + struct cnss_wlan_gpio_info *gpio_info = &penv->gpio_info; + struct cnss_wlan_vreg_info *vreg_info = &penv->vreg_info; + + if (penv->wlan_en_vreg_support) + regulator_put(vreg_info->wlan_en_reg); + else + gpio_free(gpio_info->num); +} + static int cnss_wlan_vreg_on(struct cnss_wlan_vreg_info *vreg_info) { int ret; @@ -307,13 +320,6 @@ static int cnss_wlan_vreg_on(struct cnss_wlan_vreg_info *vreg_info) } } - ret = regulator_enable(vreg_info->wlan_reg); - if (ret) { - pr_err("%s: regulator enable failed for WLAN power\n", - __func__); - goto error_enable; - } - if (vreg_info->wlan_reg_io) { ret = regulator_enable(vreg_info->wlan_reg_io); if (ret) { @@ -321,6 +327,8 @@ static int cnss_wlan_vreg_on(struct cnss_wlan_vreg_info *vreg_info) __func__); goto error_enable_reg_io; } + + usleep_range(WLAN_VREG_IO_DELAY_MIN, WLAN_VREG_IO_DELAY_MAX); } if (vreg_info->wlan_reg_xtal_aon) { @@ -341,6 +349,13 @@ static int cnss_wlan_vreg_on(struct cnss_wlan_vreg_info *vreg_info) } } + ret = regulator_enable(vreg_info->wlan_reg); + if (ret) { + pr_err("%s: regulator enable failed for WLAN power\n", + __func__); + goto error_enable; + } + if (vreg_info->wlan_reg_sp2t) { ret = regulator_enable(vreg_info->wlan_reg_sp2t); if (ret) { @@ -377,6 +392,8 @@ error_enable_ant_switch: if (vreg_info->wlan_reg_sp2t) regulator_disable(vreg_info->wlan_reg_sp2t); error_enable_reg_sp2t: + regulator_disable(vreg_info->wlan_reg); +error_enable: if (vreg_info->wlan_reg_xtal) regulator_disable(vreg_info->wlan_reg_xtal); error_enable_reg_xtal: @@ -386,8 +403,6 @@ error_enable_reg_xtal_aon: if (vreg_info->wlan_reg_io) regulator_disable(vreg_info->wlan_reg_io); error_enable_reg_io: - regulator_disable(vreg_info->wlan_reg); -error_enable: if (vreg_info->wlan_reg_core) regulator_disable(vreg_info->wlan_reg_core); error_enable_reg_core: @@ -425,6 +440,13 @@ static int cnss_wlan_vreg_off(struct cnss_wlan_vreg_info *vreg_info) } } + ret = regulator_disable(vreg_info->wlan_reg); + if (ret) { + pr_err("%s: regulator disable failed for 
WLAN power\n", + __func__); + goto error_disable; + } + if (vreg_info->wlan_reg_xtal) { ret = regulator_disable(vreg_info->wlan_reg_xtal); if (ret) { @@ -452,13 +474,6 @@ static int cnss_wlan_vreg_off(struct cnss_wlan_vreg_info *vreg_info) } } - ret = regulator_disable(vreg_info->wlan_reg); - if (ret) { - pr_err("%s: regulator disable failed for WLAN power\n", - __func__); - goto error_disable; - } - if (vreg_info->wlan_reg_core) { ret = regulator_disable(vreg_info->wlan_reg_core); if (ret) { @@ -575,6 +590,25 @@ static void cnss_wlan_gpio_set(struct cnss_wlan_gpio_info *info, bool state) info->name, info->state ? "enabled" : "disabled"); } +static int cnss_configure_wlan_en_gpio(bool state) +{ + int ret = 0; + struct cnss_wlan_gpio_info *gpio_info = &penv->gpio_info; + struct cnss_wlan_vreg_info *vreg_info = &penv->vreg_info; + + if (penv->wlan_en_vreg_support) { + if (state) + ret = regulator_enable(vreg_info->wlan_en_reg); + else + ret = regulator_disable(vreg_info->wlan_en_reg); + } else { + cnss_wlan_gpio_set(gpio_info, state); + } + + msleep(WLAN_ENABLE_DELAY); + return ret; +} + static int cnss_pinctrl_init(struct cnss_wlan_gpio_info *gpio_info, struct platform_device *pdev) { @@ -681,14 +715,71 @@ end: return ret; } +static int cnss_get_wlan_enable_gpio( + struct cnss_wlan_gpio_info *gpio_info, + struct platform_device *pdev) +{ + int ret = 0; + struct device *dev = &pdev->dev; + + if (!of_find_property(dev->of_node, gpio_info->name, NULL)) { + gpio_info->prop = false; + return -ENODEV; + } + + gpio_info->prop = true; + ret = of_get_named_gpio(dev->of_node, gpio_info->name, 0); + if (ret >= 0) { + gpio_info->num = ret; + } else { + if (ret == -EPROBE_DEFER) + pr_debug("get WLAN_EN GPIO probe defer\n"); + else + pr_err( + "can't get gpio %s ret %d", gpio_info->name, ret); + } + + ret = cnss_pinctrl_init(gpio_info, pdev); + if (ret) + pr_debug("%s: pinctrl init failed!\n", __func__); + + ret = cnss_wlan_gpio_init(gpio_info); + if (ret) + pr_err("gpio init failed\n"); + + return ret; +} + +static int cnss_get_wlan_bootstrap_gpio(struct platform_device *pdev) +{ + int ret = 0; + struct device_node *node = (&pdev->dev)->of_node; + + if (!of_find_property(node, WLAN_BOOTSTRAP_GPIO_NAME, NULL)) + return ret; + + penv->wlan_bootstrap_gpio = + of_get_named_gpio(node, WLAN_BOOTSTRAP_GPIO_NAME, 0); + if (penv->wlan_bootstrap_gpio > 0) { + ret = cnss_wlan_bootstrap_gpio_init(); + } else { + ret = penv->wlan_bootstrap_gpio; + pr_err( + "%s: Can't get GPIO %s, ret = %d", + __func__, WLAN_BOOTSTRAP_GPIO_NAME, ret); + } + + return ret; +} + static int cnss_wlan_get_resources(struct platform_device *pdev) { int ret = 0; struct cnss_wlan_gpio_info *gpio_info = &penv->gpio_info; struct cnss_wlan_vreg_info *vreg_info = &penv->vreg_info; + struct device_node *node = pdev->dev.of_node; - if (of_get_property(pdev->dev.of_node, - WLAN_VREG_CORE_NAME"-supply", NULL)) { + if (of_get_property(node, WLAN_VREG_CORE_NAME "-supply", NULL)) { vreg_info->wlan_reg_core = regulator_get(&pdev->dev, WLAN_VREG_CORE_NAME); if (IS_ERR(vreg_info->wlan_reg_core)) { @@ -718,26 +809,7 @@ static int cnss_wlan_get_resources(struct platform_device *pdev) } } - vreg_info->wlan_reg = regulator_get(&pdev->dev, WLAN_VREG_NAME); - - if (IS_ERR(vreg_info->wlan_reg)) { - if (PTR_ERR(vreg_info->wlan_reg) == -EPROBE_DEFER) - pr_err("%s: vreg probe defer\n", __func__); - else - pr_err("%s: vreg regulator get failed\n", __func__); - ret = PTR_ERR(vreg_info->wlan_reg); - goto err_reg_get; - } - - ret = 
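The reordered cnss_wlan_vreg_on()/cnss_wlan_vreg_off() hunks above enforce one discipline: dependency rails (IO, XTAL) come up before the main WLAN rail, tear-down runs in exact reverse, and a failed enable unwinds only what already succeeded. A condensed kernel-style sketch of the pattern; the three-rail set is illustrative, while the regulator calls are the real consumer API.

#include <linux/regulator/consumer.h>

/* Bring dependency rails up first, the main rail last. */
static int rails_on(struct regulator *io, struct regulator *xtal,
                    struct regulator *wlan)
{
        int ret;

        ret = regulator_enable(io);
        if (ret)
                return ret;

        ret = regulator_enable(xtal);
        if (ret)
                goto err_xtal;

        ret = regulator_enable(wlan);
        if (ret)
                goto err_wlan;

        return 0;

err_wlan:
        regulator_disable(xtal);        /* unwind in reverse order */
err_xtal:
        regulator_disable(io);
        return ret;
}

/* Tear down in the exact reverse of the enable order. */
static void rails_off(struct regulator *io, struct regulator *xtal,
                      struct regulator *wlan)
{
        regulator_disable(wlan);
        regulator_disable(xtal);
        regulator_disable(io);
}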
regulator_enable(vreg_info->wlan_reg); - - if (ret) { - pr_err("%s: vreg initial vote failed\n", __func__); - goto err_reg_enable; - } - - if (of_get_property(pdev->dev.of_node, - WLAN_VREG_IO_NAME"-supply", NULL)) { + if (of_get_property(node, WLAN_VREG_IO_NAME "-supply", NULL)) { vreg_info->wlan_reg_io = regulator_get(&pdev->dev, WLAN_VREG_IO_NAME); if (!IS_ERR(vreg_info->wlan_reg_io)) { @@ -755,14 +827,34 @@ static int cnss_wlan_get_resources(struct platform_device *pdev) __func__); goto err_reg_io_enable; } + + usleep_range(WLAN_VREG_IO_DELAY_MIN, + WLAN_VREG_IO_DELAY_MAX); } } if (cnss_enable_xtal_ldo(pdev)) goto err_reg_xtal_enable; - if (of_get_property(pdev->dev.of_node, - WLAN_VREG_SP2T_NAME"-supply", NULL)) { + vreg_info->wlan_reg = regulator_get(&pdev->dev, WLAN_VREG_NAME); + + if (IS_ERR(vreg_info->wlan_reg)) { + if (PTR_ERR(vreg_info->wlan_reg) == -EPROBE_DEFER) + pr_err("%s: vreg probe defer\n", __func__); + else + pr_err("%s: vreg regulator get failed\n", __func__); + ret = PTR_ERR(vreg_info->wlan_reg); + goto err_reg_get; + } + + ret = regulator_enable(vreg_info->wlan_reg); + + if (ret) { + pr_err("%s: vreg initial vote failed\n", __func__); + goto err_reg_enable; + } + + if (of_get_property(node, WLAN_VREG_SP2T_NAME "-supply", NULL)) { vreg_info->wlan_reg_sp2t = regulator_get(&pdev->dev, WLAN_VREG_SP2T_NAME); if (!IS_ERR(vreg_info->wlan_reg_sp2t)) { @@ -783,8 +875,7 @@ static int cnss_wlan_get_resources(struct platform_device *pdev) } } - if (of_get_property(pdev->dev.of_node, - WLAN_ANT_SWITCH_NAME "-supply", NULL)) { + if (of_get_property(node, WLAN_ANT_SWITCH_NAME "-supply", NULL)) { vreg_info->ant_switch = regulator_get(&pdev->dev, WLAN_ANT_SWITCH_NAME); if (!IS_ERR(vreg_info->ant_switch)) { @@ -814,13 +905,10 @@ static int cnss_wlan_get_resources(struct platform_device *pdev) } } - if (of_find_property((&pdev->dev)->of_node, - "qcom,wlan-uart-access", NULL)) + if (of_find_property(node, "qcom,wlan-uart-access", NULL)) penv->cap.cap_flag |= CNSS_HAS_UART_ACCESS; - if (of_get_property(pdev->dev.of_node, - WLAN_SWREG_NAME"-supply", NULL)) { - + if (of_get_property(node, WLAN_SWREG_NAME "-supply", NULL)) { vreg_info->soc_swreg = regulator_get(&pdev->dev, WLAN_SWREG_NAME); if (IS_ERR(vreg_info->soc_swreg)) { @@ -843,68 +931,41 @@ static int cnss_wlan_get_resources(struct platform_device *pdev) penv->cap.cap_flag |= CNSS_HAS_EXTERNAL_SWREG; } - vreg_info->state = VREG_ON; - - if (!of_find_property((&pdev->dev)->of_node, gpio_info->name, NULL)) { - gpio_info->prop = false; - goto end; + penv->wlan_en_vreg_support = + of_property_read_bool(node, "qcom,wlan-en-vreg-support"); + if (penv->wlan_en_vreg_support) { + vreg_info->wlan_en_reg = + regulator_get(&pdev->dev, WLAN_EN_VREG_NAME); + if (IS_ERR(vreg_info->wlan_en_reg)) { + pr_err("%s:wlan_en vreg get failed\n", __func__); + ret = PTR_ERR(vreg_info->wlan_en_reg); + goto err_wlan_en_reg_get; + } } - gpio_info->prop = true; - ret = of_get_named_gpio((&pdev->dev)->of_node, - gpio_info->name, 0); - - if (ret >= 0) { - gpio_info->num = ret; - ret = 0; - } else { - if (ret == -EPROBE_DEFER) - pr_debug("get WLAN_EN GPIO probe defer\n"); - else - pr_err("can't get gpio %s ret %d", - gpio_info->name, ret); - goto err_get_gpio; + if (!penv->wlan_en_vreg_support) { + ret = cnss_get_wlan_enable_gpio(gpio_info, pdev); + if (ret) { + pr_err( + "%s:Failed to config the WLAN_EN gpio\n", __func__); + goto err_gpio_wlan_en; + } } + vreg_info->state = VREG_ON; - ret = cnss_pinctrl_init(gpio_info, pdev); + ret = 
cnss_get_wlan_bootstrap_gpio(pdev); if (ret) { - pr_err("%s: pinctrl init failed!\n", __func__); - goto err_pinctrl_init; + pr_err("%s: Failed to enable wlan bootstrap gpio\n", __func__); + goto err_gpio_wlan_bootstrap; } - ret = cnss_wlan_gpio_init(gpio_info); - if (ret) { - pr_err("gpio init failed\n"); - goto err_gpio_init; - } - - if (of_find_property((&pdev->dev)->of_node, - WLAN_BOOTSTRAP_GPIO_NAME, NULL)) { - penv->wlan_bootstrap_gpio = - of_get_named_gpio((&pdev->dev)->of_node, - WLAN_BOOTSTRAP_GPIO_NAME, 0); - if (penv->wlan_bootstrap_gpio > 0) { - ret = cnss_wlan_bootstrap_gpio_init(); - if (ret) - goto err_gpio_init; - } else { - if (ret == -EPROBE_DEFER) { - pr_debug("%s: Get GPIO %s probe defer\n", - __func__, WLAN_BOOTSTRAP_GPIO_NAME); - } else { - pr_err("%s: Can't get GPIO %s, ret = %d", - __func__, WLAN_BOOTSTRAP_GPIO_NAME, - ret); - } - goto err_gpio_init; - } - } -end: return ret; -err_gpio_init: -err_pinctrl_init: -err_get_gpio: +err_gpio_wlan_bootstrap: + cnss_put_wlan_enable_gpio(); +err_gpio_wlan_en: +err_wlan_en_reg_get: + vreg_info->wlan_en_reg = NULL; if (vreg_info->soc_swreg) regulator_disable(vreg_info->soc_swreg); vreg_info->state = VREG_OFF; @@ -929,7 +990,11 @@ err_reg_sp2t_enable: err_reg_sp2t_set: if (vreg_info->wlan_reg_sp2t) regulator_put(vreg_info->wlan_reg_sp2t); + regulator_disable(vreg_info->wlan_reg); +err_reg_enable: + regulator_put(vreg_info->wlan_reg); +err_reg_get: cnss_disable_xtal_ldo(pdev); err_reg_xtal_enable: @@ -940,12 +1005,6 @@ err_reg_io_enable: err_reg_io_set: if (vreg_info->wlan_reg_io) regulator_put(vreg_info->wlan_reg_io); - regulator_disable(vreg_info->wlan_reg); - -err_reg_enable: - regulator_put(vreg_info->wlan_reg); - -err_reg_get: if (vreg_info->wlan_reg_core) regulator_disable(vreg_info->wlan_reg_core); @@ -965,7 +1024,7 @@ static void cnss_wlan_release_resources(void) if (penv->wlan_bootstrap_gpio > 0) gpio_free(penv->wlan_bootstrap_gpio); - gpio_free(gpio_info->num); + cnss_put_wlan_enable_gpio(); gpio_info->state = WLAN_EN_LOW; gpio_info->prop = false; cnss_wlan_vreg_set(vreg_info, VREG_OFF); @@ -975,13 +1034,13 @@ static void cnss_wlan_release_resources(void) regulator_put(vreg_info->ant_switch); if (vreg_info->wlan_reg_sp2t) regulator_put(vreg_info->wlan_reg_sp2t); + regulator_put(vreg_info->wlan_reg); if (vreg_info->wlan_reg_xtal) regulator_put(vreg_info->wlan_reg_xtal); if (vreg_info->wlan_reg_xtal_aon) regulator_put(vreg_info->wlan_reg_xtal_aon); if (vreg_info->wlan_reg_io) regulator_put(vreg_info->wlan_reg_io); - regulator_put(vreg_info->wlan_reg); if (vreg_info->wlan_reg_core) regulator_put(vreg_info->wlan_reg_core); vreg_info->state = VREG_OFF; @@ -1374,15 +1433,6 @@ int cnss_get_fw_image(struct image_desc_info *image_desc_info) !penv->fw_seg_count || !penv->bdata_seg_count) return -EINVAL; - /* Check for firmware setup trigger by usersapce is in progress - * and wait for complition of firmware setup. 
- */ - - if (atomic_read(&penv->fw_store_in_progress)) { - wait_for_completion_timeout(&penv->fw_setup_complete, - msecs_to_jiffies(FW_SETUP_DELAY)); - } - mutex_lock(&penv->fw_setup_stat_lock); image_desc_info->fw_addr = penv->fw_dma; image_desc_info->fw_size = penv->fw_dma_size; @@ -1560,7 +1610,6 @@ static int cnss_wlan_pci_probe(struct pci_dev *pdev, { int ret = 0; struct cnss_wlan_vreg_info *vreg_info = &penv->vreg_info; - struct cnss_wlan_gpio_info *gpio_info = &penv->gpio_info; void *cpu_addr; dma_addr_t dma_handle; struct codeswap_codeseg_info *cnss_seg_info = NULL; @@ -1619,7 +1668,7 @@ static int cnss_wlan_pci_probe(struct pci_dev *pdev, penv->pcie_link_state = PCIE_LINK_DOWN; } - cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW); + cnss_configure_wlan_en_gpio(WLAN_EN_LOW); ret = cnss_wlan_vreg_set(vreg_info, VREG_OFF); if (ret) { @@ -1627,7 +1676,9 @@ static int cnss_wlan_pci_probe(struct pci_dev *pdev, goto err_pcie_suspend; } + mutex_lock(&penv->fw_setup_stat_lock); cnss_wlan_fw_mem_alloc(pdev); + mutex_unlock(&penv->fw_setup_stat_lock); ret = device_create_file(&penv->pldev->dev, &dev_attr_wlan_setup); @@ -1874,17 +1925,11 @@ static ssize_t fw_image_setup_store(struct device *dev, if (!penv) return -ENODEV; - if (atomic_read(&penv->fw_store_in_progress)) { - pr_info("%s: Firmware setup in progress\n", __func__); - return 0; - } - - atomic_set(&penv->fw_store_in_progress, 1); - init_completion(&penv->fw_setup_complete); + mutex_lock(&penv->fw_setup_stat_lock); + pr_info("%s: Firmware setup in progress\n", __func__); if (kstrtoint(buf, 0, &val)) { - atomic_set(&penv->fw_store_in_progress, 0); - complete(&penv->fw_setup_complete); + mutex_unlock(&penv->fw_setup_stat_lock); return -EINVAL; } @@ -1895,8 +1940,7 @@ static ssize_t fw_image_setup_store(struct device *dev, if (ret != 0) { pr_err("%s: Invalid parsing of FW image files %d", __func__, ret); - atomic_set(&penv->fw_store_in_progress, 0); - complete(&penv->fw_setup_complete); + mutex_unlock(&penv->fw_setup_stat_lock); return -EINVAL; } penv->fw_image_setup = val; @@ -1906,9 +1950,8 @@ static ssize_t fw_image_setup_store(struct device *dev, penv->bmi_test = val; } - atomic_set(&penv->fw_store_in_progress, 0); - complete(&penv->fw_setup_complete); - + pr_info("%s: Firmware setup completed\n", __func__); + mutex_unlock(&penv->fw_setup_stat_lock); return count; } @@ -2007,16 +2050,21 @@ int cnss_get_codeswap_struct(struct codeswap_codeseg_info *swap_seg) { struct codeswap_codeseg_info *cnss_seg_info = penv->cnss_seg_info; + mutex_lock(&penv->fw_setup_stat_lock); if (!cnss_seg_info) { swap_seg = NULL; + mutex_unlock(&penv->fw_setup_stat_lock); return -ENOENT; } + if (!atomic_read(&penv->fw_available)) { pr_debug("%s: fw is not available\n", __func__); + mutex_unlock(&penv->fw_setup_stat_lock); return -ENOENT; } *swap_seg = *cnss_seg_info; + mutex_unlock(&penv->fw_setup_stat_lock); return 0; } @@ -2035,15 +2083,6 @@ static void cnss_wlan_memory_expansion(void) u_int32_t total_length = 0; struct pci_dev *pdev; - /* Check for firmware setup trigger by usersapce is in progress - * and wait for complition of firmware setup. 
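The cnss hunks above replace the fw_store_in_progress flag plus fw_setup_complete completion with the single fw_setup_stat_lock mutex. The old scheme only waited, with a timeout, for a setup already in flight; a store starting just after the atomic_read() still raced the reader. One mutex taken by the writer and by every reader closes the window and makes FW_SETUP_DELAY unnecessary. A condensed sketch, with the real work elided:

#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(fw_setup_stat_lock);

/* Writer: the fw_image_setup sysfs store path. */
static ssize_t fw_image_setup_store_sketch(const char *buf, size_t count)
{
        mutex_lock(&fw_setup_stat_lock);
        /* ... parse buf, rebuild the firmware segment tables ... */
        mutex_unlock(&fw_setup_stat_lock);
        return count;
}

/* Readers: cnss_get_fw_image(), cnss_wlan_memory_expansion() and
 * friends take the same mutex, so they see the segment tables either
 * before a setup or after it, never half rebuilt. */
static void cnss_get_fw_image_sketch(void)
{
        mutex_lock(&fw_setup_stat_lock);
        /* ... copy fw_dma, fw_dma_size, fw_seg_count ... */
        mutex_unlock(&fw_setup_stat_lock);
}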
- */ - - if (atomic_read(&penv->fw_store_in_progress)) { - wait_for_completion_timeout(&penv->fw_setup_complete, - msecs_to_jiffies(FW_SETUP_DELAY)); - } - mutex_lock(&penv->fw_setup_stat_lock); filename = cnss_wlan_get_evicted_data_file(); pdev = penv->pdev; @@ -2276,8 +2315,7 @@ again: msleep(WLAN_BOOTSTRAP_DELAY); } - cnss_wlan_gpio_set(gpio_info, WLAN_EN_HIGH); - msleep(WLAN_ENABLE_DELAY); + cnss_configure_wlan_en_gpio(WLAN_EN_HIGH); if (!pdev) { pr_debug("%s: invalid pdev. register pci device\n", __func__); @@ -2360,8 +2398,7 @@ again: cnss_get_pci_dev_bus_number(pdev), pdev, PM_OPTIONS); penv->pcie_link_state = PCIE_LINK_DOWN; - cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW); - msleep(WLAN_ENABLE_DELAY); + cnss_configure_wlan_en_gpio(WLAN_EN_LOW); cnss_wlan_vreg_set(vreg_info, VREG_OFF); msleep(POWER_ON_DELAY); probe_again++; @@ -2388,7 +2425,7 @@ err_pcie_link_up: } err_pcie_reg: - cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW); + cnss_configure_wlan_en_gpio(WLAN_EN_LOW); cnss_wlan_vreg_set(vreg_info, VREG_OFF); if (penv->pdev) { pr_err("%d: Unregistering PCI device\n", __LINE__); @@ -2469,8 +2506,7 @@ void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver) cut_power: penv->driver = NULL; - cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW); - + cnss_configure_wlan_en_gpio(WLAN_EN_LOW); if (cnss_wlan_vreg_set(vreg_info, VREG_OFF)) pr_err("wlan vreg OFF failed\n"); } @@ -2582,8 +2618,7 @@ static int cnss_shutdown(const struct subsys_desc *subsys, bool force_stop) } cut_power: - cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW); - + cnss_configure_wlan_en_gpio(WLAN_EN_LOW); if (cnss_wlan_vreg_set(vreg_info, VREG_OFF)) pr_err("cnss: Failed to set WLAN VREG_OFF!\n"); @@ -2616,8 +2651,7 @@ static int cnss_powerup(const struct subsys_desc *subsys) } msleep(POWER_ON_DELAY); - cnss_wlan_gpio_set(gpio_info, WLAN_EN_HIGH); - msleep(WLAN_ENABLE_DELAY); + cnss_configure_wlan_en_gpio(WLAN_EN_HIGH); if (!pdev) { pr_err("%d: invalid pdev\n", __LINE__); @@ -2677,7 +2711,7 @@ err_wlan_reinit: penv->pcie_link_state = PCIE_LINK_DOWN; err_pcie_link_up: - cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW); + cnss_configure_wlan_en_gpio(WLAN_EN_LOW); cnss_wlan_vreg_set(vreg_info, VREG_OFF); if (penv->pdev) { pr_err("%d: Unregistering pci device\n", __LINE__); @@ -2859,13 +2893,17 @@ static int cnss_probe(struct platform_device *pdev) penv->vreg_info.wlan_reg = NULL; penv->vreg_info.state = VREG_OFF; penv->pci_register_again = false; + mutex_init(&penv->fw_setup_stat_lock); ret = cnss_wlan_get_resources(pdev); if (ret) goto err_get_wlan_res; - cnss_wlan_gpio_set(&penv->gpio_info, WLAN_EN_HIGH); - msleep(WLAN_ENABLE_DELAY); + ret = cnss_configure_wlan_en_gpio(WLAN_EN_HIGH); + if (ret) { + pr_err("%s: Failed to enable WLAN enable gpio\n", __func__); + goto err_get_rc; + } ret = of_property_read_u32(dev->of_node, "qcom,wlan-rc-num", &rc_num); if (ret) { @@ -3016,8 +3054,6 @@ skip_ramdump: memset(phys_to_virt(0), 0, SZ_4K); #endif - atomic_set(&penv->fw_store_in_progress, 0); - mutex_init(&penv->fw_setup_stat_lock); ret = device_create_file(dev, &dev_attr_fw_image_setup); if (ret) { pr_err("cnss: fw_image_setup sys file creation failed\n"); @@ -3062,7 +3098,7 @@ err_subsys_reg: err_esoc_reg: err_pcie_enumerate: err_get_rc: - cnss_wlan_gpio_set(&penv->gpio_info, WLAN_EN_LOW); + cnss_configure_wlan_en_gpio(WLAN_EN_LOW); cnss_wlan_release_resources(); err_get_wlan_res: @@ -3073,8 +3109,6 @@ err_get_wlan_res: static int cnss_remove(struct platform_device *pdev) { - struct cnss_wlan_gpio_info *gpio_info = &penv->gpio_info; - 
unregister_pm_notifier(&cnss_pm_notifier); device_remove_file(&pdev->dev, &dev_attr_fw_image_setup); @@ -3095,7 +3129,7 @@ static int cnss_remove(struct platform_device *pdev) } } - cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW); + cnss_configure_wlan_en_gpio(WLAN_EN_LOW); if (penv->wlan_bootstrap_gpio > 0) gpio_set_value(penv->wlan_bootstrap_gpio, WLAN_BOOTSTRAP_LOW); cnss_wlan_release_resources(); @@ -3596,8 +3630,7 @@ static int __cnss_pcie_power_up(struct device *dev) msleep(WLAN_BOOTSTRAP_DELAY); } - cnss_wlan_gpio_set(gpio_info, WLAN_EN_HIGH); - msleep(WLAN_ENABLE_DELAY); + cnss_configure_wlan_en_gpio(WLAN_EN_HIGH); return 0; } @@ -3610,8 +3643,7 @@ static int __cnss_pcie_power_down(struct device *dev) vreg_info = &penv->vreg_info; gpio_info = &penv->gpio_info; - cnss_wlan_gpio_set(gpio_info, WLAN_EN_LOW); - + cnss_configure_wlan_en_gpio(WLAN_EN_LOW); if (penv->wlan_bootstrap_gpio > 0) gpio_set_value(penv->wlan_bootstrap_gpio, WLAN_BOOTSTRAP_LOW); diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c index 360ab31c61dd..c3bcb38f428f 100644 --- a/drivers/net/wireless/cnss2/debug.c +++ b/drivers/net/wireless/cnss2/debug.c @@ -15,6 +15,7 @@ #include <linux/debugfs.h> #include "main.h" #include "debug.h" +#include "pci.h" #define CNSS_IPC_LOG_PAGES 32 @@ -121,13 +122,90 @@ static int cnss_stats_open(struct inode *inode, struct file *file) } static const struct file_operations cnss_stats_fops = { - .read = seq_read, - .release = single_release, - .open = cnss_stats_open, - .owner = THIS_MODULE, - .llseek = seq_lseek, + .read = seq_read, + .release = single_release, + .open = cnss_stats_open, + .owner = THIS_MODULE, + .llseek = seq_lseek, }; +static ssize_t cnss_dev_boot_debug_write(struct file *fp, + const char __user *user_buf, + size_t count, loff_t *off) +{ + struct cnss_plat_data *plat_priv = + ((struct seq_file *)fp->private_data)->private; + char buf[64]; + char *cmd; + unsigned int len = 0; + int ret = 0; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + cmd = buf; + + if (sysfs_streq(cmd, "on")) { + ret = cnss_power_on_device(plat_priv); + } else if (sysfs_streq(cmd, "enumerate")) { + ret = cnss_pci_init(plat_priv); + } else if (sysfs_streq(cmd, "download")) { + ret = cnss_pci_start_mhi(plat_priv->bus_priv); + } else { + cnss_pr_err("Device boot debugfs command is invalid\n"); + ret = -EINVAL; + } + + if (ret) + return ret; + + return count; +} + +static int cnss_dev_boot_debug_show(struct seq_file *s, void *data) +{ + seq_puts(s, "\nUsage: echo <action> > <debugfs_path>/cnss/dev_boot\n"); + seq_puts(s, "<action> can be one of below:\n"); + seq_puts(s, "on: turn on device power, assert WLAN_EN\n"); + seq_puts(s, "enumerate: de-assert PERST, enumerate PCIe\n"); + seq_puts(s, "download: download FW and do QMI handshake with FW\n"); + + return 0; +} + +static int cnss_dev_boot_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, cnss_dev_boot_debug_show, inode->i_private); +} + +static const struct file_operations cnss_dev_boot_debug_fops = { + .read = seq_read, + .write = cnss_dev_boot_debug_write, + .release = single_release, + .open = cnss_dev_boot_debug_open, + .owner = THIS_MODULE, + .llseek = seq_lseek, +}; + +#ifdef CONFIG_CNSS2_DEBUG +static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv) +{ + struct dentry *root_dentry = plat_priv->root_dentry; + + debugfs_create_file("dev_boot", 0600, root_dentry, plat_priv, + 
&cnss_dev_boot_debug_fops); + + return 0; +} +#else +static int cnss_create_debug_only_node(struct cnss_plat_data *plat_priv) +{ + return 0; +} +#endif + int cnss_debugfs_create(struct cnss_plat_data *plat_priv) { int ret = 0; @@ -139,11 +217,16 @@ int cnss_debugfs_create(struct cnss_plat_data *plat_priv) cnss_pr_err("Unable to create debugfs %d\n", ret); goto out; } + plat_priv->root_dentry = root_dentry; + debugfs_create_file("pin_connect_result", 0644, root_dentry, plat_priv, &cnss_pin_connect_fops); debugfs_create_file("stats", 0644, root_dentry, plat_priv, &cnss_stats_fops); + + cnss_create_debug_only_node(plat_priv); + out: return ret; } diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c index e114d0c51a07..8838a1319629 100644 --- a/drivers/net/wireless/cnss2/main.c +++ b/drivers/net/wireless/cnss2/main.c @@ -57,6 +57,7 @@ MODULE_PARM_DESC(enable_waltest, "Enable to handle firmware waltest"); enum cnss_debug_quirks { LINK_DOWN_SELF_RECOVERY, + SKIP_DEVICE_BOOT, }; unsigned long quirks; @@ -2232,13 +2233,15 @@ static int cnss_probe(struct platform_device *plat_dev) if (ret) goto reset_ctx; - ret = cnss_power_on_device(plat_priv); - if (ret) - goto free_res; + if (!test_bit(SKIP_DEVICE_BOOT, &quirks)) { + ret = cnss_power_on_device(plat_priv); + if (ret) + goto free_res; - ret = cnss_pci_init(plat_priv); - if (ret) - goto power_off; + ret = cnss_pci_init(plat_priv); + if (ret) + goto power_off; + } ret = cnss_register_esoc(plat_priv); if (ret) @@ -2291,9 +2294,11 @@ unreg_bus_scale: unreg_esoc: cnss_unregister_esoc(plat_priv); deinit_pci: - cnss_pci_deinit(plat_priv); + if (!test_bit(SKIP_DEVICE_BOOT, &quirks)) + cnss_pci_deinit(plat_priv); power_off: - cnss_power_off_device(plat_priv); + if (!test_bit(SKIP_DEVICE_BOOT, &quirks)) + cnss_power_off_device(plat_priv); free_res: cnss_put_resources(plat_priv); reset_ctx: diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index edc39af2d361..ce5a7b2bc88e 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -787,36 +787,6 @@ int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va, return 0; } -#ifdef CONFIG_CNSS_QCA6290 -#define PCI_MAX_BAR_SIZE 0xD00000 - -static void __iomem *cnss_pci_iomap(struct pci_dev *dev, int bar, - unsigned long maxlen) -{ - resource_size_t start = pci_resource_start(dev, bar); - resource_size_t len = PCI_MAX_BAR_SIZE; - unsigned long flags = pci_resource_flags(dev, bar); - - if (!len || !start) - return NULL; - - if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM)) { - if (flags & IORESOURCE_CACHEABLE && !(flags & IORESOURCE_IO)) - return ioremap(start, len); - else - return ioremap_nocache(start, len); - } - - return NULL; -} -#else -static void __iomem *cnss_pci_iomap(struct pci_dev *dev, int bar, - unsigned long maxlen) -{ - return pci_iomap(dev, bar, maxlen); -} -#endif - static struct cnss_msi_config msi_config = { .total_vectors = 32, .total_users = 4, @@ -1003,7 +973,7 @@ static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv) pci_set_master(pci_dev); - pci_priv->bar = cnss_pci_iomap(pci_dev, PCI_BAR_NUM, 0); + pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0); if (!pci_priv->bar) { cnss_pr_err("Failed to do PCI IO map!\n"); ret = -EIO; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index d59769e858f4..019d7165a045 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2539,7 +2539,7 @@ static 
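The dev_boot debugfs file added above uses the stock pattern for a command-style write handler: bound the copy to the local buffer, NUL-terminate it (user data never is), then dispatch on whole-word commands with sysfs_streq(), which tolerates the trailing newline from echo. A skeleton with the command bodies elided:

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t dev_boot_write(struct file *fp, const char __user *ubuf,
                              size_t count, loff_t *off)
{
        char buf[64];
        size_t len = min(count, sizeof(buf) - 1);

        if (copy_from_user(buf, ubuf, len))
                return -EFAULT;
        buf[len] = '\0';        /* user data is never NUL-terminated */

        if (sysfs_streq(buf, "on")) {
                /* power on, assert WLAN_EN */
        } else if (sysfs_streq(buf, "enumerate")) {
                /* de-assert PERST, enumerate PCIe */
        } else {
                return -EINVAL; /* unknown command */
        }

        return count;           /* consume the whole write */
}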
int mac80211_hwsim_new_radio(struct genl_info *info, tasklet_hrtimer_init(&data->beacon_timer, mac80211_hwsim_beacon, - CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS); + CLOCK_MONOTONIC, HRTIMER_MODE_ABS); spin_lock_bh(&hwsim_radio_lock); list_add_tail(&data->list, &hwsim_radios); diff --git a/drivers/net/wireless/wcnss/wcnss_vreg.c b/drivers/net/wireless/wcnss/wcnss_vreg.c index 82b90ad00f8b..d0a74744f70a 100644 --- a/drivers/net/wireless/wcnss/wcnss_vreg.c +++ b/drivers/net/wireless/wcnss/wcnss_vreg.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2015, 2017 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -417,13 +417,18 @@ static void wcnss_vregs_off(struct vregs_info regulators[], uint size, if (regulators[i].state == VREG_NULL_CONFIG) continue; + if (cfg->wcn_external_gpio_support) { + if (!memcmp(regulators[i].name, VDD_PA, sizeof(VDD_PA))) + continue; + } + /* Remove PWM mode */ if (regulators[i].state & VREG_OPTIMUM_MODE_MASK) { - rc = regulator_set_optimum_mode( - regulators[i].regulator, 0); - if (rc < 0) - pr_err("regulator_set_optimum_mode(%s) failed (%d)\n", - regulators[i].name, rc); + rc = regulator_set_load(regulators[i].regulator, 0); + if (rc < 0) { + pr_err("regulator set load(%s) failed (%d)\n", + regulators[i].name, rc); + } } /* Set voltage to lowest level */ @@ -478,7 +483,12 @@ static int wcnss_vregs_on(struct device *dev, } for (i = 0; i < size; i++) { - /* Get regulator source */ + if (cfg->wcn_external_gpio_support) { + if (!memcmp(regulators[i].name, VDD_PA, sizeof(VDD_PA))) + continue; + } + + /* Get regulator source */ regulators[i].regulator = regulator_get(dev, regulators[i].name); if (IS_ERR(regulators[i].regulator)) { @@ -518,11 +528,11 @@ static int wcnss_vregs_on(struct device *dev, /* Vote for PWM/PFM mode if needed */ if (voltage_level[i].uA_load && (reg_cnt > 0)) { - rc = regulator_set_optimum_mode(regulators[i].regulator, - voltage_level[i].uA_load); + rc = regulator_set_load(regulators[i].regulator, + voltage_level[i].uA_load); if (rc < 0) { - pr_err("regulator_set_optimum_mode(%s) failed (%d)\n", - regulators[i].name, rc); + pr_err("regulator set load(%s) failed (%d)\n", + regulators[i].name, rc); goto fail; } regulators[i].state |= VREG_OPTIMUM_MODE_MASK; diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c index 9347882fba92..e99d46ca51b0 100644 --- a/drivers/net/wireless/wcnss/wcnss_wlan.c +++ b/drivers/net/wireless/wcnss/wcnss_wlan.c @@ -36,6 +36,7 @@ #include <linux/qpnp/qpnp-adc.h> #include <linux/pinctrl/consumer.h> #include <linux/pm_qos.h> +#include <linux/bitops.h> #include <soc/qcom/subsystem_restart.h> #include <soc/qcom/subsystem_notif.h> @@ -56,6 +57,7 @@ #define WCNSS_PM_QOS_TIMEOUT 15000 #define IS_CAL_DATA_PRESENT 0 #define WAIT_FOR_CBC_IND 2 +#define WCNSS_DUAL_BAND_CAPABILITY_OFFSET BIT(8) /* module params */ #define WCNSS_CONFIG_UNSPECIFIED (-1) @@ -119,6 +121,8 @@ static DEFINE_SPINLOCK(reg_spinlock); #define PRONTO_PMU_COM_CSR_OFFSET 0x1040 #define PRONTO_PMU_SOFT_RESET_OFFSET 0x104C +#define PRONTO_QFUSE_DUAL_BAND_OFFSET 0x0018 + #define A2XB_CFG_OFFSET 0x00 #define A2XB_INT_SRC_OFFSET 0x0c #define A2XB_TSTBUS_CTRL_OFFSET 0x14 @@ -381,6 +385,7 @@ static struct { void __iomem *pronto_saw2_base; void __iomem *pronto_pll_base; void __iomem *pronto_mcu_base; + void __iomem *pronto_qfuse; void 
__iomem *wlan_tx_status; void __iomem *wlan_tx_phy_aborts; void __iomem *wlan_brdg_err_source; @@ -423,6 +428,9 @@ static struct { int pc_disabled; struct delayed_work wcnss_pm_qos_del_req; struct mutex pm_qos_mutex; + struct clk *snoc_wcnss; + unsigned int snoc_wcnss_clock_freq; + bool is_dual_band_disabled; } *penv = NULL; static ssize_t wcnss_wlan_macaddr_store(struct device *dev, @@ -595,7 +603,31 @@ void wcnss_pronto_is_a2xb_bus_stall(void *tst_addr, u32 fifo_mask, char *type) } } -/* Log pronto debug registers before sending reset interrupt */ +int wcnss_get_dual_band_capability_info(struct platform_device *pdev) +{ + u32 reg = 0; + struct resource *res; + + res = platform_get_resource_byname( + pdev, IORESOURCE_MEM, "pronto_qfuse"); + if (!res) + return -EINVAL; + + penv->pronto_qfuse = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(penv->pronto_qfuse)) + return -ENOMEM; + + reg = readl_relaxed(penv->pronto_qfuse + + PRONTO_QFUSE_DUAL_BAND_OFFSET); + if (reg & WCNSS_DUAL_BAND_CAPABILITY_OFFSET) + penv->is_dual_band_disabled = true; + else + penv->is_dual_band_disabled = false; + + return 0; +} + +/* Log pronto debug registers during SSR Timeout CB */ void wcnss_pronto_log_debug_regs(void) { void __iomem *reg_addr, *tst_addr, *tst_ctrl_addr; @@ -1683,6 +1715,14 @@ int wcnss_wlan_iris_xo_mode(void) } EXPORT_SYMBOL(wcnss_wlan_iris_xo_mode); +int wcnss_wlan_dual_band_disabled(void) +{ + if (penv && penv->pdev) + return penv->is_dual_band_disabled; + + return -EINVAL; +} +EXPORT_SYMBOL(wcnss_wlan_dual_band_disabled); void wcnss_suspend_notify(void) { @@ -2717,23 +2757,23 @@ wcnss_trigger_config(struct platform_device *pdev) int is_pronto_vadc; int is_pronto_v3; int pil_retry = 0; - int has_pronto_hw = of_property_read_bool(pdev->dev.of_node, - "qcom,has-pronto-hw"); - - is_pronto_vadc = of_property_read_bool(pdev->dev.of_node, - "qcom,is-pronto-vadc"); + struct wcnss_wlan_config *wlan_cfg = &penv->wlan_config; + struct device_node *node = (&pdev->dev)->of_node; + int has_pronto_hw = of_property_read_bool(node, "qcom,has-pronto-hw"); - is_pronto_v3 = of_property_read_bool(pdev->dev.of_node, - "qcom,is-pronto-v3"); + is_pronto_vadc = of_property_read_bool(node, "qcom,is-pronto-vadc"); + is_pronto_v3 = of_property_read_bool(node, "qcom,is-pronto-v3"); - penv->is_vsys_adc_channel = of_property_read_bool(pdev->dev.of_node, - "qcom,has-vsys-adc-channel"); + penv->is_vsys_adc_channel = + of_property_read_bool(node, "qcom,has-vsys-adc-channel"); + penv->is_a2xb_split_reg = + of_property_read_bool(node, "qcom,has-a2xb-split-reg"); - penv->is_a2xb_split_reg = of_property_read_bool(pdev->dev.of_node, - "qcom,has-a2xb-split-reg"); + wlan_cfg->wcn_external_gpio_support = + of_property_read_bool(node, "qcom,wcn-external-gpio-support"); - if (of_property_read_u32(pdev->dev.of_node, - "qcom,wlan-rx-buff-count", &penv->wlan_rx_buff_count)) { + if (of_property_read_u32(node, "qcom,wlan-rx-buff-count", + &penv->wlan_rx_buff_count)) { penv->wlan_rx_buff_count = WCNSS_DEF_WLAN_RX_BUFF_COUNT; } @@ -2794,15 +2834,18 @@ wcnss_trigger_config(struct platform_device *pdev) goto fail; } - index++; - ret = wcnss_dt_parse_vreg_level(&pdev->dev, index, - "qcom,iris-vddpa-current", - "qcom,iris-vddpa-voltage-level", - penv->wlan_config.iris_vlevel); - - if (ret) { - dev_err(&pdev->dev, "error reading voltage-level property\n"); - goto fail; + if (!wlan_cfg->wcn_external_gpio_support) { + index++; + ret = wcnss_dt_parse_vreg_level( + &pdev->dev, index, + "qcom,iris-vddpa-current", + 
"qcom,iris-vddpa-voltage-level", + penv->wlan_config.iris_vlevel); + if (ret) { + dev_err(&pdev->dev, + "error reading voltage-level property\n"); + goto fail; + } } index++; @@ -2825,8 +2868,8 @@ wcnss_trigger_config(struct platform_device *pdev) pdata = pdev->dev.platform_data; if (WCNSS_CONFIG_UNSPECIFIED == has_48mhz_xo) { if (has_pronto_hw) { - has_48mhz_xo = of_property_read_bool(pdev->dev.of_node, - "qcom,has-48mhz-xo"); + has_48mhz_xo = + of_property_read_bool(node, "qcom,has-48mhz-xo"); } else { has_48mhz_xo = pdata->has_48mhz_xo; } @@ -2837,8 +2880,8 @@ wcnss_trigger_config(struct platform_device *pdev) penv->wlan_config.is_pronto_v3 = is_pronto_v3; if (WCNSS_CONFIG_UNSPECIFIED == has_autodetect_xo && has_pronto_hw) { - has_autodetect_xo = of_property_read_bool(pdev->dev.of_node, - "qcom,has-autodetect-xo"); + has_autodetect_xo = + of_property_read_bool(node, "qcom,has-autodetect-xo"); } penv->thermal_mitigation = 0; @@ -3118,6 +3161,16 @@ wcnss_trigger_config(struct platform_device *pdev) __func__); goto fail_ioremap2; } + + if (of_property_read_bool(node, + "qcom,is-dual-band-disabled")) { + ret = wcnss_get_dual_band_capability_info(pdev); + if (ret) { + pr_err( + "%s: failed to get dual band info\n", __func__); + goto fail_ioremap2; + } + } } penv->adc_tm_dev = qpnp_get_adc_tm(&penv->pdev->dev, "wcnss"); @@ -3129,6 +3182,21 @@ wcnss_trigger_config(struct platform_device *pdev) penv->fw_vbatt_state = WCNSS_CONFIG_UNSPECIFIED; } + penv->snoc_wcnss = devm_clk_get(&penv->pdev->dev, "snoc_wcnss"); + if (IS_ERR(penv->snoc_wcnss)) { + pr_err("%s: couldn't get snoc_wcnss\n", __func__); + penv->snoc_wcnss = NULL; + } else { + if (of_property_read_u32(pdev->dev.of_node, + "qcom,snoc-wcnss-clock-freq", + &penv->snoc_wcnss_clock_freq)) { + pr_debug("%s: wcnss snoc clock frequency is not defined\n", + __func__); + devm_clk_put(&penv->pdev->dev, penv->snoc_wcnss); + penv->snoc_wcnss = NULL; + } + } + if (penv->wlan_config.is_pronto_vadc) { penv->vadc_dev = qpnp_get_vadc(&penv->pdev->dev, "wcnss"); @@ -3191,6 +3259,38 @@ fail: return ret; } +/* Driver requires to directly vote the snoc clocks + * To enable and disable snoc clock, it call + * wcnss_snoc_vote function + */ +void wcnss_snoc_vote(bool clk_chk_en) +{ + int rc; + + if (!penv->snoc_wcnss) { + pr_err("%s: couldn't get clk snoc_wcnss\n", __func__); + return; + } + + if (clk_chk_en) { + rc = clk_set_rate(penv->snoc_wcnss, + penv->snoc_wcnss_clock_freq); + if (rc) { + pr_err("%s: snoc_wcnss_clk-clk_set_rate failed =%d\n", + __func__, rc); + return; + } + + if (clk_prepare_enable(penv->snoc_wcnss)) { + pr_err("%s: snoc_wcnss clk enable failed\n", __func__); + return; + } + } else { + clk_disable_unprepare(penv->snoc_wcnss); + } +} +EXPORT_SYMBOL(wcnss_snoc_vote); + /* wlan prop driver cannot invoke cancel_work_sync * function directly, so to invoke this function it * call wcnss_flush_work function diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 8e11fb2831cd..34f1d6b41fb9 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -741,6 +741,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size, BUG_ON(!dev); ioc = GET_IOC(dev); + if (!ioc) + return DMA_ERROR_CODE; BUG_ON(size <= 0); @@ -805,6 +807,10 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size, BUG_ON(!dev); ioc = GET_IOC(dev); + if (!ioc) { + WARN_ON(!ioc); + return; + } DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long)iova, size); @@ -908,6 +914,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, 
int nents, BUG_ON(!dev); ioc = GET_IOC(dev); + if (!ioc) + return 0; DBG_RUN_SG("%s() START %d entries\n", __func__, nents); @@ -980,6 +988,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, BUG_ON(!dev); ioc = GET_IOC(dev); + if (!ioc) { + WARN_ON(!ioc); + return; + } DBG_RUN_SG("%s() START %d entries, %p,%x\n", __func__, nents, sg_virt(sglist), sglist->length); diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index a0580afe1713..7b0ca1551d7b 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -154,7 +154,10 @@ struct dino_device }; /* Looks nice and keeps the compiler happy */ -#define DINO_DEV(d) ((struct dino_device *) d) +#define DINO_DEV(d) ({ \ + void *__pdata = d; \ + BUG_ON(!__pdata); \ + (struct dino_device *)__pdata; }) /* diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 42844c2bc065..d0c2759076a2 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -111,8 +111,10 @@ static u32 lba_t32; /* Looks nice and keeps the compiler happy */ -#define LBA_DEV(d) ((struct lba_device *) (d)) - +#define LBA_DEV(d) ({ \ + void *__pdata = d; \ + BUG_ON(!__pdata); \ + (struct lba_device *)__pdata; }) /* ** Only allow 8 subsidiary busses per LBA diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 225049b492e5..d6326144ce01 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask) return 0; ioc = GET_IOC(dev); + if (!ioc) + return 0; /* * check if mask is >= than the current max IO Virt Address @@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, int pide; ioc = GET_IOC(dev); + if (!ioc) + return DMA_ERROR_CODE; /* save offset bits */ offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK; @@ -803,6 +807,10 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size); ioc = GET_IOC(dev); + if (!ioc) { + WARN_ON(!ioc); + return; + } offset = iova & ~IOVP_MASK; iova ^= offset; /* clear offset bits */ size += offset; @@ -942,6 +950,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, DBG_RUN_SG("%s() START %d entries\n", __func__, nents); ioc = GET_IOC(dev); + if (!ioc) + return 0; /* Fast path single entry scatterlists. 
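The parisc hunks in this patch all add one recurring guard: GET_IOC() can return NULL when a device hangs off an unexpected parent, and the DMA paths must fail soft instead of oopsing. Map paths can report failure through their return value (DMA_ERROR_CODE, or zero mapped entries); unmap paths are void, so they can only warn and bail. A self-contained model of the two shapes; get_ioc() here is a stub standing in for GET_IOC().

#include <stdio.h>
#include <stdint.h>

#define DMA_ERROR_CODE ((uint64_t)~0ULL)

struct ioc { int id; };

/* Returning NULL models a device on an unexpected parent bus. */
static struct ioc *get_ioc(const void *dev)
{
        (void)dev;
        return NULL;
}

/* Map paths report failure through their return value... */
static uint64_t map_single(const void *dev, void *addr)
{
        struct ioc *ioc = get_ioc(dev);

        if (!ioc)
                return DMA_ERROR_CODE;  /* refuse the mapping, don't oops */
        return (uint64_t)(uintptr_t)addr;       /* real translation elided */
}

/* ...unmap paths are void, so the guard can only warn and return. */
static void unmap_single(const void *dev, uint64_t iova)
{
        struct ioc *ioc = get_ioc(dev);

        (void)iova;
        if (!ioc) {
                fprintf(stderr, "unmap_single: no IOC for device\n");
                return;
        }
        /* real teardown elided */
}

int main(void)
{
        char buf[8];

        if (map_single(NULL, buf) == DMA_ERROR_CODE)
                puts("mapping refused cleanly");
        unmap_single(NULL, 0);
        return 0;
}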
*/ if (nents == 1) { @@ -1027,6 +1037,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, __func__, nents, sg_virt(sglist), sglist->length); ioc = GET_IOC(dev); + if (!ioc) { + WARN_ON(!ioc); + return; + } #ifdef SBA_COLLECT_STATS ioc->usg_calls++; diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index 217c7ce3f57b..8e66cd5770b5 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -278,6 +278,7 @@ #define PERST_PROPAGATION_DELAY_US_MIN 1000 #define PERST_PROPAGATION_DELAY_US_MAX 1005 +#define SWITCH_DELAY_MAX 20 #define REFCLK_STABILIZATION_DELAY_US_MIN 1000 #define REFCLK_STABILIZATION_DELAY_US_MAX 1005 #define LINK_UP_TIMEOUT_US_MIN 5000 @@ -626,6 +627,7 @@ struct msm_pcie_dev_t { bool ext_ref_clk; bool common_phy; uint32_t ep_latency; + uint32_t switch_latency; uint32_t wr_halt_size; uint32_t cpl_timeout; uint32_t current_bdf; @@ -1735,7 +1737,8 @@ static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev) static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev) { - int ret, scm_ret; + int ret; + u64 scm_ret; if (!dev) { pr_err("PCIe: the input pcie dev is NULL.\n"); @@ -1745,7 +1748,7 @@ static int msm_pcie_restore_sec_config(struct msm_pcie_dev_t *dev) ret = scm_restore_sec_cfg(dev->scm_dev_id, 0, &scm_ret); if (ret || scm_ret) { PCIE_ERR(dev, - "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%d\n", + "PCIe: RC%d failed(%d) to restore sec config, scm_ret=%llu\n", dev->rc_idx, ret, scm_ret); return ret ? ret : -EINVAL; } @@ -1984,6 +1987,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev) dev->common_phy); PCIE_DBG_FS(dev, "ep_latency: %dms\n", dev->ep_latency); + PCIE_DBG_FS(dev, "switch_latency: %dms\n", + dev->switch_latency); PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n", dev->wr_halt_size); PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n", @@ -4675,7 +4680,15 @@ int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options) goto link_fail; } - msleep(500); + if (dev->switch_latency) { + PCIE_DBG(dev, "switch_latency: %dms\n", + dev->switch_latency); + if (dev->switch_latency <= SWITCH_DELAY_MAX) + usleep_range(dev->switch_latency * 1000, + dev->switch_latency * 1000); + else + msleep(dev->switch_latency); + } msm_pcie_config_controller(dev); @@ -6279,6 +6292,20 @@ static int msm_pcie_probe(struct platform_device *pdev) PCIE_DBG(&msm_pcie_dev[rc_idx], "RC%d: ep-latency: 0x%x.\n", rc_idx, msm_pcie_dev[rc_idx].ep_latency); + msm_pcie_dev[rc_idx].switch_latency = 0; + ret = of_property_read_u32((&pdev->dev)->of_node, + "qcom,switch-latency", + &msm_pcie_dev[rc_idx].switch_latency); + + if (ret) + PCIE_DBG(&msm_pcie_dev[rc_idx], + "RC%d: switch-latency does not exist.\n", + rc_idx); + else + PCIE_DBG(&msm_pcie_dev[rc_idx], + "RC%d: switch-latency: 0x%x.\n", + rc_idx, msm_pcie_dev[rc_idx].switch_latency); + msm_pcie_dev[rc_idx].wr_halt_size = 0; ret = of_property_read_u32(pdev->dev.of_node, "qcom,wr-halt-size", diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 63ec68e6ac2a..39400dda27c2 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -552,6 +552,7 @@ static void armpmu_init(struct arm_pmu *armpmu) .stop = armpmu_stop, .read = armpmu_read, .filter_match = armpmu_filter_match, + .events_across_hotplug = 1, }; } diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c index 6bbda6b4ab50..5da9c95dccb7 100644 --- a/drivers/pinctrl/freescale/pinctrl-mxs.c +++ b/drivers/pinctrl/freescale/pinctrl-mxs.c @@ -195,6 +195,16 @@ static int 
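The pci-msm hunk above swaps a fixed msleep(500) for the DT-provided switch latency and chooses the sleep primitive by magnitude: at or under SWITCH_DELAY_MAX (20 ms) it uses usleep_range(), which is hrtimer-backed and precise, otherwise msleep(), whose jiffy resolution is fine for long waits. A compact kernel-style sketch of that selection:

#include <linux/delay.h>

#define SWITCH_DELAY_MAX 20     /* ms, as defined in the hunk above */

static void switch_latency_delay(unsigned int latency_ms)
{
        if (!latency_ms)
                return;         /* property absent: no extra wait */

        if (latency_ms <= SWITCH_DELAY_MAX)
                /* short waits: hrtimer-backed, avoids jiffy rounding */
                usleep_range(latency_ms * 1000, latency_ms * 1000);
        else
                /* long waits: coarse timer wheel is accurate enough */
                msleep(latency_ms);
}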
mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev, return 0; } +static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg) +{ + u32 tmp; + + tmp = readl(reg); + tmp &= ~(mask << shift); + tmp |= value << shift; + writel(tmp, reg); +} + static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, unsigned group) { @@ -212,8 +222,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, reg += bank * 0x20 + pin / 16 * 0x10; shift = pin % 16 * 2; - writel(0x3 << shift, reg + CLR); - writel(g->muxsel[i] << shift, reg + SET); + mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg); } return 0; @@ -280,8 +289,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev, /* mA */ if (config & MA_PRESENT) { shift = pin % 8 * 4; - writel(0x3 << shift, reg + CLR); - writel(ma << shift, reg + SET); + mxs_pinctrl_rmwl(ma, 0x3, shift, reg); } /* vol */ diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c index 9677807db364..b505b87661f8 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c @@ -732,8 +732,8 @@ static const char * const sdxc_c_groups[] = { static const char * const nand_groups[] = { "nand_io", "nand_io_ce0", "nand_io_ce1", "nand_io_rb0", "nand_ale", "nand_cle", - "nand_wen_clk", "nand_ren_clk", "nand_dqs0", - "nand_dqs1" + "nand_wen_clk", "nand_ren_clk", "nand_dqs_0", + "nand_dqs_1" }; static const char * const nor_groups[] = { diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c index 2b0d70217bbd..699efb1a8c45 100644 --- a/drivers/pinctrl/sh-pfc/core.c +++ b/drivers/pinctrl/sh-pfc/core.c @@ -543,6 +543,9 @@ static int sh_pfc_probe(struct platform_device *pdev) ret = info->ops->init(pfc); if (ret < 0) return ret; + + /* .init() may have overridden pfc->info */ + info = pfc->info; } /* Enable dummy states for those platforms without pinctrl support */ diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c index 87a4f44147c1..42ffa8708abc 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c @@ -1102,7 +1102,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP6_5_3, FMIN_E, SEL_FM_4), PINMUX_IPSR_DATA(IP6_7_6, AUDIO_CLKOUT), PINMUX_IPSR_MSEL(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1), - PINMUX_IPSR_MSEL(IP6_5_3, TX2, SEL_SCIF2_0), + PINMUX_IPSR_MSEL(IP6_7_6, TX2, SEL_SCIF2_0), PINMUX_IPSR_MSEL(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0), PINMUX_IPSR_DATA(IP6_9_8, IRQ0), PINMUX_IPSR_MSEL(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3), diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c index 90b973e15982..a7c81e988656 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c @@ -394,7 +394,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = { SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x3, "owa")), /* DOUT */ + SUNXI_FUNCTION(0x3, "spdif")), /* DOUT */ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out")), diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c index 51806cec1e4d..49aa7f25347d 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c @@ -649,8 +649,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, return 0; 
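The pinctrl-mxs change above matters because the old CLR-then-SET register pair drives a 2-bit mux field through an intermediate all-zero state between the two bus writes, briefly selecting function 0 on the pin. mxs_pinctrl_rmwl() folds the update into one read-modify-write so no intermediate value is ever observable. A small host-side model of the helper:

#include <stdio.h>
#include <stdint.h>

static uint32_t reg = 0xFFFFFFFFu;      /* pretend mux register contents */

/* Single read-modify-write: no observable intermediate state. */
static void rmwl(uint32_t value, uint32_t mask, unsigned int shift)
{
        uint32_t tmp = reg;             /* readl() in the driver */

        tmp &= ~(mask << shift);
        tmp |= value << shift;
        reg = tmp;                      /* one writel() */
}

int main(void)
{
        /* Old sequence wrote reg+CLR then reg+SET; between the two
         * stores the field at bits 5:4 read as 0, a valid but wrong
         * mux setting. */
        rmwl(0x2, 0x3, 4);              /* set the 2-bit field to 2 */
        printf("reg = 0x%08X\n", (unsigned int)reg);    /* 0xFFFFFFEF */
        return 0;
}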
ipa_insert_failed: - if (offset) - list_move(&offset->link, + list_move(&offset->link, &htbl->head_free_offset_list[offset->bin]); entry->offset_entry = NULL; list_del(&entry->link); diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c index dd591407d10f..5228b2db1410 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1436,6 +1436,66 @@ struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = { start_ipv6_filter_idx), }, { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_FILTERS_V01, + .elem_size = sizeof(uint32_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x17, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + rule_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01, + .elem_size = sizeof(uint32_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id), + }, + { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c index 011ca300cc09..0531919487d7 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c @@ -1399,7 +1399,7 @@ int ipa2_put_rt_tbl(u32 rt_tbl_hdl) { struct ipa_rt_tbl *entry; enum ipa_ip_type ip = IPA_IP_MAX; - int result; + int result = 0; mutex_lock(&ipa_ctx->lock); entry = ipa_id_find(rt_tbl_hdl); diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c index 834712a71ac6..5dbd43b44540 100644 --- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c @@ -1878,7 +1878,9 @@ void q6_deinitialize_rm(void) if (ret < 0) IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_Q6_PROD, ret); - destroy_workqueue(ipa_rm_q6_workqueue); + + if (ipa_rm_q6_workqueue) + destroy_workqueue(ipa_rm_q6_workqueue); } static void wake_tx_queue(struct work_struct *work) @@ -2187,7 +2189,10 @@ timer_init_err: IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, ret); create_rsrc_err: - q6_deinitialize_rm(); + + if (!atomic_read(&is_ssr)) + 
q6_deinitialize_rm(); + q6_init_err: free_netdev(ipa_netdevs[0]); ipa_netdevs[0] = NULL; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c index 7c3b5838242e..ce35ba02154d 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c @@ -426,8 +426,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, return 0; ipa_insert_failed: - if (offset) - list_move(&offset->link, + list_move(&offset->link, &htbl->head_free_offset_list[offset->bin]); entry->offset_entry = NULL; list_del(&entry->link); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c index 690a9db67ff0..571852c076ea 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c @@ -807,6 +807,11 @@ int ipa3_qmi_filter_notify_send( return -EINVAL; } + if (req->source_pipe_index == -1) { + IPAWANERR("Source pipe index invalid\n"); + return -EINVAL; + } + mutex_lock(&ipa3_qmi_lock); if (ipa3_qmi_ctx != NULL) { /* cache the qmi_filter_request */ diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h index d5d850309696..e6f1e2ce0b75 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h @@ -121,6 +121,31 @@ extern struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[]; extern struct elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[]; extern struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[]; + extern struct elem_info + ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[]; + extern struct elem_info + ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[]; + extern struct elem_info + ipa3_ul_firewall_rule_type_data_v01_ei[]; + extern struct elem_info + ipa3_ul_firewall_config_result_type_data_v01_ei[]; + extern struct elem_info + ipa3_per_client_stats_info_type_data_v01_ei[]; + extern struct elem_info + ipa3_enable_per_client_stats_req_msg_data_v01_ei[]; + extern struct elem_info + ipa3_enable_per_client_stats_resp_msg_data_v01_ei[]; + extern struct elem_info + ipa3_get_stats_per_client_req_msg_data_v01_ei[]; + extern struct elem_info + ipa3_get_stats_per_client_resp_msg_data_v01_ei[]; + extern struct elem_info + ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[]; + extern struct elem_info + ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[]; + extern struct elem_info + ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[]; + /** * struct ipa3_rmnet_context - IPA rmnet context * @ipa_rmnet_ssr: support modem SSR diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c index 6a5cb4891c02..746863732dc5 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -16,6 +16,8 @@ #include <soc/qcom/msm_qmi_interface.h> +#include "ipa_qmi_service.h" + /* Type Definitions */ static struct elem_info ipa3_hdr_tbl_info_type_data_v01_ei[] = { { @@ -1756,6 +1758,36 @@ struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[] = { rule_id), }, { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_IPA_MAX_CLIENT_DST_PIPES_V01, + .elem_size = sizeof(uint32_t), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x18, + .offset = offsetof( + struct ipa_fltr_installed_notif_req_msg_v01, + dst_pipe_id), + }, + { .data_type = QMI_EOTI, .is_array = NO_ARRAY, .tlv_type = QMI_COMMON_TLV_TYPE, @@ -2923,3 +2955,435 @@ struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[] = { .tlv_type = QMI_COMMON_TLV_TYPE, }, }; + +struct elem_info ipa3_per_client_stats_info_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + client_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + src_pipe_id), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_ul_ipv4_bytes), + + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_ul_ipv6_bytes), + + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_dl_ipv4_bytes), + + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_dl_ipv6_bytes), + + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_ul_ipv4_pkts), + + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_ul_ipv6_pkts), + + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct 
ipa_per_client_stats_info_type_v01, + num_dl_ipv4_pkts), + + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_per_client_stats_info_type_v01, + num_dl_ipv6_pkts), + + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa3_ul_firewall_rule_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ul_firewall_rule_type_v01, + ip_type), + }, + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct ipa_filter_rule_type_v01), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct ipa_ul_firewall_rule_type_v01, + filter_rule), + .ei_array = ipa3_filter_rule_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa3_ul_firewall_config_result_type_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ul_firewall_config_result_type_v01, + is_success), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof( + struct ipa_ul_firewall_config_result_type_v01, + mux_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa3_enable_per_client_stats_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + ipa_enable_per_client_stats_req_msg_v01, + enable_per_client_stats), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa3_enable_per_client_stats_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_enable_per_client_stats_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa3_get_stats_per_client_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_get_stats_per_client_req_msg_v01, + client_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_get_stats_per_client_req_msg_v01, + src_pipe_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_stats_per_client_req_msg_v01, + reset_stats_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_stats_per_client_req_msg_v01, + reset_stats), + 
}, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info ipa3_get_stats_per_client_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct ipa_get_stats_per_client_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_stats_per_client_resp_msg_v01, + per_client_stats_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_stats_per_client_resp_msg_v01, + per_client_stats_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_PER_CLIENTS_V01, + .elem_size = + sizeof(struct ipa_per_client_stats_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_get_stats_per_client_resp_msg_v01, + per_client_stats_list), + .ei_array = + ipa3_per_client_stats_info_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info + ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x1, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_req_msg_v01, + firewall_rules_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_IPA_MAX_UL_FIREWALL_RULES_V01, + .elem_size = sizeof(struct ipa_ul_firewall_rule_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x1, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_req_msg_v01, + firewall_rules_list), + .ei_array = + ipa3_ul_firewall_rule_type_data_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x2, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_req_msg_v01, + mux_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_req_msg_v01, + disable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_req_msg_v01, + disable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_req_msg_v01, + are_blacklist_filters_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_req_msg_v01, + are_blacklist_filters), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info + ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct 
ipa_configure_ul_firewall_rules_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info + ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof( + struct ipa_ul_firewall_config_result_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct ipa_configure_ul_firewall_rules_ind_msg_v01, + result), + .ei_array = + ipa3_ul_firewall_config_result_type_data_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c index bc7cc7060545..8e790c89ed13 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c @@ -1479,7 +1479,7 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl) { struct ipa3_rt_tbl *entry; enum ipa_ip_type ip = IPA_IP_MAX; - int result; + int result = 0; mutex_lock(&ipa3_ctx->lock); entry = ipa3_id_find(rt_tbl_hdl); @@ -1501,6 +1501,7 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl) ip = IPA_IP_v6; else { WARN_ON(1); + result = -EINVAL; goto ret; } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index e8bd0cd2ffb5..545c2b599a6f 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -2861,7 +2861,7 @@ static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[], IPAHAL_FULL_PIPELINE_CLEAR; reg_write_agg_close.offset = ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE); - ipahal_get_aggr_force_close_valmask(1<<i, &valmask); + ipahal_get_aggr_force_close_valmask(i, &valmask); reg_write_agg_close.value = valmask.val; reg_write_agg_close.value_mask = valmask.mask; cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c index 0db9f30181a7..594fd9bcfc02 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c @@ -1576,6 +1576,11 @@ void ipahal_get_aggr_force_close_valmask(int ep_idx, IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5; } + if (ep_idx > (sizeof(valmask->val) * 8 - 1)) { + IPAHAL_ERR("too big ep_idx %d\n", ep_idx); + ipa_assert(); + return; + } IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk); valmask->mask = bmsk << shft; } diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index 039bc7da5153..8fbde6675070 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -702,6 +702,11 @@ static int ipa3_wwan_add_ul_flt_rule_to_ipa(void) /* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/ req->source_pipe_index = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD); + if (req->source_pipe_index == IPA_EP_NOT_ALLOCATED) { + IPAWANERR("ep mapping failed\n"); + retval = -EFAULT; + } + req->install_status = QMI_RESULT_SUCCESS_V01; req->rule_id_valid = 1; req->rule_id_len = rmnet_ipa3_ctx->num_q6_rules; @@ -1947,7 +1952,9 @@ void ipa3_q6_deinitialize_rm(void) if (ret < 0) IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_Q6_PROD, ret); - destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq); + + if (rmnet_ipa3_ctx->rm_q6_wq) + destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq); 
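Two related fixes above harden ipahal_get_aggr_force_close_valmask(): the ipa_utils.c caller now passes the endpoint index itself rather than 1 << i (the callee shifts internally, so the old code shifted twice), and the callee validates the index before shifting, since shifting a 32-bit value by 32 or more positions is undefined behaviour. A user-space sketch of the guard (names are illustrative):

#include <stdint.h>
#include <stdio.h>

static int set_pipe_bit(uint32_t *bitmap, int ep_idx)
{
	if (ep_idx < 0 || ep_idx > (int)(sizeof(*bitmap) * 8 - 1))
		return -1;	/* would be UB: shift >= type width */

	*bitmap |= UINT32_C(1) << ep_idx;
	return 0;
}

int main(void)
{
	uint32_t bitmap = 0;

	if (set_pipe_bit(&bitmap, 5) == 0)
		printf("bitmap = 0x%08x\n", (unsigned)bitmap);	/* 0x00000020 */
	if (set_pipe_bit(&bitmap, 32) < 0)
		printf("ep_idx 32 rejected\n");
	return 0;
}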
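The rmnet_ipa teardown changes in both IPA generations follow one defensive pattern: destroy_workqueue() dereferences its argument unconditionally, and the error path can now reach deinit before the workqueue exists (or while SSR is in progress), so the handle is checked first. A kernel-style sketch with an invented context struct:

#include <linux/workqueue.h>

struct rmnet_ctx {
	struct workqueue_struct *wq;	/* may still be NULL on error paths */
};

static void rmnet_deinit(struct rmnet_ctx *ctx)
{
	if (ctx->wq) {
		destroy_workqueue(ctx->wq);
		ctx->wq = NULL;	/* make a repeated deinit harmless */
	}
}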
} static void ipa3_wake_tx_queue(struct work_struct *work) @@ -2287,7 +2294,10 @@ timer_init_err: IPAWANERR("Error deleting resource %d, ret=%d\n", IPA_RM_RESOURCE_WWAN_0_PROD, ret); create_rsrc_err: - ipa3_q6_deinitialize_rm(); + + if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) + ipa3_q6_deinitialize_rm(); + q6_init_err: free_netdev(dev); rmnet_ipa3_ctx->wwan_priv = NULL; diff --git a/drivers/platform/msm/mhi/mhi_iface.c b/drivers/platform/msm/mhi/mhi_iface.c index a5936ea5a6aa..ce6d1257cfbb 100644 --- a/drivers/platform/msm/mhi/mhi_iface.c +++ b/drivers/platform/msm/mhi/mhi_iface.c @@ -509,7 +509,7 @@ static int __exit mhi_plat_remove(struct platform_device *pdev) static int __init mhi_init(void) { - int r; + int r = -EAGAIN; struct mhi_device_driver *mhi_dev_drv; mhi_dev_drv = kmalloc(sizeof(*mhi_dev_drv), GFP_KERNEL); diff --git a/drivers/platform/msm/mhi/mhi_init.c b/drivers/platform/msm/mhi/mhi_init.c index b6edf707798b..a95579241524 100644 --- a/drivers/platform/msm/mhi/mhi_init.c +++ b/drivers/platform/msm/mhi/mhi_init.c @@ -141,7 +141,7 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt) size_t mhi_mem_index = 0, ring_len; void *dev_mem_start; dma_addr_t dma_dev_mem_start; - int i, r; + int i; mhi_dev_ctxt->dev_space.dev_mem_len = calculate_mhi_space(mhi_dev_ctxt); @@ -244,7 +244,7 @@ err_ev_alloc: mhi_dev_ctxt->dev_space.dev_mem_len, mhi_dev_ctxt->dev_space.dev_mem_start, mhi_dev_ctxt->dev_space.dma_dev_mem_start); - return r; + return -EFAULT; } static int mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt) diff --git a/drivers/platform/msm/mhi/mhi_mmio_ops.c b/drivers/platform/msm/mhi/mhi_mmio_ops.c index a991a2e68b34..18d0334ce1ec 100644 --- a/drivers/platform/msm/mhi/mhi_mmio_ops.c +++ b/drivers/platform/msm/mhi/mhi_mmio_ops.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -109,7 +109,6 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt) u64 pcie_dword_val = 0; u32 pcie_word_val = 0; u32 i = 0; - int ret_val; mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "~~~ Initializing MMIO ~~~\n"); @@ -131,7 +130,7 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt) if (mhi_dev_ctxt->core.mhi_ver != MHI_VERSION) { mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL, "Bad MMIO version, 0x%x\n", mhi_dev_ctxt->core.mhi_ver); - return ret_val; + return -ENXIO; } /* Enable the channels */ diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c index 1d9282627d4e..b1434daf1f60 100644 --- a/drivers/platform/msm/mhi/mhi_sys.c +++ b/drivers/platform/msm/mhi/mhi_sys.c @@ -329,7 +329,7 @@ uintptr_t mhi_p2v_addr(struct mhi_device_ctxt *mhi_dev_ctxt, enum MHI_RING_TYPE type, u32 chan, uintptr_t phy_ptr) { - uintptr_t virtual_ptr; + uintptr_t virtual_ptr = 0; struct mhi_ring_ctxt *cs = &mhi_dev_ctxt->dev_space.ring_ctxt; switch (type) { @@ -358,7 +358,7 @@ dma_addr_t mhi_v2p_addr(struct mhi_device_ctxt *mhi_dev_ctxt, enum MHI_RING_TYPE type, u32 chan, uintptr_t va_ptr) { - dma_addr_t phy_ptr; + dma_addr_t phy_ptr = 0; struct mhi_ring_ctxt *cs = &mhi_dev_ctxt->dev_space.ring_ctxt; switch (type) { diff --git a/drivers/platform/msm/mhi_uci/mhi_uci.c b/drivers/platform/msm/mhi_uci/mhi_uci.c index 5b50666d30a2..9c35eeb177d9 100644 --- a/drivers/platform/msm/mhi_uci/mhi_uci.c +++ b/drivers/platform/msm/mhi_uci/mhi_uci.c @@ -1030,7 +1030,7 @@ error_dts: static void process_rs232_state(struct uci_client *ctrl_client, struct mhi_result *result) { - struct rs232_ctrl_msg *rs232_pkt; + struct rs232_ctrl_msg *rs232_pkt = result->buf_addr; struct uci_client *client = NULL; struct mhi_uci_ctxt_t *uci_ctxt = ctrl_client->uci_ctxt; u32 msg_id; @@ -1051,7 +1051,6 @@ static void process_rs232_state(struct uci_client *ctrl_client, sizeof(struct rs232_ctrl_msg)); goto error_size; } - rs232_pkt = result->buf_addr; MHI_GET_CTRL_DEST_ID(CTRL_DEST_ID, rs232_pkt, chan); for (i = 0; i < MHI_SOFTWARE_CLIENT_LIMIT; i++) if (chan == uci_ctxt->client_handles[i].out_attr.chan_id || diff --git a/drivers/platform/msm/usb_bam.c b/drivers/platform/msm/usb_bam.c index a151d0c7a770..48d31140834a 100644 --- a/drivers/platform/msm/usb_bam.c +++ b/drivers/platform/msm/usb_bam.c @@ -235,10 +235,10 @@ void msm_bam_set_hsic_host_dev(struct device *dev) if (dev) { /* Hold the device until allowing lpm */ info[HSIC_CTRL].in_lpm = false; - log_event_dbg("%s: Getting hsic device %p\n", __func__, dev); + log_event_dbg("%s: Getting hsic device %pK\n", __func__, dev); pm_runtime_get(dev); } else if (host_info[HSIC_CTRL].dev) { - log_event_dbg("%s: Try Putting hsic device %p, lpm:%d\n", + log_event_dbg("%s: Try Putting hsic device %pK, lpm:%d\n", __func__, host_info[HSIC_CTRL].dev, info[HSIC_CTRL].in_lpm); /* Just release previous device if not already done */ @@ -825,7 +825,7 @@ static bool _hsic_host_bam_resume_core(void) /* Exit from "full suspend" in case of hsic host */ if (host_info[HSIC_CTRL].dev && info[HSIC_CTRL].in_lpm) { - log_event_dbg("%s: Getting hsic device %p\n", __func__, + log_event_dbg("%s: Getting hsic device %pK\n", __func__, host_info[HSIC_CTRL].dev); pm_runtime_get(host_info[HSIC_CTRL].dev); info[HSIC_CTRL].in_lpm = false; @@ -839,7 +839,7 @@ static void _hsic_host_bam_suspend_core(void) log_event_dbg("%s: enter\n", __func__); if 
(host_info[HSIC_CTRL].dev && !info[HSIC_CTRL].in_lpm) { - log_event_dbg("%s: Putting hsic host device %p\n", __func__, + log_event_dbg("%s: Putting hsic host device %pK\n", __func__, host_info[HSIC_CTRL].dev); pm_runtime_put(host_info[HSIC_CTRL].dev); info[HSIC_CTRL].in_lpm = true; @@ -1724,7 +1724,7 @@ static bool check_pipes_empty(enum usb_ctrl bam_type, u8 src_idx, u8 dst_idx) /* If we have any remaints in the pipes we don't go to sleep */ prod_pipe = ctx->usb_bam_sps.sps_pipes[src_idx]; cons_pipe = ctx->usb_bam_sps.sps_pipes[dst_idx]; - log_event_dbg("prod_pipe=%p, cons_pipe=%p\n", prod_pipe, cons_pipe); + log_event_dbg("prod_pipe=%pK, cons_pipe=%pK\n", prod_pipe, cons_pipe); if (!cons_pipe || (!prod_pipe && prod_pipe_connect->pipe_type == USB_BAM_PIPE_BAM2BAM)) { @@ -2091,7 +2091,7 @@ static bool msm_bam_host_lpm_ok(enum usb_ctrl bam_type) } /* HSIC host will go now to lpm */ - log_event_dbg("%s: vote for suspend hsic %p\n", + log_event_dbg("%s: vote for suspend hsic %pK\n", __func__, host_info[bam_type].dev); for (i = 0; i < ctx->max_connections; i++) { @@ -2454,7 +2454,7 @@ static void usb_bam_work(struct work_struct *w) if (pipe_iter->bam_type == pipe_connect->bam_type && pipe_iter->dir == PEER_PERIPHERAL_TO_USB && pipe_iter->enabled) { - log_event_dbg("%s: Register wakeup on pipe %p\n", + log_event_dbg("%s: Register wakeup on pipe %pK\n", __func__, pipe_iter); __usb_bam_register_wake_cb( pipe_connect->bam_type, i, diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c index 0dd6017245fa..cf99eb9c2ba0 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c @@ -297,6 +297,8 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(hw_current_max), POWER_SUPPLY_ATTR(real_type), POWER_SUPPLY_ATTR(pr_swap), + POWER_SUPPLY_ATTR(cc_step), + POWER_SUPPLY_ATTR(cc_step_sel), /* Local extensions of type int64_t */ POWER_SUPPLY_ATTR(charge_counter_ext), /* Properties of type `const char *' */ diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h index 947108c1410e..d3932ca1f338 100644 --- a/drivers/power/supply/qcom/fg-core.h +++ b/drivers/power/supply/qcom/fg-core.h @@ -49,7 +49,7 @@ #define SRAM_READ "fg_sram_read" #define SRAM_WRITE "fg_sram_write" #define PROFILE_LOAD "fg_profile_load" -#define DELTA_SOC "fg_delta_soc" +#define TTF_PRIMING "fg_ttf_priming" /* Delta BSOC irq votable reasons */ #define DELTA_BSOC_IRQ_VOTER "fg_delta_bsoc_irq" @@ -81,6 +81,8 @@ #define BATT_THERM_NUM_COEFFS 3 +#define MAX_CC_STEPS 20 + /* Debug flag definitions */ enum fg_debug_flag { FG_IRQ = BIT(0), /* Show interrupts */ @@ -228,6 +230,11 @@ enum esr_timer_config { NUM_ESR_TIMERS, }; +enum ttf_mode { + TTF_MODE_NORMAL = 0, + TTF_MODE_QNOVO, +}; + /* DT parameters for FG device */ struct fg_dt_props { bool force_load_profile; @@ -309,16 +316,31 @@ struct fg_irq_info { }; struct fg_circ_buf { - int arr[20]; + int arr[10]; int size; int head; }; +struct fg_cc_step_data { + int arr[MAX_CC_STEPS]; + int sel; +}; + struct fg_pt { s32 x; s32 y; }; +struct ttf { + struct fg_circ_buf ibatt; + struct fg_circ_buf vbatt; + struct fg_cc_step_data cc_step; + struct mutex lock; + int mode; + int last_ttf; + s64 last_ms; +}; + static const struct fg_pt fg_ln_table[] = { { 1000, 0 }, { 2000, 693 }, @@ -358,6 +380,7 @@ struct fg_chip { struct power_supply *usb_psy; struct power_supply *dc_psy; struct power_supply *parallel_psy; + struct power_supply *pc_port_psy; struct iio_channel *batt_id_chan; struct 
iio_channel *die_temp_chan; struct fg_memif *sram; @@ -374,9 +397,9 @@ struct fg_chip { struct fg_cyc_ctr_data cyc_ctr; struct notifier_block nb; struct fg_cap_learning cl; + struct ttf ttf; struct mutex bus_lock; struct mutex sram_rw_lock; - struct mutex batt_avg_lock; struct mutex charge_full_lock; u32 batt_soc_base; u32 batt_info_base; @@ -389,6 +412,7 @@ struct fg_chip { int prev_charge_status; int charge_done; int charge_type; + int online_status; int last_soc; int last_batt_temp; int health; @@ -413,11 +437,8 @@ struct fg_chip { struct completion soc_ready; struct delayed_work profile_load_work; struct work_struct status_change_work; - struct work_struct cycle_count_work; - struct delayed_work batt_avg_work; + struct delayed_work ttf_work; struct delayed_work sram_dump_work; - struct fg_circ_buf ibatt_circ_buf; - struct fg_circ_buf vbatt_circ_buf; }; /* Debugfs data structures are below */ @@ -475,5 +496,6 @@ extern bool is_qnovo_en(struct fg_chip *chip); extern void fg_circ_buf_add(struct fg_circ_buf *, int); extern void fg_circ_buf_clr(struct fg_circ_buf *); extern int fg_circ_buf_avg(struct fg_circ_buf *, int *); +extern int fg_circ_buf_median(struct fg_circ_buf *, int *); extern int fg_lerp(const struct fg_pt *, size_t, s32, s32 *); #endif diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c index 9635044e02a5..0cb1dea7113b 100644 --- a/drivers/power/supply/qcom/fg-util.c +++ b/drivers/power/supply/qcom/fg-util.c @@ -10,6 +10,7 @@ * GNU General Public License for more details. */ +#include <linux/sort.h> #include "fg-core.h" void fg_circ_buf_add(struct fg_circ_buf *buf, int val) @@ -39,6 +40,39 @@ int fg_circ_buf_avg(struct fg_circ_buf *buf, int *avg) return 0; } +static int cmp_int(const void *a, const void *b) +{ + return *(int *)a - *(int *)b; +} + +int fg_circ_buf_median(struct fg_circ_buf *buf, int *median) +{ + int *temp; + + if (buf->size == 0) + return -ENODATA; + + if (buf->size == 1) { + *median = buf->arr[0]; + return 0; + } + + temp = kmalloc_array(buf->size, sizeof(*temp), GFP_KERNEL); + if (!temp) + return -ENOMEM; + + memcpy(temp, buf->arr, buf->size * sizeof(*temp)); + sort(temp, buf->size, sizeof(*temp), cmp_int, NULL); + + if (buf->size % 2) + *median = temp[buf->size / 2]; + else + *median = (temp[buf->size / 2 - 1] + temp[buf->size / 2]) / 2; + + kfree(temp); + return 0; +} + int fg_lerp(const struct fg_pt *pts, size_t tablesize, s32 input, s32 *output) { int i; diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c index e03681ec13cc..cb2c3888ddd7 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen3.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c @@ -75,6 +75,8 @@ #define ESR_TIMER_CHG_MAX_OFFSET 0 #define ESR_TIMER_CHG_INIT_WORD 18 #define ESR_TIMER_CHG_INIT_OFFSET 2 +#define ESR_EXTRACTION_ENABLE_WORD 19 +#define ESR_EXTRACTION_ENABLE_OFFSET 0 #define PROFILE_LOAD_WORD 24 #define PROFILE_LOAD_OFFSET 0 #define ESR_RSLOW_DISCHG_WORD 34 @@ -1171,6 +1173,42 @@ static bool batt_psy_initialized(struct fg_chip *chip) return true; } +static bool usb_psy_initialized(struct fg_chip *chip) +{ + if (chip->usb_psy) + return true; + + chip->usb_psy = power_supply_get_by_name("usb"); + if (!chip->usb_psy) + return false; + + return true; +} + +static bool pc_port_psy_initialized(struct fg_chip *chip) +{ + if (chip->pc_port_psy) + return true; + + chip->pc_port_psy = power_supply_get_by_name("pc_port"); + if (!chip->pc_port_psy) + return false; + + return true; +} + +static bool 
dc_psy_initialized(struct fg_chip *chip) +{ + if (chip->dc_psy) + return true; + + chip->dc_psy = power_supply_get_by_name("dc"); + if (!chip->dc_psy) + return false; + + return true; +} + static bool is_parallel_charger_available(struct fg_chip *chip) { if (!chip->parallel_psy) @@ -1611,7 +1649,7 @@ static int fg_set_recharge_voltage(struct fg_chip *chip, int voltage_mv) static int fg_charge_full_update(struct fg_chip *chip) { union power_supply_propval prop = {0, }; - int rc, msoc, bsoc, recharge_soc; + int rc, msoc, bsoc, recharge_soc, msoc_raw; u8 full_soc[2] = {0xFF, 0xFF}; if (!chip->dt.hold_soc_while_full) @@ -1647,6 +1685,7 @@ static int fg_charge_full_update(struct fg_chip *chip) pr_err("Error in getting msoc, rc=%d\n", rc); goto out; } + msoc_raw = DIV_ROUND_CLOSEST(msoc * FULL_SOC_RAW, FULL_CAPACITY); fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n", msoc, bsoc, chip->health, chip->charge_status, @@ -1670,7 +1709,7 @@ static int fg_charge_full_update(struct fg_chip *chip) fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n", msoc); } - } else if ((bsoc >> 8) <= recharge_soc && chip->charge_full) { + } else if (msoc_raw < recharge_soc && chip->charge_full) { chip->delta_soc = FULL_CAPACITY - msoc; /* @@ -1700,8 +1739,8 @@ static int fg_charge_full_update(struct fg_chip *chip) rc); goto out; } - fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d delta_soc: %d\n", - bsoc >> 8, recharge_soc, chip->delta_soc); + fg_dbg(chip, FG_STATUS, "msoc_raw = %d bsoc: %d recharge_soc: %d delta_soc: %d\n", + msoc_raw, bsoc >> 8, recharge_soc, chip->delta_soc); } else { goto out; } @@ -2130,102 +2169,67 @@ static int fg_esr_timer_config(struct fg_chip *chip, bool sleep) return 0; } -static void fg_batt_avg_update(struct fg_chip *chip) -{ - if (chip->charge_status == chip->prev_charge_status) - return; - - cancel_delayed_work_sync(&chip->batt_avg_work); - fg_circ_buf_clr(&chip->ibatt_circ_buf); - fg_circ_buf_clr(&chip->vbatt_circ_buf); - - if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING || - chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING) - schedule_delayed_work(&chip->batt_avg_work, - msecs_to_jiffies(2000)); -} - -static void status_change_work(struct work_struct *work) +static void fg_ttf_update(struct fg_chip *chip) { - struct fg_chip *chip = container_of(work, - struct fg_chip, status_change_work); + int rc; + int delay_ms; union power_supply_propval prop = {0, }; - int rc, batt_temp; + int online = 0; - if (!batt_psy_initialized(chip)) { - fg_dbg(chip, FG_STATUS, "Charger not available?!\n"); - goto out; - } + if (usb_psy_initialized(chip)) { + rc = power_supply_get_property(chip->usb_psy, + POWER_SUPPLY_PROP_ONLINE, &prop); + if (rc < 0) { + pr_err("Couldn't read usb ONLINE prop rc=%d\n", rc); + return; + } - rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS, - &prop); - if (rc < 0) { - pr_err("Error in getting charging status, rc=%d\n", rc); - goto out; + online = online || prop.intval; } - chip->prev_charge_status = chip->charge_status; - chip->charge_status = prop.intval; - rc = power_supply_get_property(chip->batt_psy, - POWER_SUPPLY_PROP_CHARGE_TYPE, &prop); - if (rc < 0) { - pr_err("Error in getting charge type, rc=%d\n", rc); - goto out; - } + if (pc_port_psy_initialized(chip)) { + rc = power_supply_get_property(chip->pc_port_psy, + POWER_SUPPLY_PROP_ONLINE, &prop); + if (rc < 0) { + pr_err("Couldn't read pc_port ONLINE prop rc=%d\n", rc); + return; + } - chip->charge_type = prop.intval; - rc = 
power_supply_get_property(chip->batt_psy, - POWER_SUPPLY_PROP_CHARGE_DONE, &prop); - if (rc < 0) { - pr_err("Error in getting charge_done, rc=%d\n", rc); - goto out; + online = online || prop.intval; } - chip->charge_done = prop.intval; - if (chip->cyc_ctr.en) - schedule_work(&chip->cycle_count_work); - - fg_cap_learning_update(chip); - - rc = fg_charge_full_update(chip); - if (rc < 0) - pr_err("Error in charge_full_update, rc=%d\n", rc); - - rc = fg_adjust_recharge_soc(chip); - if (rc < 0) - pr_err("Error in adjusting recharge_soc, rc=%d\n", rc); - - rc = fg_adjust_ki_coeff_dischg(chip); - if (rc < 0) - pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc); - - rc = fg_esr_fcc_config(chip); - if (rc < 0) - pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc); - - rc = fg_esr_timer_config(chip, false); - if (rc < 0) - pr_err("Error in configuring ESR timer, rc=%d\n", rc); - - rc = fg_get_battery_temp(chip, &batt_temp); - if (!rc) { - rc = fg_slope_limit_config(chip, batt_temp); - if (rc < 0) - pr_err("Error in configuring slope limiter rc:%d\n", - rc); + if (dc_psy_initialized(chip)) { + rc = power_supply_get_property(chip->dc_psy, + POWER_SUPPLY_PROP_ONLINE, &prop); + if (rc < 0) { + pr_err("Couldn't read dc ONLINE prop rc=%d\n", rc); + return; + } - rc = fg_adjust_ki_coeff_full_soc(chip, batt_temp); - if (rc < 0) - pr_err("Error in configuring ki_coeff_full_soc rc:%d\n", - rc); + online = online || prop.intval; } - fg_batt_avg_update(chip); -out: - fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n", - chip->charge_status, chip->charge_type, chip->charge_done); - pm_relax(chip->dev); + if (chip->online_status == online) + return; + + chip->online_status = online; + if (online) + /* wait 35 seconds for the input to settle */ + delay_ms = 35000; + else + /* wait 5 seconds for current to settle during discharge */ + delay_ms = 5000; + + vote(chip->awake_votable, TTF_PRIMING, true, 0); + cancel_delayed_work_sync(&chip->ttf_work); + mutex_lock(&chip->ttf.lock); + fg_circ_buf_clr(&chip->ttf.ibatt); + fg_circ_buf_clr(&chip->ttf.vbatt); + chip->ttf.last_ttf = 0; + chip->ttf.last_ms = 0; + mutex_unlock(&chip->ttf.lock); + schedule_delayed_work(&chip->ttf_work, msecs_to_jiffies(delay_ms)); } static void restore_cycle_counter(struct fg_chip *chip) @@ -2233,6 +2237,9 @@ static void restore_cycle_counter(struct fg_chip *chip) int rc = 0, i; u8 data[2]; + if (!chip->cyc_ctr.en) + return; + mutex_lock(&chip->cyc_ctr.lock); for (i = 0; i < BUCKET_COUNT; i++) { rc = fg_sram_read(chip, CYCLE_COUNT_WORD + (i / 2), @@ -2286,20 +2293,25 @@ static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket) rc = fg_sram_write(chip, CYCLE_COUNT_WORD + (bucket / 2), CYCLE_COUNT_OFFSET + (bucket % 2) * 2, data, 2, FG_IMA_DEFAULT); - if (rc < 0) + if (rc < 0) { pr_err("failed to write BATT_CYCLE[%d] rc=%d\n", bucket, rc); - else - chip->cyc_ctr.count[bucket] = cyc_count; + return rc; + } + + chip->cyc_ctr.count[bucket] = cyc_count; + fg_dbg(chip, FG_STATUS, "Stored count %d in bucket %d\n", cyc_count, + bucket); + return rc; } -static void cycle_count_work(struct work_struct *work) +static void fg_cycle_counter_update(struct fg_chip *chip) { int rc = 0, bucket, i, batt_soc; - struct fg_chip *chip = container_of(work, - struct fg_chip, - cycle_count_work); + + if (!chip->cyc_ctr.en) + return; mutex_lock(&chip->cyc_ctr.lock); rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc); @@ -2311,45 +2323,30 @@ static void cycle_count_work(struct work_struct *work) /* We need only the 
most significant byte here */ batt_soc = (u32)batt_soc >> 24; - if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) { - /* Find out which bucket the SOC falls in */ - bucket = batt_soc / BUCKET_SOC_PCT; - pr_debug("batt_soc: %d bucket: %d\n", batt_soc, bucket); + /* Find out which bucket the SOC falls in */ + bucket = batt_soc / BUCKET_SOC_PCT; - /* - * If we've started counting for the previous bucket, - * then store the counter for that bucket if the - * counter for current bucket is getting started. - */ - if (bucket > 0 && chip->cyc_ctr.started[bucket - 1] && - !chip->cyc_ctr.started[bucket]) { - rc = fg_inc_store_cycle_ctr(chip, bucket - 1); - if (rc < 0) { - pr_err("Error in storing cycle_ctr rc: %d\n", - rc); - goto out; - } else { - chip->cyc_ctr.started[bucket - 1] = false; - chip->cyc_ctr.last_soc[bucket - 1] = 0; - } - } + if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) { if (!chip->cyc_ctr.started[bucket]) { chip->cyc_ctr.started[bucket] = true; chip->cyc_ctr.last_soc[bucket] = batt_soc; } - } else { + } else if (chip->charge_done || !is_input_present(chip)) { for (i = 0; i < BUCKET_COUNT; i++) { if (chip->cyc_ctr.started[i] && - batt_soc > chip->cyc_ctr.last_soc[i]) { + batt_soc > chip->cyc_ctr.last_soc[i] + 2) { rc = fg_inc_store_cycle_ctr(chip, i); if (rc < 0) pr_err("Error in storing cycle_ctr rc: %d\n", rc); chip->cyc_ctr.last_soc[i] = 0; + chip->cyc_ctr.started[i] = false; } - chip->cyc_ctr.started[i] = false; } } + + fg_dbg(chip, FG_STATUS, "batt_soc: %d bucket: %d chg_status: %d\n", + batt_soc, bucket, chip->charge_status); out: mutex_unlock(&chip->cyc_ctr.lock); } @@ -2370,6 +2367,83 @@ static int fg_get_cycle_count(struct fg_chip *chip) return count; } +static void status_change_work(struct work_struct *work) +{ + struct fg_chip *chip = container_of(work, + struct fg_chip, status_change_work); + union power_supply_propval prop = {0, }; + int rc, batt_temp; + + if (!batt_psy_initialized(chip)) { + fg_dbg(chip, FG_STATUS, "Charger not available?!\n"); + goto out; + } + + rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS, + &prop); + if (rc < 0) { + pr_err("Error in getting charging status, rc=%d\n", rc); + goto out; + } + + chip->prev_charge_status = chip->charge_status; + chip->charge_status = prop.intval; + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_CHARGE_TYPE, &prop); + if (rc < 0) { + pr_err("Error in getting charge type, rc=%d\n", rc); + goto out; + } + + chip->charge_type = prop.intval; + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_CHARGE_DONE, &prop); + if (rc < 0) { + pr_err("Error in getting charge_done, rc=%d\n", rc); + goto out; + } + + chip->charge_done = prop.intval; + fg_cycle_counter_update(chip); + fg_cap_learning_update(chip); + + rc = fg_charge_full_update(chip); + if (rc < 0) + pr_err("Error in charge_full_update, rc=%d\n", rc); + + rc = fg_adjust_recharge_soc(chip); + if (rc < 0) + pr_err("Error in adjusting recharge_soc, rc=%d\n", rc); + + rc = fg_adjust_ki_coeff_dischg(chip); + if (rc < 0) + pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc); + + rc = fg_esr_fcc_config(chip); + if (rc < 0) + pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc); + + rc = fg_get_battery_temp(chip, &batt_temp); + if (!rc) { + rc = fg_slope_limit_config(chip, batt_temp); + if (rc < 0) + pr_err("Error in configuring slope limiter rc:%d\n", + rc); + + rc = fg_adjust_ki_coeff_full_soc(chip, batt_temp); + if (rc < 0) + pr_err("Error in configuring ki_coeff_full_soc rc:%d\n", + rc); 
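The cycle-counter rework above moves the logic out of a dedicated work item into fg_cycle_counter_update(), and now only credits a bucket at charge-done or input removal, once the SOC has risen more than 2 raw counts past where counting started. A user-space sketch of that bookkeeping; BUCKET_COUNT and the 8-bit raw SOC are assumptions that mirror the driver's constants:

#include <stdbool.h>
#include <stdio.h>

#define BUCKET_COUNT	8
#define BUCKET_SOC_PCT	(256 / BUCKET_COUNT)

static bool started[BUCKET_COUNT];
static int last_soc[BUCKET_COUNT];
static int count[BUCKET_COUNT];

static void cycle_counter_update(int batt_soc, bool charging, bool done)
{
	int bucket = batt_soc / BUCKET_SOC_PCT;
	int i;

	if (charging) {
		if (!started[bucket]) {	/* begin tracking this bucket */
			started[bucket] = true;
			last_soc[bucket] = batt_soc;
		}
	} else if (done) {
		for (i = 0; i < BUCKET_COUNT; i++) {
			/* +2 hysteresis: only count real SOC movement */
			if (started[i] && batt_soc > last_soc[i] + 2) {
				count[i]++;
				last_soc[i] = 0;
				started[i] = false;
			}
		}
	}
}

int main(void)
{
	cycle_counter_update(100, true, false);	/* charging in bucket 3 */
	cycle_counter_update(140, false, true);	/* done: bucket 3 credited */
	printf("bucket 3 count = %d\n", count[3]);	/* 1 */
	return 0;
}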
+ } + + fg_ttf_update(chip); + +out: + fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n", + chip->charge_status, chip->charge_type, chip->charge_done); + pm_relax(chip->dev); +} + static int fg_bp_params_config(struct fg_chip *chip) { int rc = 0; @@ -2730,45 +2804,19 @@ static struct kernel_param_ops fg_restart_ops = { module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644); -#define BATT_AVG_POLL_PERIOD_MS 10000 -static void batt_avg_work(struct work_struct *work) -{ - struct fg_chip *chip = container_of(work, struct fg_chip, - batt_avg_work.work); - int rc, ibatt_now, vbatt_now; - - mutex_lock(&chip->batt_avg_lock); - rc = fg_get_battery_current(chip, &ibatt_now); - if (rc < 0) { - pr_err("failed to get battery current, rc=%d\n", rc); - goto reschedule; - } - - rc = fg_get_battery_voltage(chip, &vbatt_now); - if (rc < 0) { - pr_err("failed to get battery voltage, rc=%d\n", rc); - goto reschedule; - } - - fg_circ_buf_add(&chip->ibatt_circ_buf, ibatt_now); - fg_circ_buf_add(&chip->vbatt_circ_buf, vbatt_now); - -reschedule: - mutex_unlock(&chip->batt_avg_lock); - schedule_delayed_work(&chip->batt_avg_work, - msecs_to_jiffies(BATT_AVG_POLL_PERIOD_MS)); -} - #define HOURS_TO_SECONDS 3600 #define OCV_SLOPE_UV 10869 #define MILLI_UNIT 1000 #define MICRO_UNIT 1000000 -static int fg_get_time_to_full(struct fg_chip *chip, int *val) +#define NANO_UNIT 1000000000 +static int fg_get_time_to_full_locked(struct fg_chip *chip, int *val) { - int rc, ibatt_avg, vbatt_avg, rbatt, msoc, ocv_cc2cv, full_soc, - act_cap_uah; - s32 i_cc2cv, soc_cc2cv, ln_val, centi_tau_scale; - s64 t_predicted_cc = 0, t_predicted_cv = 0; + int rc, ibatt_avg, vbatt_avg, rbatt, msoc, full_soc, act_cap_mah, + i_cc2cv, soc_cc2cv, tau, divisor, iterm, ttf_mode, + i, soc_per_step, msoc_this_step, msoc_next_step, + ibatt_this_step, t_predicted_this_step, ttf_slope, + t_predicted_cv, t_predicted = 0; + s64 delta_ms; if (chip->bp.float_volt_uv <= 0) { pr_err("battery profile is not loaded\n"); @@ -2787,48 +2835,53 @@ static int fg_get_time_to_full(struct fg_chip *chip, int *val) } fg_dbg(chip, FG_TTF, "msoc=%d\n", msoc); + /* the battery is considered full if the SOC is 100% */ if (msoc >= 100) { *val = 0; return 0; } - mutex_lock(&chip->batt_avg_lock); - rc = fg_circ_buf_avg(&chip->ibatt_circ_buf, &ibatt_avg); - if (rc < 0) { - /* try to get instantaneous current */ - rc = fg_get_battery_current(chip, &ibatt_avg); - if (rc < 0) { - mutex_unlock(&chip->batt_avg_lock); - pr_err("failed to get battery current, rc=%d\n", rc); - return rc; - } + if (is_qnovo_en(chip)) + ttf_mode = TTF_MODE_QNOVO; + else + ttf_mode = TTF_MODE_NORMAL; + + /* when switching TTF algorithms the TTF needs to be reset */ + if (chip->ttf.mode != ttf_mode) { + fg_circ_buf_clr(&chip->ttf.ibatt); + fg_circ_buf_clr(&chip->ttf.vbatt); + chip->ttf.last_ttf = 0; + chip->ttf.last_ms = 0; + chip->ttf.mode = ttf_mode; } - rc = fg_circ_buf_avg(&chip->vbatt_circ_buf, &vbatt_avg); + /* at least 10 samples are required to produce a stable IBATT */ + if (chip->ttf.ibatt.size < 10) { + *val = -1; + return 0; + } + + rc = fg_circ_buf_median(&chip->ttf.ibatt, &ibatt_avg); if (rc < 0) { - /* try to get instantaneous voltage */ - rc = fg_get_battery_voltage(chip, &vbatt_avg); - if (rc < 0) { - mutex_unlock(&chip->batt_avg_lock); - pr_err("failed to get battery voltage, rc=%d\n", rc); - return rc; - } + pr_err("failed to get IBATT AVG rc=%d\n", rc); + return rc; } - mutex_unlock(&chip->batt_avg_lock); - fg_dbg(chip, FG_TTF, "vbatt_avg=%d\n", 
vbatt_avg); + rc = fg_circ_buf_median(&chip->ttf.vbatt, &vbatt_avg); + if (rc < 0) { + pr_err("failed to get VBATT AVG rc=%d\n", rc); + return rc; + } - /* clamp ibatt_avg to -150mA */ - if (ibatt_avg > -150000) - ibatt_avg = -150000; - fg_dbg(chip, FG_TTF, "ibatt_avg=%d\n", ibatt_avg); + ibatt_avg = -ibatt_avg / MILLI_UNIT; + vbatt_avg /= MILLI_UNIT; - /* reverse polarity to be consistent with unsigned current settings */ - ibatt_avg = abs(ibatt_avg); + /* clamp ibatt_avg to iterm */ + if (ibatt_avg < abs(chip->dt.sys_term_curr_ma)) + ibatt_avg = abs(chip->dt.sys_term_curr_ma); - /* estimated battery current at the CC to CV transition */ - i_cc2cv = div_s64((s64)ibatt_avg * vbatt_avg, chip->bp.float_volt_uv); - fg_dbg(chip, FG_TTF, "i_cc2cv=%d\n", i_cc2cv); + fg_dbg(chip, FG_TTF, "ibatt_avg=%d\n", ibatt_avg); + fg_dbg(chip, FG_TTF, "vbatt_avg=%d\n", vbatt_avg); rc = fg_get_battery_resistance(chip, &rbatt); if (rc < 0) { @@ -2836,19 +2889,14 @@ static int fg_get_time_to_full(struct fg_chip *chip, int *val) return rc; } - /* clamp rbatt to 50mOhms */ - if (rbatt < 50000) - rbatt = 50000; - + rbatt /= MILLI_UNIT; fg_dbg(chip, FG_TTF, "rbatt=%d\n", rbatt); - rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_uah); + rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah); if (rc < 0) { pr_err("failed to get ACT_BATT_CAP rc=%d\n", rc); return rc; } - act_cap_uah *= MILLI_UNIT; - fg_dbg(chip, FG_TTF, "actual_capacity_uah=%d\n", act_cap_uah); rc = fg_get_sram_prop(chip, FG_SRAM_FULL_SOC, &full_soc); if (rc < 0) { @@ -2857,69 +2905,148 @@ static int fg_get_time_to_full(struct fg_chip *chip, int *val) } full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * FULL_CAPACITY, FULL_SOC_RAW); - fg_dbg(chip, FG_TTF, "full_soc=%d\n", full_soc); + act_cap_mah = full_soc * act_cap_mah / 100; + fg_dbg(chip, FG_TTF, "act_cap_mah=%d\n", act_cap_mah); + + /* estimated battery current at the CC to CV transition */ + switch (chip->ttf.mode) { + case TTF_MODE_NORMAL: + i_cc2cv = ibatt_avg * vbatt_avg / + max(MILLI_UNIT, chip->bp.float_volt_uv / MILLI_UNIT); + break; + case TTF_MODE_QNOVO: + i_cc2cv = min( + chip->ttf.cc_step.arr[MAX_CC_STEPS - 1] / MILLI_UNIT, + ibatt_avg * vbatt_avg / + max(MILLI_UNIT, chip->bp.float_volt_uv / MILLI_UNIT)); + break; + default: + pr_err("TTF mode %d is not supported\n", chip->ttf.mode); + break; + } + fg_dbg(chip, FG_TTF, "i_cc2cv=%d\n", i_cc2cv); /* if we are already in CV state then we can skip estimating CC */ if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER) - goto skip_cc_estimate; - - /* if the charger is current limited then use power approximation */ - if (ibatt_avg > chip->bp.fastchg_curr_ma * MILLI_UNIT - 50000) - ocv_cc2cv = div_s64((s64)rbatt * ibatt_avg, MICRO_UNIT); - else - ocv_cc2cv = div_s64((s64)rbatt * i_cc2cv, MICRO_UNIT); - ocv_cc2cv = chip->bp.float_volt_uv - ocv_cc2cv; - fg_dbg(chip, FG_TTF, "ocv_cc2cv=%d\n", ocv_cc2cv); + goto cv_estimate; - soc_cc2cv = div_s64(chip->bp.float_volt_uv - ocv_cc2cv, OCV_SLOPE_UV); /* estimated SOC at the CC to CV transition */ + soc_cc2cv = DIV_ROUND_CLOSEST(rbatt * i_cc2cv, OCV_SLOPE_UV); soc_cc2cv = 100 - soc_cc2cv; fg_dbg(chip, FG_TTF, "soc_cc2cv=%d\n", soc_cc2cv); - /* the esimated SOC may be lower than the current SOC */ - if (soc_cc2cv - msoc <= 0) - goto skip_cc_estimate; + switch (chip->ttf.mode) { + case TTF_MODE_NORMAL: + if (soc_cc2cv - msoc <= 0) + goto cv_estimate; + + divisor = max(100, (ibatt_avg + i_cc2cv) / 2 * 100); + t_predicted = div_s64((s64)act_cap_mah * (soc_cc2cv - msoc) * + 
HOURS_TO_SECONDS, divisor); + break; + case TTF_MODE_QNOVO: + soc_per_step = 100 / MAX_CC_STEPS; + for (i = msoc / soc_per_step; i < MAX_CC_STEPS - 1; ++i) { + msoc_next_step = (i + 1) * soc_per_step; + if (i == msoc / soc_per_step) + msoc_this_step = msoc; + else + msoc_this_step = i * soc_per_step; + + /* scale ibatt by 85% to account for discharge pulses */ + ibatt_this_step = min( + chip->ttf.cc_step.arr[i] / MILLI_UNIT, + ibatt_avg) * 85 / 100; + divisor = max(100, ibatt_this_step * 100); + t_predicted_this_step = div_s64((s64)act_cap_mah * + (msoc_next_step - msoc_this_step) * + HOURS_TO_SECONDS, divisor); + t_predicted += t_predicted_this_step; + fg_dbg(chip, FG_TTF, "[%d, %d] ma=%d t=%d\n", + msoc_this_step, msoc_next_step, + ibatt_this_step, t_predicted_this_step); + } + break; + default: + pr_err("TTF mode %d is not supported\n", chip->ttf.mode); + break; + } - t_predicted_cc = div_s64((s64)full_soc * act_cap_uah, 100); - t_predicted_cc = div_s64(t_predicted_cc * (soc_cc2cv - msoc), 100); - t_predicted_cc *= HOURS_TO_SECONDS; - t_predicted_cc = div_s64(t_predicted_cc, (ibatt_avg + i_cc2cv) / 2); +cv_estimate: + fg_dbg(chip, FG_TTF, "t_predicted_cc=%d\n", t_predicted); -skip_cc_estimate: - fg_dbg(chip, FG_TTF, "t_predicted_cc=%lld\n", t_predicted_cc); + iterm = max(100, abs(chip->dt.sys_term_curr_ma) + 200); + fg_dbg(chip, FG_TTF, "iterm=%d\n", iterm); - /* CV estimate starts here */ - if (chip->charge_type >= POWER_SUPPLY_CHARGE_TYPE_TAPER) - ln_val = ibatt_avg / (abs(chip->dt.sys_term_curr_ma) + 200); + if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_TAPER) + tau = max(MILLI_UNIT, ibatt_avg * MILLI_UNIT / iterm); else - ln_val = i_cc2cv / (abs(chip->dt.sys_term_curr_ma) + 200); + tau = max(MILLI_UNIT, i_cc2cv * MILLI_UNIT / iterm); - if (msoc < 95) - centi_tau_scale = 100; - else - centi_tau_scale = 20 * (100 - msoc); - - fg_dbg(chip, FG_TTF, "ln_in=%d\n", ln_val); - rc = fg_lerp(fg_ln_table, ARRAY_SIZE(fg_ln_table), ln_val, &ln_val); - fg_dbg(chip, FG_TTF, "ln_out=%d\n", ln_val); - t_predicted_cv = div_s64((s64)act_cap_uah * rbatt, MICRO_UNIT); - t_predicted_cv = div_s64(t_predicted_cv * centi_tau_scale, 100); - t_predicted_cv = div_s64(t_predicted_cv * ln_val, MILLI_UNIT); - t_predicted_cv = div_s64(t_predicted_cv * HOURS_TO_SECONDS, MICRO_UNIT); - fg_dbg(chip, FG_TTF, "t_predicted_cv=%lld\n", t_predicted_cv); - *val = t_predicted_cc + t_predicted_cv; + rc = fg_lerp(fg_ln_table, ARRAY_SIZE(fg_ln_table), tau, &tau); + if (rc < 0) { + pr_err("failed to interpolate tau rc=%d\n", rc); + return rc; + } + + /* tau is scaled linearly from 95% to 100% SOC */ + if (msoc >= 95) + tau = tau * 2 * (100 - msoc) / 10; + + fg_dbg(chip, FG_TTF, "tau=%d\n", tau); + t_predicted_cv = div_s64((s64)act_cap_mah * rbatt * tau * + HOURS_TO_SECONDS, NANO_UNIT); + fg_dbg(chip, FG_TTF, "t_predicted_cv=%d\n", t_predicted_cv); + t_predicted += t_predicted_cv; + + fg_dbg(chip, FG_TTF, "t_predicted_prefilter=%d\n", t_predicted); + if (chip->ttf.last_ms != 0) { + delta_ms = ktime_ms_delta(ktime_get_boottime(), + ms_to_ktime(chip->ttf.last_ms)); + if (delta_ms > 10000) { + ttf_slope = div64_s64( + (s64)(t_predicted - chip->ttf.last_ttf) * + MICRO_UNIT, delta_ms); + if (ttf_slope > -100) + ttf_slope = -100; + else if (ttf_slope < -2000) + ttf_slope = -2000; + + t_predicted = div_s64( + (s64)ttf_slope * delta_ms, MICRO_UNIT) + + chip->ttf.last_ttf; + fg_dbg(chip, FG_TTF, "ttf_slope=%d\n", ttf_slope); + } else { + t_predicted = chip->ttf.last_ttf; + } + } + + /* clamp the ttf to 0 */ + if (t_predicted < 0) + 
t_predicted = 0; + + fg_dbg(chip, FG_TTF, "t_predicted_postfilter=%d\n", t_predicted); + *val = t_predicted; return 0; } +static int fg_get_time_to_full(struct fg_chip *chip, int *val) +{ + int rc; + + mutex_lock(&chip->ttf.lock); + rc = fg_get_time_to_full_locked(chip, val); + mutex_unlock(&chip->ttf.lock); + return rc; +} + #define CENTI_ICORRECT_C0 105 #define CENTI_ICORRECT_C1 20 static int fg_get_time_to_empty(struct fg_chip *chip, int *val) { - int rc, ibatt_avg, msoc, act_cap_uah; - s32 divisor; - s64 t_predicted; + int rc, ibatt_avg, msoc, full_soc, act_cap_mah, divisor; - rc = fg_circ_buf_avg(&chip->ibatt_circ_buf, &ibatt_avg); + rc = fg_circ_buf_median(&chip->ttf.ibatt, &ibatt_avg); if (rc < 0) { /* try to get instantaneous current */ rc = fg_get_battery_current(chip, &ibatt_avg); @@ -2929,31 +3056,36 @@ static int fg_get_time_to_empty(struct fg_chip *chip, int *val) } } - /* clamp ibatt_avg to 150mA */ - if (ibatt_avg < 150000) - ibatt_avg = 150000; + ibatt_avg /= MILLI_UNIT; + /* clamp ibatt_avg to 100mA */ + if (ibatt_avg < 100) + ibatt_avg = 100; + + rc = fg_get_prop_capacity(chip, &msoc); + if (rc < 0) { + pr_err("Error in getting capacity, rc=%d\n", rc); + return rc; + } - rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_uah); + rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah); if (rc < 0) { pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc); return rc; } - act_cap_uah *= MILLI_UNIT; - rc = fg_get_prop_capacity(chip, &msoc); + rc = fg_get_sram_prop(chip, FG_SRAM_FULL_SOC, &full_soc); if (rc < 0) { - pr_err("Error in getting capacity, rc=%d\n", rc); + pr_err("failed to get full soc rc=%d\n", rc); return rc; } + full_soc = DIV_ROUND_CLOSEST(((u16)full_soc >> 8) * FULL_CAPACITY, + FULL_SOC_RAW); + act_cap_mah = full_soc * act_cap_mah / 100; - t_predicted = div_s64((s64)msoc * act_cap_uah, 100); - t_predicted *= HOURS_TO_SECONDS; divisor = CENTI_ICORRECT_C0 * 100 + CENTI_ICORRECT_C1 * msoc; - divisor = div_s64((s64)divisor * ibatt_avg, 10000); - if (divisor > 0) - t_predicted = div_s64(t_predicted, divisor); - - *val = t_predicted; + divisor = ibatt_avg * divisor / 100; + divisor = max(100, divisor); + *val = act_cap_mah * msoc * HOURS_TO_SECONDS / divisor; return 0; } @@ -3032,6 +3164,150 @@ static int fg_esr_validate(struct fg_chip *chip) return 0; } +static int fg_force_esr_meas(struct fg_chip *chip) +{ + int rc; + int esr_uohms; + + /* force esr extraction enable */ + rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD, + ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), BIT(0), + FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("failed to enable esr extn rc=%d\n", rc); + return rc; + } + + rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip), + LD_REG_CTRL_BIT, 0); + if (rc < 0) { + pr_err("Error in configuring qnovo_cfg rc=%d\n", rc); + return rc; + } + + rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip), + ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT, + ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT); + if (rc < 0) { + pr_err("Error in configuring force ESR rc=%d\n", rc); + return rc; + } + + /* wait 1.5 seconds for hw to measure ESR */ + msleep(1500); + rc = fg_masked_write(chip, BATT_INFO_TM_MISC1(chip), + ESR_REQ_CTL_BIT | ESR_REQ_CTL_EN_BIT, + 0); + if (rc < 0) { + pr_err("Error in restoring force ESR rc=%d\n", rc); + return rc; + } + + rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip), + LD_REG_CTRL_BIT, LD_REG_CTRL_BIT); + if (rc < 0) { + pr_err("Error in restoring qnovo_cfg rc=%d\n", rc); + return rc; + } + + /* force esr extraction disable */ + rc = 
fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD, + ESR_EXTRACTION_ENABLE_OFFSET, BIT(0), 0, + FG_IMA_DEFAULT); + if (rc < 0) { + pr_err("failed to disable esr extn rc=%d\n", rc); + return rc; + } + + fg_get_battery_resistance(chip, &esr_uohms); + fg_dbg(chip, FG_STATUS, "ESR uohms = %d\n", esr_uohms); + + return rc; +} + +static int fg_prepare_for_qnovo(struct fg_chip *chip, int qnovo_enable) +{ + int rc; + + /* force esr extraction disable when qnovo enables */ + rc = fg_sram_masked_write(chip, ESR_EXTRACTION_ENABLE_WORD, + ESR_EXTRACTION_ENABLE_OFFSET, + BIT(0), qnovo_enable ? 0 : BIT(0), + FG_IMA_DEFAULT); + if (rc < 0) + pr_err("Error in configuring esr extraction rc=%d\n", rc); + + rc = fg_masked_write(chip, BATT_INFO_QNOVO_CFG(chip), + LD_REG_CTRL_BIT, + qnovo_enable ? LD_REG_CTRL_BIT : 0); + if (rc < 0) { + pr_err("Error in configuring qnovo_cfg rc=%d\n", rc); + return rc; + } + fg_dbg(chip, FG_STATUS, "Prepared for Qnovo\n"); + return 0; +} + +static void ttf_work(struct work_struct *work) +{ + struct fg_chip *chip = container_of(work, struct fg_chip, + ttf_work.work); + int rc, ibatt_now, vbatt_now, ttf; + ktime_t ktime_now; + + mutex_lock(&chip->ttf.lock); + if (chip->charge_status != POWER_SUPPLY_STATUS_CHARGING && + chip->charge_status != POWER_SUPPLY_STATUS_DISCHARGING) + goto end_work; + + rc = fg_get_battery_current(chip, &ibatt_now); + if (rc < 0) { + pr_err("failed to get battery current, rc=%d\n", rc); + goto end_work; + } + + rc = fg_get_battery_voltage(chip, &vbatt_now); + if (rc < 0) { + pr_err("failed to get battery voltage, rc=%d\n", rc); + goto end_work; + } + + fg_circ_buf_add(&chip->ttf.ibatt, ibatt_now); + fg_circ_buf_add(&chip->ttf.vbatt, vbatt_now); + + if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING) { + rc = fg_get_time_to_full_locked(chip, &ttf); + if (rc < 0) { + pr_err("failed to get ttf, rc=%d\n", rc); + goto end_work; + } + + /* keep the wake lock and prime the IBATT and VBATT buffers */ + if (ttf < 0) { + /* delay for one FG cycle */ + schedule_delayed_work(&chip->ttf_work, + msecs_to_jiffies(1500)); + mutex_unlock(&chip->ttf.lock); + return; + } + + /* update the TTF reference point every minute */ + ktime_now = ktime_get_boottime(); + if (ktime_ms_delta(ktime_now, + ms_to_ktime(chip->ttf.last_ms)) > 60000 || + chip->ttf.last_ms == 0) { + chip->ttf.last_ttf = ttf; + chip->ttf.last_ms = ktime_to_ms(ktime_now); + } + } + + /* recurse every 10 seconds */ + schedule_delayed_work(&chip->ttf_work, msecs_to_jiffies(10000)); +end_work: + vote(chip->awake_votable, TTF_PRIMING, false, 0); + mutex_unlock(&chip->ttf.lock); +} + /* PSY CALLBACKS STAY HERE */ static int fg_psy_get_property(struct power_supply *psy, @@ -3108,6 +3384,20 @@ static int fg_psy_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: rc = fg_get_sram_prop(chip, FG_SRAM_VBATT_FULL, &pval->intval); break; + case POWER_SUPPLY_PROP_CC_STEP: + if ((chip->ttf.cc_step.sel >= 0) && + (chip->ttf.cc_step.sel < MAX_CC_STEPS)) { + pval->intval = + chip->ttf.cc_step.arr[chip->ttf.cc_step.sel]; + } else { + pr_err("cc_step_sel is out of bounds [0, %d]\n", + chip->ttf.cc_step.sel); + return -EINVAL; + } + break; + case POWER_SUPPLY_PROP_CC_STEP_SEL: + pval->intval = chip->ttf.cc_step.sel; + break; default: pr_err("unsupported property %d\n", psp); rc = -EINVAL; @@ -3140,6 +3430,32 @@ static int fg_psy_set_property(struct power_supply *psy, case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: rc = fg_set_constant_chg_voltage(chip, pval->intval); break; + case 
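Both time estimators now start from fg_circ_buf_median() on the sampled battery current rather than fg_circ_buf_avg(), presumably because a median is robust to the discharge pulses Qnovo injects. The helper's body is not part of this hunk; a plausible userspace-style model, with a hypothetical struct and names:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define BUF_LEN 10

struct circ_buf_model {
	int arr[BUF_LEN];
	int size;			/* number of valid samples */
};

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

static int circ_buf_median(const struct circ_buf_model *buf, int *median)
{
	int tmp[BUF_LEN];

	if (buf->size == 0)
		return -ENODATA;	/* caller falls back to the instantaneous reading */

	memcpy(tmp, buf->arr, buf->size * sizeof(int));
	qsort(tmp, buf->size, sizeof(int), cmp_int);
	*median = (buf->size % 2) ? tmp[buf->size / 2] :
		(tmp[buf->size / 2 - 1] + tmp[buf->size / 2]) / 2;
	return 0;
}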
POWER_SUPPLY_PROP_RESISTANCE: + rc = fg_force_esr_meas(chip); + break; + case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE: + rc = fg_prepare_for_qnovo(chip, pval->intval); + break; + case POWER_SUPPLY_PROP_CC_STEP: + if ((chip->ttf.cc_step.sel >= 0) && + (chip->ttf.cc_step.sel < MAX_CC_STEPS)) { + chip->ttf.cc_step.arr[chip->ttf.cc_step.sel] = + pval->intval; + } else { + pr_err("cc_step_sel is out of bounds [0, %d]\n", + chip->ttf.cc_step.sel); + return -EINVAL; + } + break; + case POWER_SUPPLY_PROP_CC_STEP_SEL: + if ((pval->intval >= 0) && (pval->intval < MAX_CC_STEPS)) { + chip->ttf.cc_step.sel = pval->intval; + } else { + pr_err("cc_step_sel is out of bounds [0, %d]\n", + pval->intval); + return -EINVAL; + } + break; default: break; } @@ -3153,6 +3469,8 @@ static int fg_property_is_writeable(struct power_supply *psy, switch (psp) { case POWER_SUPPLY_PROP_CYCLE_COUNT_ID: case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: + case POWER_SUPPLY_PROP_CC_STEP: + case POWER_SUPPLY_PROP_CC_STEP_SEL: return 1; default: break; @@ -3213,6 +3531,8 @@ static enum power_supply_property fg_psy_props[] = { POWER_SUPPLY_PROP_SOC_REPORTING_READY, POWER_SUPPLY_PROP_DEBUG_BATTERY, POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, + POWER_SUPPLY_PROP_CC_STEP, + POWER_SUPPLY_PROP_CC_STEP_SEL, }; static const struct power_supply_desc fg_psy_desc = { @@ -3423,8 +3743,7 @@ static int fg_hw_init(struct fg_chip *chip) return rc; } - if (chip->cyc_ctr.en) - restore_cycle_counter(chip); + restore_cycle_counter(chip); if (chip->dt.jeita_hyst_temp >= 0) { val = chip->dt.jeita_hyst_temp << JEITA_TEMP_HYST_SHIFT; @@ -3698,8 +4017,7 @@ static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data) int rc; fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq); - if (chip->cyc_ctr.en) - schedule_work(&chip->cycle_count_work); + fg_cycle_counter_update(chip); if (chip->cl.active) fg_cap_learning_update(chip); @@ -4394,6 +4712,7 @@ static int fg_gen3_probe(struct platform_device *pdev) chip->charge_status = -EINVAL; chip->prev_charge_status = -EINVAL; chip->ki_coeff_full_soc = -EINVAL; + chip->online_status = -EINVAL; chip->regmap = dev_get_regmap(chip->dev->parent, NULL); if (!chip->regmap) { dev_err(chip->dev, "Parent regmap is unavailable\n"); @@ -4462,14 +4781,13 @@ static int fg_gen3_probe(struct platform_device *pdev) mutex_init(&chip->sram_rw_lock); mutex_init(&chip->cyc_ctr.lock); mutex_init(&chip->cl.lock); - mutex_init(&chip->batt_avg_lock); + mutex_init(&chip->ttf.lock); mutex_init(&chip->charge_full_lock); init_completion(&chip->soc_update); init_completion(&chip->soc_ready); INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work); INIT_WORK(&chip->status_change_work, status_change_work); - INIT_WORK(&chip->cycle_count_work, cycle_count_work); - INIT_DELAYED_WORK(&chip->batt_avg_work, batt_avg_work); + INIT_DELAYED_WORK(&chip->ttf_work, ttf_work); INIT_DELAYED_WORK(&chip->sram_dump_work, sram_dump_work); rc = fg_memif_init(chip); @@ -4566,7 +4884,7 @@ static int fg_gen3_suspend(struct device *dev) if (rc < 0) pr_err("Error in configuring ESR timer, rc=%d\n", rc); - cancel_delayed_work_sync(&chip->batt_avg_work); + cancel_delayed_work_sync(&chip->ttf_work); if (fg_sram_dump) cancel_delayed_work_sync(&chip->sram_dump_work); return 0; @@ -4581,9 +4899,7 @@ static int fg_gen3_resume(struct device *dev) if (rc < 0) pr_err("Error in configuring ESR timer, rc=%d\n", rc); - fg_circ_buf_clr(&chip->ibatt_circ_buf); - fg_circ_buf_clr(&chip->vbatt_circ_buf); - schedule_delayed_work(&chip->batt_avg_work, 0); + 
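The CC_STEP/CC_STEP_SEL pair added here forms a small indexed protocol: a client selects one of the MAX_CC_STEPS slots, then reads or writes the current for that slot. A hedged sketch of a client programming one entry (set_cc_step is illustrative; the property calls are the standard power-supply API):

#include <linux/power_supply.h>

static int set_cc_step(struct power_supply *bms, int index, int ua)
{
	union power_supply_propval pval = { .intval = index };
	int rc;

	/* select the slot first; the FG rejects out-of-range indices */
	rc = power_supply_set_property(bms, POWER_SUPPLY_PROP_CC_STEP_SEL, &pval);
	if (rc < 0)
		return rc;

	pval.intval = ua;
	return power_supply_set_property(bms, POWER_SUPPLY_PROP_CC_STEP, &pval);
}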
schedule_delayed_work(&chip->ttf_work, 0); if (fg_sram_dump) schedule_delayed_work(&chip->sram_dump_work, msecs_to_jiffies(fg_sram_dump_period_ms)); diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c index eb97eb0ff2ac..cf90f9041935 100644 --- a/drivers/power/supply/qcom/qpnp-qnovo.c +++ b/drivers/power/supply/qcom/qpnp-qnovo.c @@ -20,6 +20,7 @@ #include <linux/of_irq.h> #include <linux/qpnp/qpnp-revid.h> #include <linux/pmic-voter.h> +#include <linux/delay.h> #define QNOVO_REVISION1 0x00 #define QNOVO_REVISION2 0x01 @@ -114,6 +115,17 @@ #define OK_TO_QNOVO_VOTER "ok_to_qnovo_voter" #define QNOVO_VOTER "qnovo_voter" +#define FG_AVAILABLE_VOTER "FG_AVAILABLE_VOTER" +#define QNOVO_OVERALL_VOTER "QNOVO_OVERALL_VOTER" +#define QNI_PT_VOTER "QNI_PT_VOTER" +#define ESR_VOTER "ESR_VOTER" + +#define HW_OK_TO_QNOVO_VOTER "HW_OK_TO_QNOVO_VOTER" +#define CHG_READY_VOTER "CHG_READY_VOTER" +#define USB_READY_VOTER "USB_READY_VOTER" +#define DC_READY_VOTER "DC_READY_VOTER" + +#define PT_RESTART_VOTER "PT_RESTART_VOTER" struct qnovo_dt_props { bool external_rsense; @@ -127,6 +139,10 @@ struct qnovo { struct qnovo_dt_props dt; struct device *dev; struct votable *disable_votable; + struct votable *pt_dis_votable; + struct votable *not_ok_to_qnovo_votable; + struct votable *chg_ready_votable; + struct votable *awake_votable; struct class qnovo_class; struct pmic_revid_data *pmic_rev_id; u32 wa_flags; @@ -138,10 +154,18 @@ struct qnovo { s64 v_gain_mega; struct notifier_block nb; struct power_supply *batt_psy; + struct power_supply *bms_psy; + struct power_supply *usb_psy; + struct power_supply *dc_psy; struct work_struct status_change_work; int fv_uV_request; int fcc_uA_request; - bool ok_to_qnovo; + int usb_present; + int dc_present; + struct delayed_work usb_debounce_work; + struct delayed_work dc_debounce_work; + + struct delayed_work ptrain_restart_work; }; static int debug_mask; @@ -229,6 +253,39 @@ static bool is_batt_available(struct qnovo *chip) return true; } +static bool is_fg_available(struct qnovo *chip) +{ + if (!chip->bms_psy) + chip->bms_psy = power_supply_get_by_name("bms"); + + if (!chip->bms_psy) + return false; + + return true; +} + +static bool is_usb_available(struct qnovo *chip) +{ + if (!chip->usb_psy) + chip->usb_psy = power_supply_get_by_name("usb"); + + if (!chip->usb_psy) + return false; + + return true; +} + +static bool is_dc_available(struct qnovo *chip) +{ + if (!chip->dc_psy) + chip->dc_psy = power_supply_get_by_name("dc"); + + if (!chip->dc_psy) + return false; + + return true; +} + static int qnovo_batt_psy_update(struct qnovo *chip, bool disable) { union power_supply_propval pval = {0}; @@ -281,10 +338,86 @@ static int qnovo_disable_cb(struct votable *votable, void *data, int disable, return -EINVAL; } + /* + * fg must be available for enable FG_AVAILABLE_VOTER + * won't enable it otherwise + */ + + if (is_fg_available(chip)) + power_supply_set_property(chip->bms_psy, + POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE, + &pval); + + vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, disable, 0); rc = qnovo_batt_psy_update(chip, disable); return rc; } +static int pt_dis_votable_cb(struct votable *votable, void *data, int disable, + const char *client) +{ + struct qnovo *chip = data; + int rc; + + if (disable) { + cancel_delayed_work_sync(&chip->ptrain_restart_work); + vote(chip->awake_votable, PT_RESTART_VOTER, false, 0); + } + + rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT, + (bool)disable ? 
0 : QNOVO_PTRAIN_EN_BIT); + if (rc < 0) { + dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n", + (bool)disable ? "disable" : "enable", rc); + return rc; + } + + if (!disable) { + vote(chip->awake_votable, PT_RESTART_VOTER, true, 0); + schedule_delayed_work(&chip->ptrain_restart_work, + msecs_to_jiffies(20)); + } + + return 0; +} + +static int not_ok_to_qnovo_cb(struct votable *votable, void *data, + int not_ok_to_qnovo, + const char *client) +{ + struct qnovo *chip = data; + + vote(chip->disable_votable, OK_TO_QNOVO_VOTER, not_ok_to_qnovo, 0); + if (not_ok_to_qnovo) + vote(chip->disable_votable, USER_VOTER, true, 0); + + kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE); + return 0; +} + +static int chg_ready_cb(struct votable *votable, void *data, int ready, + const char *client) +{ + struct qnovo *chip = data; + + vote(chip->not_ok_to_qnovo_votable, CHG_READY_VOTER, !ready, 0); + + return 0; +} + +static int awake_cb(struct votable *votable, void *data, int awake, + const char *client) +{ + struct qnovo *chip = data; + + if (awake) + pm_stay_awake(chip->dev); + else + pm_relax(chip->dev); + + return 0; +} + static int qnovo_parse_dt(struct qnovo *chip) { struct device_node *node = chip->dev->of_node; @@ -626,8 +759,9 @@ static ssize_t ok_to_qnovo_show(struct class *c, struct class_attribute *attr, char *buf) { struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); + int val = get_effective_result(chip->not_ok_to_qnovo_votable); - return snprintf(buf, PAGE_SIZE, "%d\n", chip->ok_to_qnovo); + return snprintf(buf, PAGE_SIZE, "%d\n", !val); } static ssize_t qnovo_enable_show(struct class *c, struct class_attribute *attr, @@ -656,21 +790,10 @@ static ssize_t qnovo_enable_store(struct class *c, struct class_attribute *attr, static ssize_t pt_enable_show(struct class *c, struct class_attribute *attr, char *ubuf) { - int i = attr - qnovo_attributes; struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); - u8 buf[2] = {0, 0}; - u16 regval; - int rc; - - rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs); - if (rc < 0) { - pr_err("Couldn't read %s rc = %d\n", params[i].name, rc); - return -EINVAL; - } - regval = buf[1] << 8 | buf[0]; + int val = get_effective_result(chip->pt_dis_votable); - return snprintf(ubuf, PAGE_SIZE, "%d\n", - (int)(regval & QNOVO_PTRAIN_EN_BIT)); + return snprintf(ubuf, PAGE_SIZE, "%d\n", !val); } static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr, @@ -678,21 +801,12 @@ static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr, { struct qnovo *chip = container_of(c, struct qnovo, qnovo_class); unsigned long val; - int rc = 0; - - if (get_effective_result(chip->disable_votable)) - return -EINVAL; if (kstrtoul(ubuf, 0, &val)) return -EINVAL; - rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT, - (bool)val ? QNOVO_PTRAIN_EN_BIT : 0); - if (rc < 0) { - dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n", - (bool)val ? "enable" : "disable", rc); - return rc; - } + /* val being 0, userspace wishes to disable pt so vote true */ + vote(chip->pt_dis_votable, QNI_PT_VOTER, val ? 
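The votables created for this driver (pt_dis, not_ok_to_qnovo, chg_ready, awake) all use VOTE_SET_ANY: the effective result is true while at least one client votes true, and the callback applies the aggregate. A minimal model of that rule, illustrative only; the kernel's pmic-voter additionally tracks clients by name and suppresses redundant callbacks:

#include <stdbool.h>

#define MAX_CLIENTS 8

struct set_any_votable {
	bool votes[MAX_CLIENTS];
	void (*cb)(bool effective);	/* e.g. pt_dis_votable_cb */
};

static void votable_vote(struct set_any_votable *v, int client, bool val)
{
	bool effective = false;
	int i;

	v->votes[client] = val;
	for (i = 0; i < MAX_CLIENTS; i++)
		effective |= v->votes[i];
	v->cb(effective);
}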
false : true, 0); return count; } @@ -1116,41 +1230,146 @@ static int qnovo_update_status(struct qnovo *chip) { u8 val = 0; int rc; - bool ok_to_qnovo; - bool changed = false; + bool hw_ok_to_qnovo; rc = qnovo_read(chip, QNOVO_ERROR_STS2, &val, 1); if (rc < 0) { pr_err("Couldn't read error sts rc = %d\n", rc); - ok_to_qnovo = false; + hw_ok_to_qnovo = false; } else { /* * For CV mode keep qnovo enabled, userspace is expected to * disable it after few runs */ - ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ? true : false; + hw_ok_to_qnovo = (val == ERR_CV_MODE || val == 0) ? + true : false; } - if (chip->ok_to_qnovo ^ ok_to_qnovo) { + vote(chip->not_ok_to_qnovo_votable, HW_OK_TO_QNOVO_VOTER, + !hw_ok_to_qnovo, 0); + return 0; +} - vote(chip->disable_votable, OK_TO_QNOVO_VOTER, !ok_to_qnovo, 0); - if (!ok_to_qnovo) - vote(chip->disable_votable, USER_VOTER, true, 0); +static void usb_debounce_work(struct work_struct *work) +{ + struct qnovo *chip = container_of(work, + struct qnovo, usb_debounce_work.work); - chip->ok_to_qnovo = ok_to_qnovo; - changed = true; - } + vote(chip->chg_ready_votable, USB_READY_VOTER, true, 0); + vote(chip->awake_votable, USB_READY_VOTER, false, 0); +} - return changed; +static void dc_debounce_work(struct work_struct *work) +{ + struct qnovo *chip = container_of(work, + struct qnovo, dc_debounce_work.work); + + vote(chip->chg_ready_votable, DC_READY_VOTER, true, 0); + vote(chip->awake_votable, DC_READY_VOTER, false, 0); } +#define DEBOUNCE_MS 15000 /* 15 seconds */ static void status_change_work(struct work_struct *work) { struct qnovo *chip = container_of(work, struct qnovo, status_change_work); + union power_supply_propval pval; + bool usb_present = false, dc_present = false; + int rc; + + if (is_fg_available(chip)) + vote(chip->disable_votable, FG_AVAILABLE_VOTER, false, 0); + + if (is_usb_available(chip)) { + rc = power_supply_get_property(chip->usb_psy, + POWER_SUPPLY_PROP_PRESENT, &pval); + usb_present = (rc < 0) ? 0 : pval.intval; + } + + if (chip->usb_present && !usb_present) { + /* removal */ + chip->usb_present = 0; + cancel_delayed_work_sync(&chip->usb_debounce_work); + vote(chip->awake_votable, USB_READY_VOTER, false, 0); + vote(chip->chg_ready_votable, USB_READY_VOTER, false, 0); + } else if (!chip->usb_present && usb_present) { + /* insertion */ + chip->usb_present = 1; + vote(chip->awake_votable, USB_READY_VOTER, true, 0); + schedule_delayed_work(&chip->usb_debounce_work, + msecs_to_jiffies(DEBOUNCE_MS)); + } + + if (is_dc_available(chip)) { + rc = power_supply_get_property(chip->dc_psy, + POWER_SUPPLY_PROP_PRESENT, + &pval); + dc_present = (rc < 0) ? 
0 : pval.intval; + } - if (qnovo_update_status(chip)) - kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE); + if (usb_present) + dc_present = 0; + + if (chip->dc_present && !dc_present) { + /* removal */ + chip->dc_present = 0; + cancel_delayed_work_sync(&chip->dc_debounce_work); + vote(chip->awake_votable, DC_READY_VOTER, false, 0); + vote(chip->chg_ready_votable, DC_READY_VOTER, false, 0); + } else if (!chip->dc_present && dc_present) { + /* insertion */ + chip->dc_present = 1; + vote(chip->awake_votable, DC_READY_VOTER, true, 0); + schedule_delayed_work(&chip->dc_debounce_work, + msecs_to_jiffies(DEBOUNCE_MS)); + } + + qnovo_update_status(chip); +} + +static void ptrain_restart_work(struct work_struct *work) +{ + struct qnovo *chip = container_of(work, + struct qnovo, ptrain_restart_work.work); + u8 pt_t1, pt_t2; + int rc; + + rc = qnovo_read(chip, QNOVO_PTTIME_STS, &pt_t1, 1); + if (rc < 0) { + dev_err(chip->dev, "Couldn't read QNOVO_PTTIME_STS rc = %d\n", + rc); + goto clean_up; + } + + /* pttime increments every 2 seconds */ + msleep(2100); + + rc = qnovo_read(chip, QNOVO_PTTIME_STS, &pt_t2, 1); + if (rc < 0) { + dev_err(chip->dev, "Couldn't read QNOVO_PTTIME_STS rc = %d\n", + rc); + goto clean_up; + } + + if (pt_t1 != pt_t2) + goto clean_up; + + /* Toggle pt enable to restart pulse train */ + rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT, 0); + if (rc < 0) { + dev_err(chip->dev, "Couldn't disable pulse train rc=%d\n", rc); + goto clean_up; + } + msleep(1000); + rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT, + QNOVO_PTRAIN_EN_BIT); + if (rc < 0) { + dev_err(chip->dev, "Couldn't enable pulse train rc=%d\n", rc); + goto clean_up; + } + +clean_up: + vote(chip->awake_votable, PT_RESTART_VOTER, false, 0); } static int qnovo_notifier_call(struct notifier_block *nb, @@ -1162,7 +1381,10 @@ static int qnovo_notifier_call(struct notifier_block *nb, if (ev != PSY_EVENT_PROP_CHANGED) return NOTIFY_OK; - if (strcmp(psy->desc->name, "battery") == 0) + if (strcmp(psy->desc->name, "battery") == 0 + || strcmp(psy->desc->name, "bms") == 0 + || strcmp(psy->desc->name, "usb") == 0 + || strcmp(psy->desc->name, "dc") == 0) schedule_work(&chip->status_change_work); return NOTIFY_OK; @@ -1171,7 +1393,23 @@ static int qnovo_notifier_call(struct notifier_block *nb, static irqreturn_t handle_ptrain_done(int irq, void *data) { struct qnovo *chip = data; + union power_supply_propval pval = {0}; + /* + * hw resets pt_en bit once ptrain_done triggers. 
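ptrain_restart_work above is a progress watchdog: QNOVO_PTTIME_STS ticks once every 2 s while the pulse train runs, so the worker samples it twice 2.1 s apart and power-cycles QNOVO_PTRAIN_EN only if the counter is stuck. The same pattern in isolation, assuming hypothetical read_counter()/set_enable() hooks in place of the register accessors:

#include <linux/delay.h>
#include <linux/types.h>

int read_counter(u8 *val);	/* stands in for qnovo_read(QNOVO_PTTIME_STS) */
void set_enable(bool on);	/* stands in for the QNOVO_PTRAIN_EN write */

static int restart_if_stalled(void)
{
	u8 t1, t2;
	int rc;

	rc = read_counter(&t1);
	if (rc < 0)
		return rc;

	msleep(2100);		/* one tick plus margin */

	rc = read_counter(&t2);
	if (rc < 0)
		return rc;

	if (t1 != t2)		/* still making progress, leave it alone */
		return 0;

	set_enable(false);	/* toggle to restart the pulse train */
	msleep(1000);
	set_enable(true);
	return 0;
}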
+ * vote on behalf of QNI to disable it such that + * once QNI enables it, the votable state changes + * and the callback that sets it is indeed invoked + */ + vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0); + + vote(chip->pt_dis_votable, ESR_VOTER, true, 0); + if (is_fg_available(chip)) + power_supply_set_property(chip->bms_psy, + POWER_SUPPLY_PROP_RESISTANCE, + &pval); + + vote(chip->pt_dis_votable, ESR_VOTER, false, 0); qnovo_update_status(chip); kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE); return IRQ_HANDLED; @@ -1186,6 +1424,11 @@ static int qnovo_hw_init(struct qnovo *chip) u8 val; vote(chip->disable_votable, USER_VOTER, true, 0); + vote(chip->disable_votable, FG_AVAILABLE_VOTER, true, 0); + + vote(chip->pt_dis_votable, QNI_PT_VOTER, true, 0); + vote(chip->pt_dis_votable, QNOVO_OVERALL_VOTER, true, 0); + vote(chip->pt_dis_votable, ESR_VOTER, false, 0); val = 0; rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1); @@ -1349,12 +1592,45 @@ static int qnovo_probe(struct platform_device *pdev) goto cleanup; } + chip->pt_dis_votable = create_votable("QNOVO_PT_DIS", VOTE_SET_ANY, + pt_dis_votable_cb, chip); + if (IS_ERR(chip->pt_dis_votable)) { + rc = PTR_ERR(chip->pt_dis_votable); + goto destroy_disable_votable; + } + + chip->not_ok_to_qnovo_votable = create_votable("QNOVO_NOT_OK", + VOTE_SET_ANY, + not_ok_to_qnovo_cb, chip); + if (IS_ERR(chip->not_ok_to_qnovo_votable)) { + rc = PTR_ERR(chip->not_ok_to_qnovo_votable); + goto destroy_pt_dis_votable; + } + + chip->chg_ready_votable = create_votable("QNOVO_CHG_READY", + VOTE_SET_ANY, + chg_ready_cb, chip); + if (IS_ERR(chip->chg_ready_votable)) { + rc = PTR_ERR(chip->chg_ready_votable); + goto destroy_not_ok_to_qnovo_votable; + } + + chip->awake_votable = create_votable("QNOVO_AWAKE", VOTE_SET_ANY, + awake_cb, chip); + if (IS_ERR(chip->awake_votable)) { + rc = PTR_ERR(chip->awake_votable); + goto destroy_chg_ready_votable; + } + INIT_WORK(&chip->status_change_work, status_change_work); + INIT_DELAYED_WORK(&chip->dc_debounce_work, dc_debounce_work); + INIT_DELAYED_WORK(&chip->usb_debounce_work, usb_debounce_work); + INIT_DELAYED_WORK(&chip->ptrain_restart_work, ptrain_restart_work); rc = qnovo_hw_init(chip); if (rc < 0) { pr_err("Couldn't initialize hardware rc=%d\n", rc); - goto destroy_votable; + goto destroy_awake_votable; } rc = qnovo_register_notifier(chip); @@ -1390,7 +1666,15 @@ static int qnovo_probe(struct platform_device *pdev) unreg_notifier: power_supply_unreg_notifier(&chip->nb); -destroy_votable: +destroy_awake_votable: + destroy_votable(chip->awake_votable); +destroy_chg_ready_votable: + destroy_votable(chip->chg_ready_votable); +destroy_not_ok_to_qnovo_votable: + destroy_votable(chip->not_ok_to_qnovo_votable); +destroy_pt_dis_votable: + destroy_votable(chip->pt_dis_votable); +destroy_disable_votable: destroy_votable(chip->disable_votable); cleanup: platform_set_drvdata(pdev, NULL); @@ -1403,6 +1687,9 @@ static int qnovo_remove(struct platform_device *pdev) class_unregister(&chip->qnovo_class); power_supply_unreg_notifier(&chip->nb); + destroy_votable(chip->chg_ready_votable); + destroy_votable(chip->not_ok_to_qnovo_votable); + destroy_votable(chip->pt_dis_votable); destroy_votable(chip->disable_votable); platform_set_drvdata(pdev, NULL); return 0; diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c index 630d02eb7a67..1139f33866c4 100644 --- a/drivers/power/supply/qcom/qpnp-smb2.c +++ b/drivers/power/supply/qcom/qpnp-smb2.c @@ -210,6 +210,9 @@ static int smb2_parse_dt(struct smb2 *chip) 
chg->step_chg_enabled = of_property_read_bool(node, "qcom,step-charging-enable"); + chg->sw_jeita_enabled = of_property_read_bool(node, + "qcom,sw-jeita-enable"); + rc = of_property_read_u32(node, "qcom,wd-bark-time-secs", &chip->dt.wd_bark_time); if (rc < 0 || chip->dt.wd_bark_time < MIN_WD_BARK_TIME) diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c index ab6ba03e3556..f4d286c6a324 100644 --- a/drivers/power/supply/qcom/smb-lib.c +++ b/drivers/power/supply/qcom/smb-lib.c @@ -488,6 +488,45 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg, /******************** * HELPER FUNCTIONS * ********************/ +static int smblib_request_dpdm(struct smb_charger *chg, bool enable) +{ + int rc = 0; + + /* fetch the DPDM regulator */ + if (!chg->dpdm_reg && of_get_property(chg->dev->of_node, + "dpdm-supply", NULL)) { + chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm"); + if (IS_ERR(chg->dpdm_reg)) { + rc = PTR_ERR(chg->dpdm_reg); + smblib_err(chg, "Couldn't get dpdm regulator rc=%d\n", + rc); + chg->dpdm_reg = NULL; + return rc; + } + } + + if (enable) { + if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) { + smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n"); + rc = regulator_enable(chg->dpdm_reg); + if (rc < 0) + smblib_err(chg, + "Couldn't enable dpdm regulator rc=%d\n", + rc); + } + } else { + if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) { + smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n"); + rc = regulator_disable(chg->dpdm_reg); + if (rc < 0) + smblib_err(chg, + "Couldn't disable dpdm regulator rc=%d\n", + rc); + } + } + + return rc; +} static void smblib_rerun_apsd(struct smb_charger *chg) { @@ -607,13 +646,9 @@ static void smblib_uusb_removal(struct smb_charger *chg) cancel_delayed_work_sync(&chg->pl_enable_work); - if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) { - smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n"); - rc = regulator_disable(chg->dpdm_reg); - if (rc < 0) - smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n", - rc); - } + rc = smblib_request_dpdm(chg, false); + if (rc < 0) + smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc); if (chg->wa_flags & BOOST_BACK_WA) { data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data; @@ -705,24 +740,9 @@ int smblib_rerun_apsd_if_required(struct smb_charger *chg) if (!val.intval) return 0; - /* fetch the DPDM regulator */ - if (!chg->dpdm_reg && of_get_property(chg->dev->of_node, - "dpdm-supply", NULL)) { - chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm"); - if (IS_ERR(chg->dpdm_reg)) { - smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n", - PTR_ERR(chg->dpdm_reg)); - chg->dpdm_reg = NULL; - } - } - - if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) { - smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n"); - rc = regulator_enable(chg->dpdm_reg); - if (rc < 0) - smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n", - rc); - } + rc = smblib_request_dpdm(chg, true); + if (rc < 0) + smblib_err(chg, "Couldn't enable DPDM rc=%d\n", rc); chg->uusb_apsd_rerun_done = true; smblib_rerun_apsd(chg); @@ -1551,8 +1571,8 @@ int smblib_get_prop_batt_status(struct smb_charger *chg, union power_supply_propval *val) { union power_supply_propval pval = {0, }; - bool usb_online, dc_online; - u8 stat; + bool usb_online, dc_online, qnovo_en; + u8 stat, pt_en_cmd; int rc; rc = smblib_get_prop_usb_online(chg, &pval); @@ -1620,11 +1640,22 @@ int smblib_get_prop_batt_status(struct smb_charger *chg, smblib_err(chg, "Couldn't read 
BATTERY_CHARGER_STATUS_2 rc=%d\n", rc); return rc; - } + } stat &= ENABLE_TRICKLE_BIT | ENABLE_PRE_CHARGING_BIT | ENABLE_FAST_CHARGING_BIT | ENABLE_FULLON_MODE_BIT; - if (!stat) + + rc = smblib_read(chg, QNOVO_PT_ENABLE_CMD_REG, &pt_en_cmd); + if (rc < 0) { + smblib_err(chg, "Couldn't read QNOVO_PT_ENABLE_CMD_REG rc=%d\n", + rc); + return rc; + } + + qnovo_en = (bool)(pt_en_cmd & QNOVO_PT_ENABLE_CMD_BIT); + + /* ignore stat7 when qnovo is enabled */ + if (!qnovo_en && !stat) val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; return 0; @@ -3253,25 +3284,10 @@ void smblib_usb_plugin_locked(struct smb_charger *chg) smblib_set_opt_freq_buck(chg, vbus_rising ? chg->chg_freq.freq_5V : chg->chg_freq.freq_removal); - /* fetch the DPDM regulator */ - if (!chg->dpdm_reg && of_get_property(chg->dev->of_node, - "dpdm-supply", NULL)) { - chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm"); - if (IS_ERR(chg->dpdm_reg)) { - smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n", - PTR_ERR(chg->dpdm_reg)); - chg->dpdm_reg = NULL; - } - } - if (vbus_rising) { - if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) { - smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n"); - rc = regulator_enable(chg->dpdm_reg); - if (rc < 0) - smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n", - rc); - } + rc = smblib_request_dpdm(chg, true); + if (rc < 0) + smblib_err(chg, "Couldn't enable DPDM rc=%d\n", rc); /* Schedule work to enable parallel charger */ vote(chg->awake_votable, PL_DELAY_VOTER, true, 0); @@ -3291,13 +3307,9 @@ } } - if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) { - smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n"); - rc = regulator_disable(chg->dpdm_reg); - if (rc < 0) - smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n", - rc); - } + rc = smblib_request_dpdm(chg, false); + if (rc < 0) + smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc); } if (chg->micro_usb_mode) @@ -3685,13 +3697,9 @@ static void smblib_handle_typec_removal(struct smb_charger *chg) chg->cc2_detach_wa_active = false; - if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) { - smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n"); - rc = regulator_disable(chg->dpdm_reg); - if (rc < 0) - smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n", - rc); - } + rc = smblib_request_dpdm(chg, false); + if (rc < 0) + smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc); if (chg->wa_flags & BOOST_BACK_WA) { data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data; @@ -3834,10 +3842,14 @@ static void smblib_handle_typec_insertion(struct smb_charger *chg) smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n", rc); - if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT) + if (chg->typec_status[3] & UFP_DFP_MODE_STATUS_BIT) { typec_sink_insertion(chg); - else + } else { + rc = smblib_request_dpdm(chg, true); + if (rc < 0) + smblib_err(chg, "Couldn't enable DPDM rc=%d\n", rc); typec_sink_removal(chg); + } } static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode) @@ -4058,7 +4070,7 @@ irqreturn_t smblib_handle_wdog_bark(int irq, void *data) if (rc < 0) smblib_err(chg, "Couldn't pet the dog rc=%d\n", rc); - if (chg->step_chg_enabled) + if (chg->step_chg_enabled || chg->sw_jeita_enabled) power_supply_changed(chg->batt_psy); return IRQ_HANDLED; @@ -4696,7 +4708,8 @@ int smblib_init(struct smb_charger *chg) return rc; } - rc = qcom_step_chg_init(chg->step_chg_enabled); + rc = qcom_step_chg_init(chg->step_chg_enabled, + 
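The smblib_request_dpdm() helper introduced above replaces four open-coded copies of the same sequence. Because it fetches the "dpdm" supply lazily, caches it, and gates on regulator_is_enabled(), the enable/disable calls stay balanced no matter how often the plug, unplug, APSD-rerun and Type-C paths invoke it. The resulting call pattern, as used in the hunks around it:

	/* attach paths: VBUS rising, APSD rerun, Type-C sink removal */
	rc = smblib_request_dpdm(chg, true);
	if (rc < 0)
		smblib_err(chg, "Couldn't enable DPDM rc=%d\n", rc);

	/* detach paths: uUSB removal, unplug, Type-C removal */
	rc = smblib_request_dpdm(chg, false);
	if (rc < 0)
		smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);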
chg->sw_jeita_enabled); if (rc < 0) { smblib_err(chg, "Couldn't init qcom_step_chg_init rc=%d\n", rc); diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h index 8c810352f78f..c91f9eaae86b 100644 --- a/drivers/power/supply/qcom/smb-lib.h +++ b/drivers/power/supply/qcom/smb-lib.h @@ -306,6 +306,7 @@ struct smb_charger { int dcp_icl_ua; int fake_capacity; bool step_chg_enabled; + bool sw_jeita_enabled; bool is_hdc; bool chg_done; bool micro_usb_mode; diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c index a2c08be960be..cba01608afb3 100644 --- a/drivers/power/supply/qcom/step-chg-jeita.c +++ b/drivers/power/supply/qcom/step-chg-jeita.c @@ -20,7 +20,7 @@ #define MAX_STEP_CHG_ENTRIES 8 #define STEP_CHG_VOTER "STEP_CHG_VOTER" -#define STATUS_CHANGE_VOTER "STATUS_CHANGE_VOTER" +#define JEITA_VOTER "JEITA_VOTER" #define is_between(left, right, value) \ (((left) >= (right) && (left) >= (value) \ @@ -28,23 +28,44 @@ || ((left) <= (right) && (left) <= (value) \ && (value) <= (right))) -struct step_chg_data { - u32 vbatt_soc_low; - u32 vbatt_soc_high; - u32 fcc_ua; +struct range_data { + u32 low_threshold; + u32 high_threshold; + u32 value; }; struct step_chg_cfg { - u32 psy_prop; - char *prop_name; - struct step_chg_data cfg[MAX_STEP_CHG_ENTRIES]; + u32 psy_prop; + char *prop_name; + int hysteresis; + struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES]; +}; + +struct jeita_fcc_cfg { + u32 psy_prop; + char *prop_name; + int hysteresis; + struct range_data fcc_cfg[MAX_STEP_CHG_ENTRIES]; +}; + +struct jeita_fv_cfg { + u32 psy_prop; + char *prop_name; + int hysteresis; + struct range_data fv_cfg[MAX_STEP_CHG_ENTRIES]; }; struct step_chg_info { - ktime_t last_update_time; + ktime_t step_last_update_time; + ktime_t jeita_last_update_time; bool step_chg_enable; + bool sw_jeita_enable; + int jeita_fcc_index; + int jeita_fv_index; + int step_index; struct votable *fcc_votable; + struct votable *fv_votable; struct wakeup_source *step_chg_ws; struct power_supply *batt_psy; struct delayed_work status_change_work; @@ -53,32 +74,70 @@ struct step_chg_info { static struct step_chg_info *the_chip; +#define STEP_CHG_HYSTERISIS_DELAY_US 5000000 /* 5 secs */ + /* * Step Charging Configuration * Update the table based on the battery profile * Supports VBATT and SOC based source + * range data must be in increasing ranges and shouldn't overlap */ static struct step_chg_cfg step_chg_config = { - .psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW, - .prop_name = "VBATT", - .cfg = { + .psy_prop = POWER_SUPPLY_PROP_VOLTAGE_NOW, + .prop_name = "VBATT", + .hysteresis = 100000, /* 100mV */ + .fcc_cfg = { /* VBAT_LOW VBAT_HIGH FCC */ {3600000, 4000000, 3000000}, - {4000000, 4200000, 2800000}, - {4200000, 4400000, 2000000}, + {4001000, 4200000, 2800000}, + {4201000, 4400000, 2000000}, }, + /* + * SOC STEP-CHG configuration example. + * + * .psy_prop = POWER_SUPPLY_PROP_CAPACITY, + * .prop_name = "SOC", + * .fcc_cfg = { + * //SOC_LOW SOC_HIGH FCC + * {20, 70, 3000000}, + * {70, 90, 2750000}, + * {90, 100, 2500000}, + * }, + */ +}; + /* - * SOC STEP-CHG configuration example. 
- * - * .psy_prop = POWER_SUPPLY_PROP_CAPACITY, - * .prop_name = "SOC", - * .cfg = { - * //SOC_LOW SOC_HIGH FCC - * {20, 70, 3000000}, - * {70, 90, 2750000}, - * {90, 100, 2500000}, - * }, + * Jeita Charging Configuration + * Update the table based on the battery profile + * Please ensure that the TEMP ranges are programmed in the hw so that + * an interrupt is issued and a consequent psy changed will cause us to + * react immediately. + * range data must be in increasing ranges and shouldn't overlap. + * Gaps are okay */ +static struct jeita_fcc_cfg jeita_fcc_config = { + .psy_prop = POWER_SUPPLY_PROP_TEMP, + .prop_name = "BATT_TEMP", + .hysteresis = 10, /* 1degC hysteresis */ + .fcc_cfg = { + /* TEMP_LOW TEMP_HIGH FCC */ + {0, 100, 600000}, + {101, 200, 2000000}, + {201, 450, 3000000}, + {451, 550, 600000}, + }, +}; + +static struct jeita_fv_cfg jeita_fv_config = { + .psy_prop = POWER_SUPPLY_PROP_TEMP, + .prop_name = "BATT_TEMP", + .hysteresis = 10, /* 1degC hysteresis */ + .fv_cfg = { + /* TEMP_LOW TEMP_HIGH FV */ + {0, 100, 4200000}, + {101, 450, 4400000}, + {451, 550, 4200000}, + }, }; static bool is_batt_available(struct step_chg_info *chip) @@ -92,22 +151,67 @@ static bool is_batt_available(struct step_chg_info *chip) return true; } -static int get_fcc(int threshold) +static int get_val(struct range_data *range, int hysteresis, int current_index, + int threshold, + int *new_index, int *val) { int i; + *new_index = -EINVAL; + /* first find the matching index without hysteresis */ for (i = 0; i < MAX_STEP_CHG_ENTRIES; i++) - if (is_between(step_chg_config.cfg[i].vbatt_soc_low, - step_chg_config.cfg[i].vbatt_soc_high, threshold)) - return step_chg_config.cfg[i].fcc_ua; + if (is_between(range[i].low_threshold, + range[i].high_threshold, threshold)) { + *new_index = i; + *val = range[i].value; + } + + /* if nothing was found, return -ENODATA */ + if (*new_index == -EINVAL) + return -ENODATA; + /* + * If we don't have a current_index return this + * newfound value. There is no hysteresis from out of range + * to in range transition + */ + if (current_index == -EINVAL) + return 0; - return -ENODATA; + /* + * Check for hysteresis if it is in the neighbourhood + * of our current index. 
+ */ + if (*new_index == current_index + 1) { + if (threshold < range[*new_index].low_threshold + hysteresis) { + /* + * Stay in the current index, threshold is not higher + * by hysteresis amount + */ + *new_index = current_index; + *val = range[current_index].value; + } + } else if (*new_index == current_index - 1) { + if (threshold > range[*new_index].high_threshold - hysteresis) { + /* + * stay in the current index, threshold is not lower + * by hysteresis amount + */ + *new_index = current_index; + *val = range[current_index].value; + } + } + return 0; } static int handle_step_chg_config(struct step_chg_info *chip) { union power_supply_propval pval = {0, }; int rc = 0, fcc_ua = 0; + u64 elapsed_us; + + elapsed_us = ktime_us_delta(ktime_get(), chip->step_last_update_time); + if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US) + goto reschedule; rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED, &pval); @@ -119,7 +223,7 @@ static int handle_step_chg_config(struct step_chg_info *chip) if (!chip->step_chg_enable) { if (chip->fcc_votable) vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0); - return 0; + goto update_time; } rc = power_supply_get_property(chip->batt_psy, @@ -130,48 +234,144 @@ static int handle_step_chg_config(struct step_chg_info *chip) return rc; } - chip->fcc_votable = find_votable("FCC"); - if (!chip->fcc_votable) - return -EINVAL; - - fcc_ua = get_fcc(pval.intval); - if (fcc_ua < 0) { + rc = get_val(step_chg_config.fcc_cfg, step_chg_config.hysteresis, + chip->step_index, + pval.intval, + &chip->step_index, + &fcc_ua); + if (rc < 0) { /* remove the vote if no step-based fcc is found */ - vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0); - return 0; + if (chip->fcc_votable) + vote(chip->fcc_votable, STEP_CHG_VOTER, false, 0); + goto update_time; } + if (!chip->fcc_votable) + chip->fcc_votable = find_votable("FCC"); + if (!chip->fcc_votable) + return -EINVAL; + vote(chip->fcc_votable, STEP_CHG_VOTER, true, fcc_ua); pr_debug("%s = %d Step-FCC = %duA\n", step_chg_config.prop_name, pval.intval, fcc_ua); +update_time: + chip->step_last_update_time = ktime_get(); return 0; + +reschedule: + /* reschedule 1000uS after the remaining time */ + return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000); +} + +static int handle_jeita(struct step_chg_info *chip) +{ + union power_supply_propval pval = {0, }; + int rc = 0, fcc_ua = 0, fv_uv = 0; + u64 elapsed_us; + + if (!chip->sw_jeita_enable) { + if (chip->fcc_votable) + vote(chip->fcc_votable, JEITA_VOTER, false, 0); + if (chip->fv_votable) + vote(chip->fv_votable, JEITA_VOTER, false, 0); + return 0; + } + + elapsed_us = ktime_us_delta(ktime_get(), chip->jeita_last_update_time); + if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US) + goto reschedule; + + rc = power_supply_get_property(chip->batt_psy, + jeita_fcc_config.psy_prop, &pval); + if (rc < 0) { + pr_err("Couldn't read %s property rc=%d\n", + step_chg_config.prop_name, rc); + return rc; + } + + rc = get_val(jeita_fcc_config.fcc_cfg, jeita_fcc_config.hysteresis, + chip->jeita_fcc_index, + pval.intval, + &chip->jeita_fcc_index, + &fcc_ua); + if (rc < 0) { + /* remove the vote if no step-based fcc is found */ + if (chip->fcc_votable) + vote(chip->fcc_votable, JEITA_VOTER, false, 0); + goto update_time; + } + + if (!chip->fcc_votable) + chip->fcc_votable = find_votable("FCC"); + if (!chip->fcc_votable) + /* changing FCC is a must */ + return -EINVAL; + + vote(chip->fcc_votable, JEITA_VOTER, true, fcc_ua); + + rc = get_val(jeita_fv_config.fv_cfg, 
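A worked example of the hysteresis rule in get_val(), run against jeita_fcc_config above (temperatures in deci-degC, hysteresis = 10, i.e. 1 degC); a sketch that assumes the definitions in this file are in scope:

static void jeita_hysteresis_demo(void)
{
	int idx, fcc_ua = 0;

	/* 19.9 degC with no previous index: plain lookup, idx = 1 (2 A) */
	get_val(jeita_fcc_config.fcc_cfg, jeita_fcc_config.hysteresis,
		-EINVAL, 199, &idx, &fcc_ua);

	/* 20.5 degC: candidate idx 2 starts at 201 and 205 < 201 + 10,
	 * so we hold idx 1 (2 A) inside the 1 degC hysteresis band */
	get_val(jeita_fcc_config.fcc_cfg, jeita_fcc_config.hysteresis,
		idx, 205, &idx, &fcc_ua);

	/* 21.1 degC: 211 >= 201 + 10 clears the band, idx = 2 (3 A) */
	get_val(jeita_fcc_config.fcc_cfg, jeita_fcc_config.hysteresis,
		idx, 211, &idx, &fcc_ua);
}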
jeita_fv_config.hysteresis, + chip->jeita_fv_index, + pval.intval, + &chip->jeita_fv_index, + &fv_uv); + if (rc < 0) { + /* remove the vote if no step-based fcc is found */ + if (chip->fv_votable) + vote(chip->fv_votable, JEITA_VOTER, false, 0); + goto update_time; + } + + chip->fv_votable = find_votable("FV"); + if (!chip->fv_votable) + goto update_time; + + vote(chip->fv_votable, JEITA_VOTER, true, fv_uv); + + pr_debug("%s = %d FCC = %duA FV = %duV\n", + step_chg_config.prop_name, pval.intval, fcc_ua, fv_uv); + +update_time: + chip->jeita_last_update_time = ktime_get(); + return 0; + +reschedule: + /* reschedule 1000uS after the remaining time */ + return (STEP_CHG_HYSTERISIS_DELAY_US - elapsed_us + 1000); } -#define STEP_CHG_HYSTERISIS_DELAY_US 5000000 /* 5 secs */ static void status_change_work(struct work_struct *work) { struct step_chg_info *chip = container_of(work, struct step_chg_info, status_change_work.work); int rc = 0; - u64 elapsed_us; - - elapsed_us = ktime_us_delta(ktime_get(), chip->last_update_time); - if (elapsed_us < STEP_CHG_HYSTERISIS_DELAY_US) - goto release_ws; + int reschedule_us; + int reschedule_jeita_work_us = 0; + int reschedule_step_work_us = 0; if (!is_batt_available(chip)) - goto release_ws; + return; + + /* skip elapsed_us debounce for handling battery temperature */ + rc = handle_jeita(chip); + if (rc > 0) + reschedule_jeita_work_us = rc; + else if (rc < 0) + pr_err("Couldn't handle sw jeita rc = %d\n", rc); rc = handle_step_chg_config(chip); + if (rc > 0) + reschedule_step_work_us = rc; if (rc < 0) - goto release_ws; + pr_err("Couldn't handle step rc = %d\n", rc); - chip->last_update_time = ktime_get(); - -release_ws: - __pm_relax(chip->step_chg_ws); + reschedule_us = min(reschedule_jeita_work_us, reschedule_step_work_us); + if (reschedule_us == 0) + __pm_relax(chip->step_chg_ws); + else + schedule_delayed_work(&chip->status_change_work, + usecs_to_jiffies(reschedule_us)); } static int step_chg_notifier_call(struct notifier_block *nb, @@ -205,7 +405,7 @@ static int step_chg_register_notifier(struct step_chg_info *chip) return 0; } -int qcom_step_chg_init(bool step_chg_enable) +int qcom_step_chg_init(bool step_chg_enable, bool sw_jeita_enable) { int rc; struct step_chg_info *chip; @@ -226,6 +426,11 @@ int qcom_step_chg_init(bool step_chg_enable) } chip->step_chg_enable = step_chg_enable; + chip->sw_jeita_enable = sw_jeita_enable; + + chip->step_index = -EINVAL; + chip->jeita_fcc_index = -EINVAL; + chip->jeita_fv_index = -EINVAL; if (step_chg_enable && (!step_chg_config.psy_prop || !step_chg_config.prop_name)) { @@ -234,6 +439,20 @@ int qcom_step_chg_init(bool step_chg_enable) return -ENODATA; } + if (sw_jeita_enable && (!jeita_fcc_config.psy_prop || + !jeita_fcc_config.prop_name)) { + /* fail if step-chg configuration is invalid */ + pr_err("Jeita TEMP configuration not defined - fail\n"); + return -ENODATA; + } + + if (sw_jeita_enable && (!jeita_fv_config.psy_prop || + !jeita_fv_config.prop_name)) { + /* fail if step-chg configuration is invalid */ + pr_err("Jeita TEMP configuration not defined - fail\n"); + return -ENODATA; + } + INIT_DELAYED_WORK(&chip->status_change_work, status_change_work); rc = step_chg_register_notifier(chip); diff --git a/drivers/power/supply/qcom/step-chg-jeita.h b/drivers/power/supply/qcom/step-chg-jeita.h index 928eeb7a670b..53335c3c2c70 100644 --- a/drivers/power/supply/qcom/step-chg-jeita.h +++ b/drivers/power/supply/qcom/step-chg-jeita.h @@ -12,6 +12,6 @@ #ifndef __STEP_CHG_H__ #define __STEP_CHG_H__ -int 
qcom_step_chg_init(bool); +int qcom_step_chg_init(bool, bool); void qcom_step_chg_deinit(void); #endif /* __STEP_CHG_H__ */ diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index aae796678ffe..47106f937371 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -2416,7 +2416,8 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba, */ static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg) { - int ret = 0, scm_ret = 0; + int ret = 0; + u64 scm_ret = 0; struct ufs_qcom_host *host = ufshcd_get_variant(hba); /* scm command buffer structrue */ @@ -2457,7 +2458,7 @@ static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg) cbuf.device_id = UFS_TZ_DEV_ID; ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret); if (ret || scm_ret) { - dev_dbg(hba->dev, "%s: failed, ret %d scm_ret %d\n", + dev_dbg(hba->dev, "%s: failed, ret %d scm_ret %llu\n", __func__, ret, scm_ret); if (!ret) ret = scm_ret; @@ -2466,7 +2467,7 @@ static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg) } out: - dev_dbg(hba->dev, "%s: ip: restore_sec_cfg %d, op: restore_sec_cfg %d, ret %d scm_ret %d\n", + dev_dbg(hba->dev, "%s: ip: restore_sec_cfg %d, op: restore_sec_cfg %d, ret %d scm_ret %llu\n", __func__, restore_sec_cfg, host->sec_cfg_updated, ret, scm_ret); return ret; } diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index bf815cb68f90..39e76072f596 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -266,7 +266,6 @@ struct icnss_msa_perm_list_t msa_perm_list[ICNSS_MSA_PERM_MAX] = { struct icnss_event_pd_service_down_data { bool crashed; bool fw_rejuvenate; - bool wdog_bite; }; struct icnss_driver_event { @@ -291,7 +290,6 @@ enum icnss_driver_state { ICNSS_PD_RESTART, ICNSS_MSA0_ASSIGNED, ICNSS_WLFW_EXISTS, - ICNSS_WDOG_BITE, ICNSS_SHUTDOWN_DONE, ICNSS_HOST_TRIGGERED_PDR, }; @@ -2149,10 +2147,7 @@ static int icnss_pd_restart_complete(struct icnss_priv *priv) icnss_pm_relax(priv); - if (test_bit(ICNSS_WDOG_BITE, &priv->state)) { - icnss_call_driver_shutdown(priv); - clear_bit(ICNSS_WDOG_BITE, &priv->state); - } + icnss_call_driver_shutdown(priv); clear_bit(ICNSS_PD_RESTART, &priv->state); @@ -2302,8 +2297,7 @@ static int icnss_call_driver_remove(struct icnss_priv *priv) static int icnss_fw_crashed(struct icnss_priv *priv, struct icnss_event_pd_service_down_data *event_data) { - icnss_pr_dbg("FW crashed, state: 0x%lx, wdog_bite: %d\n", - priv->state, event_data->wdog_bite); + icnss_pr_dbg("FW crashed, state: 0x%lx\n", priv->state); set_bit(ICNSS_PD_RESTART, &priv->state); clear_bit(ICNSS_FW_READY, &priv->state); @@ -2313,17 +2307,9 @@ static int icnss_fw_crashed(struct icnss_priv *priv, if (test_bit(ICNSS_DRIVER_PROBED, &priv->state)) icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_CRASHED, NULL); - if (event_data->wdog_bite) { - set_bit(ICNSS_WDOG_BITE, &priv->state); - goto out; - } - - icnss_call_driver_shutdown(priv); - if (event_data->fw_rejuvenate) wlfw_rejuvenate_ack_send_sync_msg(priv); -out: return 0; } @@ -2520,9 +2506,6 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb, event_data->crashed = notif->crashed; - if (notif->crashed == CRASH_STATUS_WDOG_BITE) - event_data->wdog_bite = true; - fw_down_data.crashed = !!notif->crashed; icnss_call_driver_uevent(priv, ICNSS_UEVENT_FW_DOWN, &fw_down_data); @@ -2612,7 +2595,6 @@ static int icnss_service_notifier_notify(struct notifier_block *nb, switch (*state) { case ROOT_PD_WDOG_BITE: - event_data->wdog_bite 
= true; priv->stats.recovery.root_pd_crash++; break; case ROOT_PD_SHUTDOWN: @@ -3832,9 +3814,6 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv) case ICNSS_WLFW_EXISTS: seq_puts(s, "WLAN FW EXISTS"); continue; - case ICNSS_WDOG_BITE: - seq_puts(s, "MODEM WDOG BITE"); - continue; case ICNSS_SHUTDOWN_DONE: seq_puts(s, "SHUTDOWN DONE"); continue; diff --git a/drivers/soc/qcom/jtagv8-etm.c b/drivers/soc/qcom/jtagv8-etm.c index 2c15f7896c82..63432e6026e2 100644 --- a/drivers/soc/qcom/jtagv8-etm.c +++ b/drivers/soc/qcom/jtagv8-etm.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1503,6 +1503,7 @@ static int jtag_mm_etm_callback(struct notifier_block *nfb, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; + u64 version = 0; if (!etm[cpu]) goto out; @@ -1524,8 +1525,8 @@ static int jtag_mm_etm_callback(struct notifier_block *nfb, goto out; } if (etm_arch_supported(etm[cpu]->arch)) { - if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) < - TZ_DBG_ETM_VER) + if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version) + && version < TZ_DBG_ETM_VER) etm[cpu]->save_restore_enabled = true; else pr_info("etm save-restore supported by TZ\n"); @@ -1615,8 +1616,10 @@ static int jtag_mm_etm_probe(struct platform_device *pdev, uint32_t cpu) mutex_lock(&etmdata->mutex); if (etmdata->init && !etmdata->enable) { if (etm_arch_supported(etmdata->arch)) { - if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) < - TZ_DBG_ETM_VER) + u64 version = 0; + + if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version) + && (version < TZ_DBG_ETM_VER)) etmdata->save_restore_enabled = true; else pr_info("etm save-restore supported by TZ\n"); diff --git a/drivers/soc/qcom/jtagv8.c b/drivers/soc/qcom/jtagv8.c index 94c391eabaea..f09ccce8f9c3 100644 --- a/drivers/soc/qcom/jtagv8.c +++ b/drivers/soc/qcom/jtagv8.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
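scm_get_feat_version() changes calling convention in this series: it now returns a status code and reports the version through a u64 out-parameter (the armv8 path returns -EAGAIN when the query is unavailable). The jtag call sites above reduce to this shape, sketched here with a hypothetical wrapper:

static bool tz_lacks_etm_save_restore(void)
{
	u64 version = 0;

	/* a non-zero return means the query itself failed; assume TZ copes */
	if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version))
		return false;

	return version < TZ_DBG_ETM_VER;	/* old TZ: kernel must save/restore */
}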
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -974,7 +974,7 @@ static struct notifier_block jtag_cpu_pm_notifier = { static int __init msm_jtag_dbg_init(void) { int ret; - + u64 version = 0; if (msm_jtag_fuse_apps_access_disabled()) return -EPERM; @@ -982,7 +982,8 @@ static int __init msm_jtag_dbg_init(void) dbg_init_arch_data(); if (dbg_arch_supported(dbg.arch)) { - if (scm_get_feat_version(TZ_DBG_ETM_FEAT_ID) < TZ_DBG_ETM_VER) { + if (!scm_get_feat_version(TZ_DBG_ETM_FEAT_ID, &version) && + version < TZ_DBG_ETM_VER) { dbg.save_restore_enabled = true; } else { pr_info("dbg save-restore supported by TZ\n"); diff --git a/drivers/soc/qcom/qpnp-haptic.c b/drivers/soc/qcom/qpnp-haptic.c index d86f8671705a..38cc86963181 100644 --- a/drivers/soc/qcom/qpnp-haptic.c +++ b/drivers/soc/qcom/qpnp-haptic.c @@ -2233,13 +2233,23 @@ static void qpnp_hap_td_enable(struct timed_output_dev *dev, int time_ms) timed_dev); int rc; - if (time_ms <= 0) + if (time_ms < 0) return; + mutex_lock(&hap->lock); + + if (time_ms == 0) { + /* disable haptics */ + hrtimer_cancel(&hap->hap_timer); + hap->state = 0; + schedule_work(&hap->work); + mutex_unlock(&hap->lock); + return; + } + if (time_ms < 10) time_ms = 10; - mutex_lock(&hap->lock); if (is_sw_lra_auto_resonance_control(hap)) hrtimer_cancel(&hap->auto_res_err_poll_timer); diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c index 31de6e5c173c..43e2e4d17648 100644 --- a/drivers/soc/qcom/scm.c +++ b/drivers/soc/qcom/scm.c @@ -397,18 +397,22 @@ static int __scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5, __asmeq("%1", R1_STR) __asmeq("%2", R2_STR) __asmeq("%3", R3_STR) - __asmeq("%4", R0_STR) - __asmeq("%5", R1_STR) - __asmeq("%6", R2_STR) - __asmeq("%7", R3_STR) - __asmeq("%8", R4_STR) - __asmeq("%9", R5_STR) - __asmeq("%10", R6_STR) + __asmeq("%4", R4_STR) + __asmeq("%5", R5_STR) + __asmeq("%6", R6_STR) + __asmeq("%7", R0_STR) + __asmeq("%8", R1_STR) + __asmeq("%9", R2_STR) + __asmeq("%10", R3_STR) + __asmeq("%11", R4_STR) + __asmeq("%12", R5_STR) + __asmeq("%13", R6_STR) #ifdef REQUIRES_SEC ".arch_extension sec\n" #endif "smc #0\n" - : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) + : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3), + "=r" (r4), "=r" (r5), "=r" (r6) : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5), "r" (r6) : "x7", "x8", "x9", "x10", "x11", "x12", "x13", @@ -442,18 +446,22 @@ static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5, __asmeq("%1", R1_STR) __asmeq("%2", R2_STR) __asmeq("%3", R3_STR) - __asmeq("%4", R0_STR) - __asmeq("%5", R1_STR) - __asmeq("%6", R2_STR) - __asmeq("%7", R3_STR) - __asmeq("%8", R4_STR) - __asmeq("%9", R5_STR) - __asmeq("%10", R6_STR) + __asmeq("%4", R4_STR) + __asmeq("%5", R5_STR) + __asmeq("%6", R6_STR) + __asmeq("%7", R0_STR) + __asmeq("%8", R1_STR) + __asmeq("%9", R2_STR) + __asmeq("%10", R3_STR) + __asmeq("%11", R4_STR) + __asmeq("%12", R5_STR) + __asmeq("%13", R6_STR) #ifdef REQUIRES_SEC ".arch_extension sec\n" #endif "smc #0\n" - : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) + : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3), + "=r" (r4), "=r" (r5), "=r" (r6) : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5), "r" (r6) : "x7", "x8", "x9", "x10", "x11", "x12", "x13", @@ -490,18 +498,22 @@ static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5, __asmeq("%1", R1_STR) __asmeq("%2", R2_STR) __asmeq("%3", R3_STR) - __asmeq("%4", R0_STR) - 
__asmeq("%5", R1_STR) - __asmeq("%6", R2_STR) - __asmeq("%7", R3_STR) - __asmeq("%8", R4_STR) - __asmeq("%9", R5_STR) - __asmeq("%10", R6_STR) + __asmeq("%4", R4_STR) + __asmeq("%5", R5_STR) + __asmeq("%6", R6_STR) + __asmeq("%7", R0_STR) + __asmeq("%8", R1_STR) + __asmeq("%9", R2_STR) + __asmeq("%10", R3_STR) + __asmeq("%11", R4_STR) + __asmeq("%12", R5_STR) + __asmeq("%13", R6_STR) #ifdef REQUIRES_SEC ".arch_extension sec\n" #endif "smc #0\n" - : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) + : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3), + "=r" (r4), "=r" (r5), "=r" (r6) : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5), "r" (r6)); @@ -1117,54 +1129,55 @@ int scm_is_call_available(u32 svc_id, u32 cmd_id) ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd, sizeof(svc_cmd), &ret_val, sizeof(ret_val)); - if (ret) - return ret; + if (!ret && ret_val) + return 1; + else + return 0; - return ret_val; } desc.arginfo = SCM_ARGS(1); desc.args[0] = SCM_SIP_FNID(svc_id, cmd_id); + desc.ret[0] = 0; ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD), &desc); - if (ret) - return ret; + if (!ret && desc.ret[0]) + return 1; + else + return 0; - return desc.ret[0]; } EXPORT_SYMBOL(scm_is_call_available); #define GET_FEAT_VERSION_CMD 3 -int scm_get_feat_version(u32 feat) +int scm_get_feat_version(u32 feat, u64 *scm_ret) { struct scm_desc desc = {0}; int ret; if (!is_scm_armv8()) { if (scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) { - u32 version; - if (!scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat, - sizeof(feat), &version, sizeof(version))) - return version; + ret = scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, + &feat, sizeof(feat), scm_ret, sizeof(*scm_ret)); + return ret; } - return 0; } ret = scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD); if (ret <= 0) - return 0; + return -EAGAIN; desc.args[0] = feat; desc.arginfo = SCM_ARGS(1); ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, GET_FEAT_VERSION_CMD), &desc); - if (!ret) - return desc.ret[0]; - return 0; + *scm_ret = desc.ret[0]; + + return ret; } EXPORT_SYMBOL(scm_get_feat_version); #define RESTORE_SEC_CFG 2 -int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret) +int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret) { struct scm_desc desc = {0}; int ret; diff --git a/drivers/soc/qcom/scm_qcpe.c b/drivers/soc/qcom/scm_qcpe.c index 54a978157bda..0263350ae931 100644 --- a/drivers/soc/qcom/scm_qcpe.c +++ b/drivers/soc/qcom/scm_qcpe.c @@ -219,7 +219,7 @@ static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc) if (!opened) { ret = habmm_socket_open(&handle, MM_QCPE_VM1, 0, 0); if (ret != HAB_OK) { - pr_err("scm_call2: habmm_socket_open failed with ret = %d", + pr_err("scm_call_qcpe: habmm_socket_open failed with ret = %d", ret); return ret; } @@ -239,19 +239,26 @@ static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc) return ret; size_bytes = sizeof(smc_params); + memset(&smc_params, 0x0, sizeof(smc_params)); ret = habmm_socket_recv(handle, &smc_params, &size_bytes, 0, 0); if (ret != HAB_OK) return ret; + if (size_bytes != sizeof(smc_params)) { + pr_err("scm_call_qcpe: expected size: %lu, actual=%u\n", + sizeof(smc_params), size_bytes); + return SCM_ERROR; + } + desc->ret[0] = smc_params.x1; desc->ret[1] = smc_params.x2; desc->ret[2] = smc_params.x3; - pr_info("scm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx", - desc->ret[0], desc->ret[1], desc->ret[2]); + pr_info("scm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx, 0x%llx", + smc_params.x0, desc->ret[0], desc->ret[1], desc->ret[2]); - return 
0; + return smc_params.x0; } static u32 smc(u32 cmd_addr) diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c index 4307937d9f6d..e7a00cdb5b03 100644 --- a/drivers/soc/qcom/secure_buffer.c +++ b/drivers/soc/qcom/secure_buffer.c @@ -424,13 +424,14 @@ const char *msm_secure_vmid_to_string(int secure_vmid) bool msm_secure_v2_is_supported(void) { - int version = scm_get_feat_version(FEATURE_ID_CP); + u64 version; + int ret = scm_get_feat_version(FEATURE_ID_CP, &version); /* * if the version is < 1.1.0 then dynamic buffer allocation is * not supported */ - return version >= MAKE_CP_VERSION(1, 1, 0); + return (ret == 0) && (version >= MAKE_CP_VERSION(1, 1, 0)); } static int __init alloc_secure_shared_memory(void) diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index dfb6d2f77af1..62d2667ee2f6 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -237,10 +237,12 @@ void ion_cma_heap_destroy(struct ion_heap *heap) static void ion_secure_cma_free(struct ion_buffer *buffer) { - int ret = 0; + int i, ret = 0; int source_vm; int dest_vmid; int dest_perms; + struct sg_table *sgt; + struct scatterlist *sg; struct ion_cma_buffer_info *info = buffer->priv_virt; source_vm = get_secure_vmid(buffer->flags); @@ -251,14 +253,17 @@ static void ion_secure_cma_free(struct ion_buffer *buffer) dest_vmid = VMID_HLOS; dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC; - ret = hyp_assign_table(info->table, &source_vm, 1, - &dest_vmid, &dest_perms, 1); + sgt = info->table; + ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vmid, &dest_perms, 1); if (ret) { pr_err("%s: Not freeing memory since assign failed\n", __func__); return; } + for_each_sg(sgt->sgl, sg, sgt->nents, i) + ClearPagePrivate(sg_page(sgt->sgl)); + ion_cma_free(buffer); } @@ -266,11 +271,13 @@ static int ion_secure_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, unsigned long len, unsigned long align, unsigned long flags) { - int ret = 0; + int i, ret = 0; int source_vm; int dest_vm; int dest_perms; struct ion_cma_buffer_info *info; + struct sg_table *sgt; + struct scatterlist *sg; source_vm = VMID_HLOS; dest_vm = get_secure_vmid(flags); @@ -292,12 +299,17 @@ static int ion_secure_cma_allocate(struct ion_heap *heap, } info = buffer->priv_virt; - ret = hyp_assign_table(info->table, &source_vm, 1, - &dest_vm, &dest_perms, 1); + sgt = info->table; + ret = hyp_assign_table(sgt, &source_vm, 1, &dest_vm, &dest_perms, 1); if (ret) { pr_err("%s: Assign call failed\n", __func__); goto err; } + + /* Set the private bit to indicate that we've secured this */ + for_each_sg(sgt->sgl, sg, sgt->nents, i) + SetPagePrivate(sg_page(sgt->sgl)); + return ret; err: diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 7b4af519e17e..8fed55342b0f 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -2911,6 +2911,7 @@ static int __init comedi_init(void) dev = comedi_alloc_board_minor(NULL); if (IS_ERR(dev)) { comedi_cleanup_board_minors(); + class_destroy(comedi_class); cdev_del(&comedi_cdev); unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS); diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 01e642db311e..f35ee85f61b5 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -529,6 +529,9 @@ static int vnt_start(struct ieee80211_hw *hw) goto free_all; } + if 
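The ion_cma hunks above tag every backing page of a secured CMA buffer with PG_private once hyp_assign_table() succeeds, and clear the bit when ownership returns to VMID_HLOS, so other subsystems can tell secured pages apart. Note the free path as quoted passes sgt->sgl to sg_page() inside for_each_sg(); sg_page(sg), as below, appears to be the intent. The symmetric pattern written out as a sketch:

#include <linux/page-flags.h>
#include <linux/scatterlist.h>

static void mark_secured(struct sg_table *sgt, bool secured)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		if (secured)
			SetPagePrivate(sg_page(sg));	/* now owned by the secure VM */
		else
			ClearPagePrivate(sg_page(sg));	/* back with HLOS */
	}
}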
(vnt_key_init_table(priv)) + goto free_all; + priv->int_interval = 1; /* bInterval is set to 1 */ vnt_int_start_interrupt(priv); diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c index fc9faaee3170..b80e75fc3521 100644 --- a/drivers/tty/serial/msm_serial_hs.c +++ b/drivers/tty/serial/msm_serial_hs.c @@ -2255,7 +2255,7 @@ void disable_wakeup_interrupt(struct msm_hs_port *msm_uport) return; if (msm_uport->wakeup.enabled) { - disable_irq_nosync(msm_uport->wakeup.irq); + disable_irq(msm_uport->wakeup.irq); enable_irq(uport->irq); spin_lock_irqsave(&uport->lock, flags); msm_uport->wakeup.enabled = false; @@ -2614,8 +2614,7 @@ static int msm_hs_startup(struct uart_port *uport) msm_hs_resource_vote(msm_uport); if (is_use_low_power_wakeup(msm_uport)) { - ret = request_threaded_irq(msm_uport->wakeup.irq, NULL, - msm_hs_wakeup_isr, + ret = request_irq(msm_uport->wakeup.irq, msm_hs_wakeup_isr, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "msm_hs_wakeup", msm_uport); if (unlikely(ret)) { diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 5ab54ef4f304..e4f69bddcfb1 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -2708,13 +2708,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) * related to the kernel should not use this. */ data = vt_get_shift_state(); - ret = __put_user(data, p); + ret = put_user(data, p); break; case TIOCL_GETMOUSEREPORTING: console_lock(); /* May be overkill */ data = mouse_reporting(); console_unlock(); - ret = __put_user(data, p); + ret = put_user(data, p); break; case TIOCL_SETVESABLANK: console_lock(); @@ -2723,7 +2723,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) break; case TIOCL_GETKMSGREDIRECT: data = vt_get_kmsg_redirect(); - ret = __put_user(data, p); + ret = put_user(data, p); break; case TIOCL_SETKMSGREDIRECT: if (!capable(CAP_SYS_ADMIN)) { diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 96b21b0dac1e..3116edfcdc18 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -223,6 +223,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Blackmagic Design UltraStudio SDI */ { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, + /* Hauppauge HVR-950q */ + { USB_DEVICE(0x2040, 0x7200), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index c3077ac11709..70db070b05d8 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -2634,8 +2634,6 @@ done: static void check_for_sdp_connection(struct work_struct *w) { - int ret; - union power_supply_propval pval = {0}; struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sdp_check.work); struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3); @@ -2646,21 +2644,6 @@ static void check_for_sdp_connection(struct work_struct *w) /* floating D+/D- lines detected */ if (dwc->gadget.state < USB_STATE_DEFAULT && dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) { - if (!mdwc->usb_psy) { - mdwc->usb_psy = power_supply_get_by_name("usb"); - if (!mdwc->usb_psy) { - dev_dbg(mdwc->dev, - "Could not get usb power_supply\n"); - return; - } - } - pval.intval = -ETIMEDOUT; - ret = power_supply_set_property(mdwc->usb_psy, - POWER_SUPPLY_PROP_CURRENT_MAX, &pval); - if (ret) - dev_dbg(mdwc->dev, - "power supply error when setting property\n"); - mdwc->vbus_active = 0; dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active); 
queue_work(mdwc->dwc3_wq, &mdwc->resume_work); @@ -3708,14 +3691,16 @@ static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA) return 0; psy_type = get_psy_type(mdwc); - if (psy_type != POWER_SUPPLY_TYPE_USB && - psy_type != POWER_SUPPLY_TYPE_USB_FLOAT) + if (psy_type == POWER_SUPPLY_TYPE_USB) { + dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA); + /* Set max current limit in uA */ + pval.intval = 1000 * mA; + } else if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) { + pval.intval = -ETIMEDOUT; + } else { return 0; + } - dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA); - - /* Set max current limit in uA */ - pval.intval = 1000 * mA; ret = power_supply_set_property(mdwc->usb_psy, POWER_SUPPLY_PROP_CURRENT_MAX, &pval); if (ret) { diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index 5c0adb9c6fb2..81db2fa08cad 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c @@ -224,7 +224,7 @@ static int st_dwc3_probe(struct platform_device *pdev) dwc3_data->syscfg_reg_off = res->start; - dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n", + dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n", dwc3_data->glue_base, dwc3_data->syscfg_reg_off); dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown"); diff --git a/drivers/usb/gadget/function/f_ccid.c b/drivers/usb/gadget/function/f_ccid.c index 28ac8d0010d8..1a281833eadd 100644 --- a/drivers/usb/gadget/function/f_ccid.c +++ b/drivers/usb/gadget/function/f_ccid.c @@ -26,7 +26,7 @@ #include "f_ccid.h" #define BULK_IN_BUFFER_SIZE sizeof(struct ccid_bulk_in_header) -#define BULK_OUT_BUFFER_SIZE sizeof(struct ccid_bulk_out_header) +#define BULK_OUT_BUFFER_SIZE 1024 #define CTRL_BUF_SIZE 4 #define FUNCTION_NAME "ccid" #define MAX_INST_NAME_LEN 40 @@ -629,14 +629,14 @@ static ssize_t ccid_bulk_read(struct file *fp, char __user *buf, struct f_ccid *ccid_dev = fp->private_data; struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev; struct usb_request *req; - int r = count, xfer; + int r = count, xfer, len; int ret; unsigned long flags; pr_debug("ccid_bulk_read(%zu)\n", count); if (count > BULK_OUT_BUFFER_SIZE) { - pr_err("%s: max_buffer_size:%zu given_pkt_size:%zu\n", + pr_err("%s: max_buffer_size:%d given_pkt_size:%zu\n", __func__, BULK_OUT_BUFFER_SIZE, count); return -ENOMEM; } @@ -647,6 +647,7 @@ static ssize_t ccid_bulk_read(struct file *fp, char __user *buf, goto done; } + len = ALIGN(count, ccid_dev->out->maxpacket); requeue_req: spin_lock_irqsave(&ccid_dev->lock, flags); if (!atomic_read(&ccid_dev->online)) { @@ -655,7 +656,7 @@ requeue_req: } /* queue a request */ req = bulk_dev->rx_req; - req->length = count; + req->length = len; bulk_dev->rx_done = 0; spin_unlock_irqrestore(&ccid_dev->lock, flags); ret = usb_ep_queue(ccid_dev->out, req, GFP_KERNEL); @@ -688,6 +689,9 @@ requeue_req: spin_unlock_irqrestore(&ccid_dev->lock, flags); goto requeue_req; } + if (req->actual > count) + pr_err("%s More data received(%d) than required(%zu)\n", + __func__, req->actual, count); xfer = (req->actual < count) ? 
req->actual : count; atomic_set(&bulk_dev->rx_req_busy, 1); spin_unlock_irqrestore(&ccid_dev->lock, flags); @@ -875,7 +879,8 @@ static ssize_t ccid_ctrl_read(struct file *fp, char __user *buf, count = CTRL_BUF_SIZE; ret = wait_event_interruptible(ctrl_dev->tx_wait_q, - ctrl_dev->tx_ctrl_done); + ctrl_dev->tx_ctrl_done || + !atomic_read(&ccid_dev->online)); if (ret < 0) return ret; ctrl_dev->tx_ctrl_done = 0; diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index 3f903d4776b4..19fe6c8cb25a 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -1579,6 +1579,12 @@ static void gsi_rndis_command_complete(struct usb_ep *ep, struct f_gsi *rndis = req->context; int status; + if (req->status != 0) { + log_event_err("RNDIS command completion error %d\n", + req->status); + return; + } + status = rndis_msg_parser(rndis->params, (u8 *) req->buf); if (status < 0) log_event_err("RNDIS command error %d, %d/%d", diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c index 434af820e827..a28bcd084dc3 100644 --- a/drivers/usb/gadget/function/f_qc_rndis.c +++ b/drivers/usb/gadget/function/f_qc_rndis.c @@ -545,6 +545,12 @@ static void rndis_qc_command_complete(struct usb_ep *ep, rndis_init_msg_type *buf; u32 ul_max_xfer_size, dl_max_xfer_size; + if (req->status != 0) { + pr_err("%s: RNDIS command completion error %d\n", + __func__, req->status); + return; + } + spin_lock(&rndis_lock); rndis = _rndis_qc; if (!rndis || !rndis->notify || !rndis->notify->driver_data) { diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index 13888821109d..0917bc500023 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c @@ -463,6 +463,12 @@ static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req) int status; rndis_init_msg_type *buf; + if (req->status != 0) { + pr_err("%s: RNDIS command completion error:%d\n", + __func__, req->status); + return; + } + /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */ // spin_lock(&dev->lock); status = rndis_msg_parser(rndis->params, (u8 *) req->buf); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 33cec50978b8..b0dc6da3d970 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -134,6 +134,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ + { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 3bf61acfc26b..ebe51f11105d 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1877,6 +1877,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&four_g_w100_blacklist }, { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, + { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff), + .driver_info = 
(kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index fd509ed6cf70..652b4334b26d 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -158,6 +158,7 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ + {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */ {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */ {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index 44ab43fc4fcc..af10f7b131a4 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c @@ -262,7 +262,11 @@ void stub_device_cleanup_urbs(struct stub_device *sdev) kmem_cache_free(stub_priv_cache, priv); kfree(urb->transfer_buffer); + urb->transfer_buffer = NULL; + kfree(urb->setup_packet); + urb->setup_packet = NULL; + usb_free_urb(urb); } } diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c index dbcabc9dbe0d..021003c4de53 100644 --- a/drivers/usb/usbip/stub_tx.c +++ b/drivers/usb/usbip/stub_tx.c @@ -28,7 +28,11 @@ static void stub_free_priv_and_urb(struct stub_priv *priv) struct urb *urb = priv->urb; kfree(urb->setup_packet); + urb->setup_packet = NULL; + kfree(urb->transfer_buffer); + urb->transfer_buffer = NULL; + list_del(&priv->list); kmem_cache_free(stub_priv_cache, priv); usb_free_urb(urb); diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c index 4ac10ab494e5..5f7e7c6bcde0 100644 --- a/drivers/video/fbdev/msm/mdss_dsi.c +++ b/drivers/video/fbdev/msm/mdss_dsi.c @@ -2720,8 +2720,6 @@ static int mdss_dsi_event_handler(struct mdss_panel_data *pdata, ctrl_pdata->update_phy_timing); rc = mdss_dsi_on(pdata); - mdss_dsi_op_mode_config(pdata->panel_info.mipi.mode, - pdata); break; case MDSS_EVENT_UNBLANK: if (ctrl_pdata->on_cmds.link_state == DSI_LP_MODE) @@ -3268,21 +3266,6 @@ end: return rc; } -static void mdss_dsi_ctrl_validate_lane_swap_config( - struct mdss_dsi_ctrl_pdata *ctrl) -{ - struct mipi_panel_info *mipi = &ctrl->panel_data.panel_info.mipi; - - if (!mipi->data_lane0) - ctrl->lane_map[DSI_LOGICAL_LANE_0] = DSI_PHYSICAL_LANE_INVALID; - if (!mipi->data_lane1) - ctrl->lane_map[DSI_LOGICAL_LANE_1] = DSI_PHYSICAL_LANE_INVALID; - if (!mipi->data_lane2) - ctrl->lane_map[DSI_LOGICAL_LANE_2] = DSI_PHYSICAL_LANE_INVALID; - if (!mipi->data_lane3) - ctrl->lane_map[DSI_LOGICAL_LANE_3] = DSI_PHYSICAL_LANE_INVALID; -} - static int mdss_dsi_ctrl_validate_config(struct mdss_dsi_ctrl_pdata *ctrl) { int rc = 0; @@ -3292,8 +3275,6 @@ static int mdss_dsi_ctrl_validate_config(struct mdss_dsi_ctrl_pdata *ctrl) goto error; } - mdss_dsi_ctrl_validate_lane_swap_config(ctrl); - /* * check to make sure that the byte interface clock is specified for * DSI ctrl version 2 and above. 
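Two of the usbip hunks above (stub_main.c and stub_tx.c) pair every kfree() with a pointer reset. A minimal sketch of the pattern with a hypothetical helper name, not code from the patch: it works because kfree(NULL) is defined to be a no-op, so a cleanup path that is reachable twice frees each buffer at most once instead of double-freeing.

    #include <linux/slab.h>
    #include <linux/usb.h>

    /* Illustrative only: mirrors the usbip hardening above. */
    static void demo_release_urb_buffers(struct urb *urb)
    {
            kfree(urb->transfer_buffer);
            urb->transfer_buffer = NULL;    /* kfree(NULL) is a no-op... */
            kfree(urb->setup_packet);
            urb->setup_packet = NULL;       /* ...so a second pass is harmless */
    }
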
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c index abd2192997e5..2143c2bdb84b 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c @@ -156,10 +156,13 @@ struct hdmi_edid_ctrl { }; static bool hdmi_edid_is_mode_supported(struct hdmi_edid_ctrl *edid_ctrl, - struct msm_hdmi_mode_timing_info *timing) + struct msm_hdmi_mode_timing_info *timing, u32 out_format) { + u32 pclk = hdmi_tx_setup_tmds_clk_rate(timing->pixel_freq, + out_format, false); + if (!timing->supported || - timing->pixel_freq > edid_ctrl->init_data.max_pclk_khz) + pclk > edid_ctrl->init_data.max_pclk_khz) return false; return true; @@ -936,7 +939,8 @@ static void hdmi_edid_add_sink_y420_format(struct hdmi_edid_ctrl *edid_ctrl, u32 ret = hdmi_get_supported_mode(&timing, &edid_ctrl->init_data.ds_data, video_format); - u32 supported = hdmi_edid_is_mode_supported(edid_ctrl, &timing); + u32 supported = hdmi_edid_is_mode_supported(edid_ctrl, + &timing, MDP_Y_CBCR_H2V2); struct hdmi_edid_sink_data *sink = &edid_ctrl->sink_data; if (video_format >= HDMI_VFRMT_MAX) { @@ -1704,7 +1708,8 @@ static void hdmi_edid_add_sink_video_format(struct hdmi_edid_ctrl *edid_ctrl, u32 ret = hdmi_get_supported_mode(&timing, &edid_ctrl->init_data.ds_data, video_format); - u32 supported = hdmi_edid_is_mode_supported(edid_ctrl, &timing); + u32 supported = hdmi_edid_is_mode_supported(edid_ctrl, + &timing, MDP_RGBA_8888); struct hdmi_edid_sink_data *sink_data = &edid_ctrl->sink_data; struct disp_mode_info *disp_mode_list = sink_data->disp_mode_list; u32 i = 0; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c index 9ee0c27b225e..bda93bf0558a 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c @@ -272,20 +272,6 @@ static void hdmi_tx_cable_notify_work(struct work_struct *work) mutex_unlock(&hdmi_ctrl->tx_lock); } /* hdmi_tx_cable_notify_work */ -static bool hdmi_tx_is_cea_format(int mode) -{ - bool cea_fmt; - - if ((mode > 0) && (mode <= HDMI_EVFRMT_END)) - cea_fmt = true; - else - cea_fmt = false; - - DEV_DBG("%s: %s\n", __func__, cea_fmt ? 
"Yes" : "No"); - - return cea_fmt; -} - static inline bool hdmi_tx_is_hdcp_enabled(struct hdmi_tx_ctrl *hdmi_ctrl) { return hdmi_ctrl->hdcp_feature_on && @@ -3192,9 +3178,7 @@ static int hdmi_tx_power_on(struct hdmi_tx_ctrl *hdmi_ctrl) } hdmi_ctrl->panel.vic = hdmi_ctrl->vic; - - if (!hdmi_tx_is_dvi_mode(hdmi_ctrl) && - hdmi_tx_is_cea_format(hdmi_ctrl->vic)) + if (!hdmi_tx_is_dvi_mode(hdmi_ctrl)) hdmi_ctrl->panel.infoframe = true; else hdmi_ctrl->panel.infoframe = false; diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c index 6fb32761a767..410d36a3ac31 100644 --- a/drivers/video/fbdev/msm/mdss_mdp.c +++ b/drivers/video/fbdev/msm/mdss_mdp.c @@ -1797,7 +1797,8 @@ static inline int mdss_mdp_irq_clk_register(struct mdss_data_type *mdata, static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata) { - int ret, scm_ret = 0; + int ret; + u64 scm_ret = 0; if (test_bit(MDSS_CAPS_SCM_RESTORE_NOT_REQUIRED, mdata->mdss_caps_map)) return; @@ -1808,7 +1809,7 @@ static void __mdss_restore_sec_cfg(struct mdss_data_type *mdata) ret = scm_restore_sec_cfg(SEC_DEVICE_MDSS, 0, &scm_ret); if (ret || scm_ret) - pr_warn("scm_restore_sec_cfg failed %d %d\n", + pr_warn("scm_restore_sec_cfg failed %d %llu\n", ret, scm_ret); __mdss_mdp_reg_access_clk_enable(mdata, false); @@ -2779,6 +2780,8 @@ ssize_t mdss_mdp_show_capabilities(struct device *dev, SPRINT(" avr"); if (mdss_has_quirk(mdata, MDSS_QUIRK_HDR_SUPPORT_ENABLED)) SPRINT(" hdr"); + if (mdata->nvig_pipes && mdata->mdp_rev >= MDSS_MDP_HW_REV_300) + SPRINT(" vig_csc_db"); /* double buffered VIG CSC block */ SPRINT("\n"); #undef SPRINT diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c index 3ccc09d58480..ff93c343d41f 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_layer.c +++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c @@ -975,26 +975,31 @@ static int __validate_layer_reconfig(struct mdp_input_layer *layer, struct mdss_mdp_pipe *pipe) { int status = 0; - struct mdss_mdp_format_params *src_fmt; + struct mdss_mdp_format_params *layer_src_fmt; + struct mdss_data_type *mdata = mfd_to_mdata(pipe->mfd); + bool is_csc_db = (mdata->mdp_rev < MDSS_MDP_HW_REV_300) ? false : true; + + layer_src_fmt = mdss_mdp_get_format_params(layer->buffer.format); + if (!layer_src_fmt) { + pr_err("Invalid layer format %d\n", layer->buffer.format); + status = -EINVAL; + goto err_exit; + } /* - * csc registers are not double buffered. It is not permitted - * to change them on staged pipe with YUV layer. + * HW earlier to sdm 3.x.x does not support double buffer CSC. + * Invalidate any reconfig of CSC block on staged pipe. 
*/ - if (pipe->csc_coeff_set != layer->color_space) { - src_fmt = mdss_mdp_get_format_params(layer->buffer.format); - if (!src_fmt) { - pr_err("Invalid layer format %d\n", - layer->buffer.format); - status = -EINVAL; - } else { - if (pipe->src_fmt->is_yuv && src_fmt && - src_fmt->is_yuv) { - status = -EPERM; - pr_err("csc change is not permitted on used pipe\n"); - } - } + if (!is_csc_db && + ((!!pipe->src_fmt->is_yuv != !!layer_src_fmt->is_yuv) || + (pipe->src_fmt->is_yuv && layer_src_fmt->is_yuv && + pipe->csc_coeff_set != layer->color_space))) { + pr_err("CSC reconfig not allowed on staged pipe\n"); + status = -EINVAL; + goto err_exit; } + +err_exit: return status; } diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 6c031dd1bc4e..8a0243efd359 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -905,17 +905,60 @@ static int load_elf_binary(struct linux_binprm *bprm) elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE; vaddr = elf_ppnt->p_vaddr; + /* + * If we are loading ET_EXEC or we have already performed + * the ET_DYN load_addr calculations, proceed normally. + */ if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) { elf_flags |= MAP_FIXED; } else if (loc->elf_ex.e_type == ET_DYN) { - /* Try and get dynamic programs out of the way of the - * default mmap base, as well as whatever program they - * might try to exec. This is because the brk will - * follow the loader, and is not movable. */ - load_bias = ELF_ET_DYN_BASE - vaddr; - if (current->flags & PF_RANDOMIZE) - load_bias += arch_mmap_rnd(); - load_bias = ELF_PAGESTART(load_bias); + /* + * This logic is run once for the first LOAD Program + * Header for ET_DYN binaries to calculate the + * randomization (load_bias) for all the LOAD + * Program Headers, and to calculate the entire + * size of the ELF mapping (total_size). (Note that + * load_addr_set is set to true later once the + * initial mapping is performed.) + * + * There are effectively two types of ET_DYN + * binaries: programs (i.e. PIE: ET_DYN with INTERP) + * and loaders (ET_DYN without INTERP, since they + * _are_ the ELF interpreter). The loaders must + * be loaded away from programs since the program + * may otherwise collide with the loader (especially + * for ET_EXEC which does not have a randomized + * position). For example to handle invocations of + * "./ld.so someprog" to test out a new version of + * the loader, the subsequent program that the + * loader loads must avoid the loader itself, so + * they cannot share the same load range. Sufficient + * room for the brk must be allocated with the + * loader as well, since brk must be available with + * the loader. + * + * Therefore, programs are loaded offset from + * ELF_ET_DYN_BASE and loaders are loaded into the + * independently randomized mmap region (0 load_bias + * without MAP_FIXED). + */ + if (elf_interpreter) { + load_bias = ELF_ET_DYN_BASE; + if (current->flags & PF_RANDOMIZE) + load_bias += arch_mmap_rnd(); + elf_flags |= MAP_FIXED; + } else + load_bias = 0; + + /* + * Since load_bias is used for all subsequent loading + * calculations, we must lower it by the first vaddr + * so that the remaining calculations based on the + * ELF vaddrs will be correctly offset. The result + * is then page aligned. 
+ */ + load_bias = ELF_PAGESTART(load_bias - vaddr); + total_size = total_mapping_size(elf_phdata, loc->elf_ex.e_phnum); if (!total_size) { diff --git a/fs/exec.c b/fs/exec.c index 073ae12b396e..0428c34d4773 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -206,8 +206,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, if (write) { unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; - unsigned long ptr_size; - struct rlimit *rlim; + unsigned long ptr_size, limit; /* * Since the stack will hold pointers to the strings, we @@ -236,14 +235,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, return page; /* - * Limit to 1/4-th the stack size for the argv+env strings. + * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM + * (whichever is smaller) for the argv+env strings. * This ensures that: * - the remaining binfmt code will not run out of stack space, * - the program will have a reasonable amount of stack left * to work from. */ - rlim = current->signal->rlim; - if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) + limit = _STK_LIM / 4 * 3; + limit = min(limit, rlimit(RLIMIT_STACK) / 4); + if (size > limit) goto fail; } diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index 5d09ea585840..c2ee23acf359 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -100,7 +100,7 @@ static ssize_t reserved_clusters_store(struct ext4_attr *a, int ret; ret = kstrtoull(skip_spaces(buf), 0, &val); - if (!ret || val >= clusters) + if (ret || val >= clusters) return -EINVAL; atomic64_set(&sbi->s_resv_clusters, val); diff --git a/fs/fcntl.c b/fs/fcntl.c index ee85cd4e136a..62376451bbce 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -740,16 +740,10 @@ static int __init fcntl_init(void) * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY * is defined as O_NONBLOCK on some platforms and not on others. 
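To make the binfmt_elf bias arithmetic above concrete, here is a small userspace sketch with invented numbers (the base and p_vaddr values are illustrative, not the kernel's actual constants): subtracting the first segment's p_vaddr before page-aligning means load_bias + p_vaddr puts each segment exactly where it belongs.

    #include <stdio.h>

    #define DEMO_PAGE_MASK          (~0xfffUL)
    #define DEMO_ELF_PAGESTART(v)   ((v) & DEMO_PAGE_MASK)

    int main(void)
    {
            unsigned long base  = 0x400000; /* stand-in for ELF_ET_DYN_BASE */
            unsigned long vaddr = 0x1000;   /* first PT_LOAD p_vaddr */
            unsigned long load_bias = DEMO_ELF_PAGESTART(base - vaddr);

            /* every segment maps at load_bias + its p_vaddr */
            printf("bias=%#lx, first segment lands at %#lx\n",
                   load_bias, load_bias + vaddr); /* 0x3ff000 and 0x400000 */
            return 0;
    }
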
*/ - BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( - O_RDONLY | O_WRONLY | O_RDWR | - O_CREAT | O_EXCL | O_NOCTTY | - O_TRUNC | O_APPEND | /* O_NONBLOCK | */ - __O_SYNC | O_DSYNC | FASYNC | - O_DIRECT | O_LARGEFILE | O_DIRECTORY | - O_NOFOLLOW | O_NOATIME | O_CLOEXEC | - __FMODE_EXEC | O_PATH | __O_TMPFILE | - __FMODE_NONOTIFY - )); + BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != + HWEIGHT32( + (VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) | + __FMODE_EXEC | __FMODE_NONOTIFY)); fasync_cache = kmem_cache_create("fasync_cache", sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 9cd8c92b953d..070901e76653 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -80,9 +80,9 @@ static struct rhashtable_params ht_parms = { static struct rhashtable gl_hash_table; -void gfs2_glock_free(struct gfs2_glock *gl) +static void gfs2_glock_dealloc(struct rcu_head *rcu) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); if (gl->gl_ops->go_flags & GLOF_ASPACE) { kmem_cache_free(gfs2_glock_aspace_cachep, gl); @@ -90,6 +90,13 @@ void gfs2_glock_free(struct gfs2_glock *gl) kfree(gl->gl_lksb.sb_lvbptr); kmem_cache_free(gfs2_glock_cachep, gl); } +} + +void gfs2_glock_free(struct gfs2_glock *gl) +{ + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + + call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); if (atomic_dec_and_test(&sdp->sd_glock_disposal)) wake_up(&sdp->sd_glock_wait); } diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index be519416c112..4a9077ec9313 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -367,6 +367,7 @@ struct gfs2_glock { loff_t end; } gl_vm; }; + struct rcu_head gl_rcu; struct rhash_head gl_node; }; diff --git a/fs/mount.h b/fs/mount.h index 13a4ebbbaa74..37c64bbe840c 100644 --- a/fs/mount.h +++ b/fs/mount.h @@ -57,6 +57,7 @@ struct mount { struct mnt_namespace *mnt_ns; /* containing namespace */ struct mountpoint *mnt_mp; /* where is it mounted */ struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */ + struct list_head mnt_umounting; /* list entry for umount propagation */ #ifdef CONFIG_FSNOTIFY struct hlist_head mnt_fsnotify_marks; __u32 mnt_fsnotify_mask; diff --git a/fs/namespace.c b/fs/namespace.c index 0f52d90c356f..f32450c3e72c 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -237,6 +237,7 @@ static struct mount *alloc_vfsmnt(const char *name) INIT_LIST_HEAD(&mnt->mnt_slave_list); INIT_LIST_HEAD(&mnt->mnt_slave); INIT_HLIST_NODE(&mnt->mnt_mp_list); + INIT_LIST_HEAD(&mnt->mnt_umounting); #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks); #endif diff --git a/fs/open.c b/fs/open.c index e70cca15c976..1fd96c5d3895 100644 --- a/fs/open.c +++ b/fs/open.c @@ -898,6 +898,12 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o int lookup_flags = 0; int acc_mode; + /* + * Clear out all open flags we don't know about so that we don't report + * them in fcntl(F_GETFD) or similar interfaces. 
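The gfs2 change above defers the actual kmem_cache_free/kfree to an RCU callback so lockless rhashtable readers cannot dereference freed memory. A minimal sketch of the pattern, assuming only a payload struct with an embedded rcu_head (names are hypothetical, playing the role of gl_rcu here):

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct demo_obj {
            int payload;
            struct rcu_head rcu;    /* embedded callback head, like gl_rcu */
    };

    static void demo_obj_dealloc(struct rcu_head *rcu)
    {
            /* runs after a grace period: no RCU reader can still see it */
            kfree(container_of(rcu, struct demo_obj, rcu));
    }

    static void demo_obj_free(struct demo_obj *obj)
    {
            call_rcu(&obj->rcu, demo_obj_dealloc);
    }
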
+ */ + flags &= VALID_OPEN_FLAGS; + if (flags & (O_CREAT | __O_TMPFILE)) op->mode = (mode & S_IALLUGO) | S_IFREG; else diff --git a/fs/pnode.c b/fs/pnode.c index e4e428d621e9..ddb846f878b8 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -24,6 +24,11 @@ static inline struct mount *first_slave(struct mount *p) return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave); } +static inline struct mount *last_slave(struct mount *p) +{ + return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave); +} + static inline struct mount *next_slave(struct mount *p) { return list_entry(p->mnt_slave.next, struct mount, mnt_slave); @@ -164,6 +169,19 @@ static struct mount *propagation_next(struct mount *m, } } +static struct mount *skip_propagation_subtree(struct mount *m, + struct mount *origin) +{ + /* + * Advance m such that propagation_next will not return + * the slaves of m. + */ + if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) + m = last_slave(m); + + return m; +} + static struct mount *next_group(struct mount *m, struct mount *origin) { while (1) { @@ -415,65 +433,104 @@ void propagate_mount_unlock(struct mount *mnt) } } -/* - * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted. - */ -static void mark_umount_candidates(struct mount *mnt) +static void umount_one(struct mount *mnt, struct list_head *to_umount) { - struct mount *parent = mnt->mnt_parent; - struct mount *m; - - BUG_ON(parent == mnt); - - for (m = propagation_next(parent, parent); m; - m = propagation_next(m, parent)) { - struct mount *child = __lookup_mnt(&m->mnt, - mnt->mnt_mountpoint); - if (!child || (child->mnt.mnt_flags & MNT_UMOUNT)) - continue; - if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) { - SET_MNT_MARK(child); - } - } + CLEAR_MNT_MARK(mnt); + mnt->mnt.mnt_flags |= MNT_UMOUNT; + list_del_init(&mnt->mnt_child); + list_del_init(&mnt->mnt_umounting); + list_move_tail(&mnt->mnt_list, to_umount); } /* * NOTE: unmounting 'mnt' naturally propagates to all other mounts its * parent propagates to. */ -static void __propagate_umount(struct mount *mnt) +static bool __propagate_umount(struct mount *mnt, + struct list_head *to_umount, + struct list_head *to_restore) { - struct mount *parent = mnt->mnt_parent; - struct mount *m; + bool progress = false; + struct mount *child; - BUG_ON(parent == mnt); + /* + * The state of the parent won't change if this mount is + * already unmounted or marked as without children. + */ + if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED)) + goto out; - for (m = propagation_next(parent, parent); m; - m = propagation_next(m, parent)) { - struct mount *topper; - struct mount *child = __lookup_mnt(&m->mnt, - mnt->mnt_mountpoint); - /* - * umount the child only if the child has no children - * and the child is marked safe to unmount. - */ - if (!child || !IS_MNT_MARKED(child)) + /* Verify topper is the only grandchild that has not been + * speculatively unmounted. + */ + list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { + if (child->mnt_mountpoint == mnt->mnt.mnt_root) continue; - CLEAR_MNT_MARK(child); + if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child)) + continue; + /* Found a mounted child */ + goto children; + } - /* If there is exactly one mount covering all of child - * replace child with that mount. 
- */ - topper = find_topper(child); - if (topper) - mnt_change_mountpoint(child->mnt_parent, child->mnt_mp, - topper); + /* Mark mounts that can be unmounted if not locked */ + SET_MNT_MARK(mnt); + progress = true; + + /* If a mount is without children and not locked, umount it. */ + if (!IS_MNT_LOCKED(mnt)) { + umount_one(mnt, to_umount); + } else { +children: + list_move_tail(&mnt->mnt_umounting, to_restore); + } +out: + return progress; +} + +static void umount_list(struct list_head *to_umount, + struct list_head *to_restore) +{ + struct mount *mnt, *child, *tmp; + list_for_each_entry(mnt, to_umount, mnt_list) { + list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) { + /* topper? */ + if (child->mnt_mountpoint == mnt->mnt.mnt_root) + list_move_tail(&child->mnt_umounting, to_restore); + else + umount_one(child, to_umount); + } + } +} - if (list_empty(&child->mnt_mounts)) { - list_del_init(&child->mnt_child); - child->mnt.mnt_flags |= MNT_UMOUNT; - list_move_tail(&child->mnt_list, &mnt->mnt_list); +static void restore_mounts(struct list_head *to_restore) +{ + /* Restore mounts to a clean working state */ + while (!list_empty(to_restore)) { + struct mount *mnt, *parent; + struct mountpoint *mp; + + mnt = list_first_entry(to_restore, struct mount, mnt_umounting); + CLEAR_MNT_MARK(mnt); + list_del_init(&mnt->mnt_umounting); + + /* Should this mount be reparented? */ + mp = mnt->mnt_mp; + parent = mnt->mnt_parent; + while (parent->mnt.mnt_flags & MNT_UMOUNT) { + mp = parent->mnt_mp; + parent = parent->mnt_parent; } + if (parent != mnt->mnt_parent) + mnt_change_mountpoint(parent, mp, mnt); + } +} + +static void cleanup_umount_visitations(struct list_head *visited) +{ + while (!list_empty(visited)) { + struct mount *mnt = + list_first_entry(visited, struct mount, mnt_umounting); + list_del_init(&mnt->mnt_umounting); } } @@ -487,12 +544,69 @@ static void __propagate_umount(struct mount *mnt) int propagate_umount(struct list_head *list) { struct mount *mnt; + LIST_HEAD(to_restore); + LIST_HEAD(to_umount); + LIST_HEAD(visited); + + /* Find candidates for unmounting */ + list_for_each_entry_reverse(mnt, list, mnt_list) { + struct mount *parent = mnt->mnt_parent; + struct mount *m; + + /* + * If this mount has already been visited it is known that its + * entire peer group and all of their slaves in the propagation + * tree for the mountpoint have already been visited and there is + * no need to visit them again. + */ + if (!list_empty(&mnt->mnt_umounting)) + continue; + + list_add_tail(&mnt->mnt_umounting, &visited); + for (m = propagation_next(parent, parent); m; + m = propagation_next(m, parent)) { + struct mount *child = __lookup_mnt(&m->mnt, + mnt->mnt_mountpoint); + if (!child) + continue; + + if (!list_empty(&child->mnt_umounting)) { + /* + * If the child has already been visited it is + * known that its entire peer group and all of + * their slaves in the propagation tree for the + * mountpoint have already been visited and there + * is no need to visit this subtree again. + */ + m = skip_propagation_subtree(m, parent); + continue; + } else if (child->mnt.mnt_flags & MNT_UMOUNT) { + /* + * We have come across a partially unmounted + * mount in the list that has not been visited yet. + * Remember it has been visited and continue + * about our merry way. 
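The visited-list bookkeeping used throughout this propagate_umount() rewrite relies on a compact kernel idiom: an initialized but unlinked list_head is empty, so list_empty() doubles as a "not yet seen" flag with no extra state. A small hedged sketch of just that idiom (hypothetical helper; nodes must start out INIT_LIST_HEAD'd or list_del_init'd, as mnt_umounting is here):

    #include <linux/list.h>

    /* Returns true if this call newly marked the node as visited. */
    static bool demo_mark_visited(struct list_head *node,
                                  struct list_head *visited)
    {
            if (!list_empty(node))
                    return false;           /* already on the visited list */
            list_add_tail(node, visited);   /* empty-ness now means "seen" */
            return true;
    }
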
+ */ + list_add_tail(&child->mnt_umounting, &visited); + continue; + } + + /* Check the child and parents while progress is made */ + while (__propagate_umount(child, + &to_umount, &to_restore)) { + /* Is the parent a umount candidate? */ + child = child->mnt_parent; + if (list_empty(&child->mnt_umounting)) + break; + } + } + } - list_for_each_entry_reverse(mnt, list, mnt_list) - mark_umount_candidates(mnt); + umount_list(&to_umount, &to_restore); + restore_mounts(&to_restore); + cleanup_umount_visitations(&visited); + list_splice_tail(&to_umount, list); - list_for_each_entry(mnt, list, mnt_list) - __propagate_umount(mnt); return 0; } diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c index 60fea424835f..103dc45a131f 100644 --- a/fs/sdcardfs/inode.c +++ b/fs/sdcardfs/inode.c @@ -766,13 +766,9 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct * afterwards in the other cases: we fsstack_copy_inode_size from * the lower level. */ - if (current->mm) - down_write(¤t->mm->mmap_sem); if (ia->ia_valid & ATTR_SIZE) { err = inode_newsize_ok(&tmp, ia->ia_size); if (err) { - if (current->mm) - up_write(¤t->mm->mmap_sem); goto out; } truncate_setsize(inode, ia->ia_size); @@ -795,8 +791,6 @@ static int sdcardfs_setattr(struct vfsmount *mnt, struct dentry *dentry, struct err = notify_change2(lower_mnt, lower_dentry, &lower_ia, /* note: lower_ia */ NULL); mutex_unlock(&d_inode(lower_dentry)->i_mutex); - if (current->mm) - up_write(¤t->mm->mmap_sem); if (err) goto out; diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c index 3c5b51d49d21..80825b287836 100644 --- a/fs/sdcardfs/main.c +++ b/fs/sdcardfs/main.c @@ -364,41 +364,34 @@ out: return err; } -/* A feature which supports mount_nodev() with options */ -static struct dentry *mount_nodev_with_options(struct vfsmount *mnt, - struct file_system_type *fs_type, int flags, - const char *dev_name, void *data, - int (*fill_super)(struct vfsmount *, struct super_block *, - const char *, void *, int)) +struct sdcardfs_mount_private { + struct vfsmount *mnt; + const char *dev_name; + void *raw_data; +}; +static int __sdcardfs_fill_super( + struct super_block *sb, + void *_priv, int silent) { - int error; - struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL); - - if (IS_ERR(s)) - return ERR_CAST(s); - - s->s_flags = flags; + struct sdcardfs_mount_private *priv = _priv; - error = fill_super(mnt, s, dev_name, data, flags & MS_SILENT ? 1 : 0); - if (error) { - deactivate_locked_super(s); - return ERR_PTR(error); - } - s->s_flags |= MS_ACTIVE; - return dget(s->s_root); + return sdcardfs_read_super(priv->mnt, + sb, priv->dev_name, priv->raw_data, silent); } static struct dentry *sdcardfs_mount(struct vfsmount *mnt, struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) { - /* - * dev_name is a lower_path_name, - * raw_data is a option string. 
- */ - return mount_nodev_with_options(mnt, fs_type, flags, dev_name, - raw_data, sdcardfs_read_super); + struct sdcardfs_mount_private priv = { + .mnt = mnt, + .dev_name = dev_name, + .raw_data = raw_data + }; + + return mount_nodev(fs_type, flags, + &priv, __sdcardfs_fill_super); } static struct dentry *sdcardfs_mount_wrn(struct file_system_type *fs_type, @@ -423,7 +416,7 @@ void sdcardfs_kill_sb(struct super_block *sb) list_del(&sbi->list); mutex_unlock(&sdcardfs_super_list_lock); } - generic_shutdown_super(sb); + kill_anon_super(sb); } static struct file_system_type sdcardfs_fs_type = { diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 6555fdbee88a..f710a7075c0e 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -652,12 +652,15 @@ struct drm_encoder { * @pt_scan_info: PT scan info obtained from the VCDB of EDID * @it_scan_info: IT scan info obtained from the VCDB of EDID * @ce_scan_info: CE scan info obtained from the VCDB of EDID + * @color_enc_fmt: Colorimetry encoding formats of sink * @hdr_eotf: Electro optical transfer function obtained from HDR block * @hdr_metadata_type_one: Metadata type one obtained from HDR block * @hdr_max_luminance: desired max luminance obtained from HDR block * @hdr_avg_luminance: desired avg luminance obtained from HDR block * @hdr_min_luminance: desired min luminance obtained from HDR block * @hdr_supported: does the sink support HDR content + * @rgb_qs: does the sink declare RGB selectable quantization range + * @yuv_qs: does the sink declare YCC selectable quantization range * @edid_corrupt: indicates whether the last read EDID was corrupt * @debugfs_entry: debugfs directory for this connector * @state: current atomic state for this connector @@ -740,12 +743,15 @@ struct drm_connector { u8 pt_scan_info; u8 it_scan_info; u8 ce_scan_info; + u8 color_enc_fmt; u32 hdr_eotf; bool hdr_metadata_type_one; u32 hdr_max_luminance; u32 hdr_avg_luminance; u32 hdr_min_luminance; bool hdr_supported; + bool rgb_qs; + bool yuv_qs; /* Flag for raw EDID header corruption - used in Displayport * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6 */ diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 69cb2ba37116..c0884e120041 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -214,6 +214,15 @@ struct detailed_timing { #define DRM_EDID_YCBCR420_DC_36 (1 << 1) #define DRM_EDID_YCBCR420_DC_30 (1 << 0) +#define DRM_EDID_COLORIMETRY_xvYCC_601 (1 << 0) +#define DRM_EDID_COLORIMETRY_xvYCC_709 (1 << 1) +#define DRM_EDID_COLORIMETRY_sYCC_601 (1 << 2) +#define DRM_EDID_COLORIMETRY_ADBYCC_601 (1 << 3) +#define DRM_EDID_COLORIMETRY_ADB_RGB (1 << 4) +#define DRM_EDID_COLORIMETRY_BT2020_CYCC (1 << 5) +#define DRM_EDID_COLORIMETRY_BT2020_YCC (1 << 6) +#define DRM_EDID_COLORIMETRY_BT2020_RGB (1 << 7) + /* ELD Header Block */ #define DRM_ELD_HEADER_BLOCK_SIZE 4 diff --git a/include/linux/device.h b/include/linux/device.h index 4b4e2d5ce6e7..30c52d70c86d 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -368,6 +368,7 @@ int subsys_virtual_register(struct bus_type *subsys, * @suspend: Used to put the device to sleep mode, usually to a low power * state. * @resume: Used to bring the device from the sleep mode. + * @shutdown: Called at shut-down time to quiesce the device. * @ns_type: Callbacks so sysfs can detemine namespaces. * @namespace: Namespace of the device belongs to this class. * @pm: The default device power management operations of this class. 
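The sdcardfs conversion above is an instance of a general pattern: when a fill_super callback needs more context than mount_nodev() passes through, bundle the extra arguments in a struct handed in as the data pointer. A stripped-down sketch under hypothetical names (demo_read_super() stands in for the filesystem's real super-block reader):

    struct demo_mount_private {
            const char *dev_name;
            void *raw_data;
    };

    static int demo_fill_super(struct super_block *sb, void *data, int silent)
    {
            struct demo_mount_private *priv = data;

            /* unpack the arguments smuggled through mount_nodev() */
            return demo_read_super(sb, priv->dev_name, priv->raw_data, silent);
    }

    static struct dentry *demo_mount(struct file_system_type *fs_type,
                                     int flags, const char *dev_name,
                                     void *raw_data)
    {
            struct demo_mount_private priv = {
                    .dev_name = dev_name,
                    .raw_data = raw_data,
            };

            return mount_nodev(fs_type, flags, &priv, demo_fill_super);
    }
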
@@ -396,6 +397,7 @@ struct class { int (*suspend)(struct device *dev, pm_message_t state); int (*resume)(struct device *dev); + int (*shutdown)(struct device *dev); const struct kobj_ns_type_operations *ns_type; const void *(*namespace)(struct device *dev); diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h index 76ce329e656d..1b48d9c9a561 100644 --- a/include/linux/fcntl.h +++ b/include/linux/fcntl.h @@ -3,6 +3,12 @@ #include <uapi/linux/fcntl.h> +/* list of all valid flags for the open/openat flags argument: */ +#define VALID_OPEN_FLAGS \ + (O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \ + O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \ + FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \ + O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE) #ifndef force_o_largefile #define force_o_largefile() (BITS_PER_LONG != 32) diff --git a/include/linux/ipa.h b/include/linux/ipa.h index c11a5c4afece..a4b817c5e4fc 100644 --- a/include/linux/ipa.h +++ b/include/linux/ipa.h @@ -40,6 +40,14 @@ enum ipa_nat_en_type { }; /** + * enum ipa_ipv6ct_en_type - IPv6CT setting type in IPA end-point + */ +enum ipa_ipv6ct_en_type { + IPA_BYPASS_IPV6CT, + IPA_ENABLE_IPV6CT, +}; + +/** * enum ipa_mode_type - mode setting type in IPA end-point * @BASIC: basic mode * @ENABLE_FRAMING_HDLC: not currently supported @@ -119,6 +127,19 @@ struct ipa_ep_cfg_nat { }; /** + * struct ipa_ep_cfg_conn_track - IPv6 Connection tracking configuration in + * IPA end-point + * @conn_track_en: Defines speculative conn_track action, means if specific + * pipe needs to have UL/DL IPv6 Connection Tracking or Bypass + * IPv6 Connection Tracking. 0: Bypass IPv6 Connection Tracking + * 1: IPv6 UL/DL Connection Tracking. + * Valid for Input Pipes only (IPA consumer) + */ +struct ipa_ep_cfg_conn_track { + enum ipa_ipv6ct_en_type conn_track_en; +}; + +/** * struct ipa_ep_cfg_hdr - header configuration in IPA end-point * * @hdr_len:Header length in bytes to be added/removed. 
Assuming @@ -386,7 +407,8 @@ struct ipa_ep_cfg_seq { /** * struct ipa_ep_cfg - configuration of IPA end-point - * @nat: NAT parmeters + * @nat: NAT parameters + * @conn_track: IPv6CT parameters * @hdr: Header parameters * @hdr_ext: Extended header parameters * @mode: Mode parameters @@ -400,6 +422,7 @@ struct ipa_ep_cfg_seq { */ struct ipa_ep_cfg { struct ipa_ep_cfg_nat nat; + struct ipa_ep_cfg_conn_track conn_track; struct ipa_ep_cfg_hdr hdr; struct ipa_ep_cfg_hdr_ext hdr_ext; struct ipa_ep_cfg_mode mode; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index b584e353306d..6721be921d87 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -249,6 +249,8 @@ enum power_supply_property { POWER_SUPPLY_PROP_HW_CURRENT_MAX, POWER_SUPPLY_PROP_REAL_TYPE, POWER_SUPPLY_PROP_PR_SWAP, + POWER_SUPPLY_PROP_CC_STEP, + POWER_SUPPLY_PROP_CC_STEP_SEL, /* Local extensions of type int64_t */ POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT, /* Properties of type `const char *' */ diff --git a/include/linux/tick.h b/include/linux/tick.h index 1732697ea419..d1162e9b7a36 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -105,6 +105,7 @@ extern void tick_nohz_idle_enter(void); extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); extern ktime_t tick_nohz_get_sleep_length(void); +extern unsigned long tick_nohz_get_idle_calls(void); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); #else /* !CONFIG_NO_HZ_COMMON */ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index e888eb9a2eb9..dff7adbc60bb 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -579,9 +579,9 @@ extern void usb_ep0_reinit(struct usb_device *); ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) #define EndpointRequest \ - ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) + ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) #define EndpointOutRequest \ - ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8) + ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8) /* class requests from the USB 2.0 hub spec, table 11-15 */ /* GetBusState and SetHubDescriptor are optional, omitted */ diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h index c93364b861d9..e0c47f8b06bf 100644 --- a/include/linux/wcnss_wlan.h +++ b/include/linux/wcnss_wlan.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -38,6 +38,7 @@ struct vregs_level { }; struct wcnss_wlan_config { + bool wcn_external_gpio_support; int use_48mhz_xo; int is_pronto_vadc; int is_pronto_v3; @@ -74,6 +75,8 @@ enum { #define HAVE_WCNSS_CAL_DOWNLOAD 1 #define HAVE_CBC_DONE 1 #define HAVE_WCNSS_RX_BUFF_COUNT 1 +#define HAVE_WCNSS_SNOC_HIGH_FREQ_VOTING 1 +#define HAVE_WCNSS_5G_DISABLE 1 #define WLAN_MAC_ADDR_SIZE (6) #define WLAN_RF_REG_ADDR_START_OFFSET 0x3 #define WLAN_RF_REG_DATA_START_OFFSET 0xf @@ -132,12 +135,14 @@ void wcnss_riva_dump_pmic_regs(void); int wcnss_xo_auto_detect_enabled(void); u32 wcnss_get_wlan_rx_buff_count(void); int wcnss_wlan_iris_xo_mode(void); +int wcnss_wlan_dual_band_disabled(void); void wcnss_flush_work(struct work_struct *work); void wcnss_flush_delayed_work(struct delayed_work *dwork); void wcnss_init_work(struct work_struct *work , void *callbackptr); void wcnss_init_delayed_work(struct delayed_work *dwork , void *callbackptr); int wcnss_get_iris_name(char *iris_version); void wcnss_dump_stack(struct task_struct *task); +void wcnss_snoc_vote(bool clk_chk_en); #ifdef CONFIG_WCNSS_REGISTER_DUMP_ON_BITE void wcnss_log_debug_regs_on_bite(void); diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 814a13d22df6..f9bdfb096579 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -21,6 +21,7 @@ struct route_info { #include <net/flow.h> #include <net/ip6_fib.h> #include <net/sock.h> +#include <net/lwtunnel.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/route.h> @@ -209,4 +210,11 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, return daddr; } +static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b) +{ + return a->dst.dev == b->dst.dev && + a->rt6i_idev == b->rt6i_idev && + ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) && + !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate); +} #endif diff --git a/include/soc/qcom/scm.h b/include/soc/qcom/scm.h index af389305207f..f0a3124dae00 100644 --- a/include/soc/qcom/scm.h +++ b/include/soc/qcom/scm.h @@ -121,9 +121,9 @@ extern s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3, extern u32 scm_get_version(void); extern int scm_is_call_available(u32 svc_id, u32 cmd_id); -extern int scm_get_feat_version(u32 feat); +extern int scm_get_feat_version(u32 feat, u64 *scm_ret); extern bool is_scm_armv8(void); -extern int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret); +extern int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret); extern u32 scm_io_read(phys_addr_t address); extern int scm_io_write(phys_addr_t address, u32 val); extern bool scm_is_secure_device(void); @@ -205,7 +205,7 @@ static inline int scm_is_call_available(u32 svc_id, u32 cmd_id) return 0; } -static inline int scm_get_feat_version(u32 feat) +static inline int scm_get_feat_version(u32 feat, u64 *scm_ret) { return 0; } @@ -215,7 +215,7 @@ static inline bool is_scm_armv8(void) return true; } -static inline int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret) +static inline int scm_restore_sec_cfg(u32 device_id, u32 spare, u64 *scm_ret) { return 0; } diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index 7668b5791c91..5539933b3491 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -37,9 +37,56 @@ enum { BINDER_TYPE_PTR = 
B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE), }; -enum { +/** + * enum flat_binder_object_shifts: shift values for flat_binder_object_flags + * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy. + * + */ +enum flat_binder_object_shifts { + FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9, +}; + +/** + * enum flat_binder_object_flags - flags for use in flat_binder_object.flags + */ +enum flat_binder_object_flags { + /** + * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority + * + * These bits can be used to set the minimum scheduler priority + * at which transactions into this node should run. Valid values + * in these bits depend on the scheduler policy encoded in + * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK. + * + * For SCHED_NORMAL/SCHED_BATCH, the valid range is between [-20..19] + * For SCHED_FIFO/SCHED_RR, the value can run between [1..99] + */ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, + /** + * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds. + */ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, + /** + * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy + * + * These two bits can be used to set the min scheduling policy at which + * transactions on this node should run. These match the UAPI + * scheduler policy values, eg: + * 00b: SCHED_NORMAL + * 01b: SCHED_FIFO + * 10b: SCHED_RR + * 11b: SCHED_BATCH + */ + FLAT_BINDER_FLAG_SCHED_POLICY_MASK = + 3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT, + + /** + * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy + * + * Only when set, calls into this node will inherit a real-time + * scheduling policy from the caller (for synchronous transactions). + */ + FLAT_BINDER_FLAG_INHERIT_RT = 0x800, }; #ifdef BINDER_IPC_32BIT @@ -186,6 +233,19 @@ struct binder_version { #define BINDER_CURRENT_PROTOCOL_VERSION 8 #endif +/* + * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields. + * Set ptr to NULL for the first call to get the info for the first node, and + * then repeat the call passing the previously returned value to get the next + * nodes. ptr will be 0 when there are no more nodes. + */ +struct binder_node_debug_info { + binder_uintptr_t ptr; + binder_uintptr_t cookie; + __u32 has_strong_ref; + __u32 has_weak_ref; +}; + #define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) #define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) #define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) @@ -193,6 +253,7 @@ struct binder_version { #define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32) #define BINDER_THREAD_EXIT _IOW('b', 8, __s32) #define BINDER_VERSION _IOWR('b', 9, struct binder_version) +#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) /* * NOTE: Two special error codes you should check for when calling diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h index 60867630e1a1..dc46ee0f29a2 100644 --- a/include/uapi/linux/ipa_qmi_service_v01.h +++ b/include/uapi/linux/ipa_qmi_service_v01.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -47,6 +47,12 @@ #define QMI_IPA_MAX_FILTERS_EX_V01 128 #define QMI_IPA_MAX_PIPES_V01 20 #define QMI_IPA_MAX_APN_V01 8 +#define QMI_IPA_MAX_PER_CLIENTS_V01 64 +/* Currently max we can use is only 1. 
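For the flat_binder_object flags documented above, packing and unpacking look roughly like the sketch below (hypothetical helpers against the UAPI header; the raw value 1 encodes SCHED_FIFO per the 01b mapping in the comment):

    #include <linux/android/binder.h>

    static __u32 demo_encode_node_flags(void)
    {
            return FLAT_BINDER_FLAG_ACCEPTS_FDS |
                   (1U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT) | /* SCHED_FIFO */
                   (10 & FLAT_BINDER_FLAG_PRIORITY_MASK);  /* min RT prio 10 */
    }

    static unsigned int demo_decode_policy(__u32 flags)
    {
            return (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
                   FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
    }
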
But for scalability purposes + * the max value is set to 8. + */ +#define QMI_IPA_MAX_CLIENT_DST_PIPES_V01 8 +#define QMI_IPA_MAX_UL_FIREWALL_RULES_V01 64 #define IPA_INT_MAX ((int)(~0U>>1)) #define IPA_INT_MIN (-IPA_INT_MAX - 1) @@ -984,6 +990,16 @@ struct ipa_fltr_installed_notif_req_msg_v01 { * failure, the Rule Ids in this list must be set to a reserved * index (255). */ + + /* Optional */ + /* List of destination pipe IDs. */ + uint8_t dst_pipe_id_valid; + /* Must be set to true if dst_pipe_id is being passed. */ + uint32_t dst_pipe_id_len; + /* Must be set to # of elements in dst_pipe_id. */ + uint32_t dst_pipe_id[QMI_IPA_MAX_CLIENT_DST_PIPES_V01]; + /* Provides the list of destination pipe IDs for a source pipe. */ + }; /* Message */ /* Response Message; This is the message that is exchanged between the @@ -1622,6 +1638,273 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 { */ }; /* Message */ +/* + * Request Message; Requests the modem IPA driver to enable or + * disable collection of per client statistics. + */ +struct ipa_enable_per_client_stats_req_msg_v01 { + + /* Mandatory */ + /* Collect statistics per client. */ + uint8_t enable_per_client_stats; + /* + * Indicates whether to start or stop collecting + * per client statistics. + */ +}; /* Message */ + +/* + * Response Message; Requests the modem IPA driver to enable or disable + * collection of per client statistics. + */ +struct ipa_enable_per_client_stats_resp_msg_v01 { + + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /* Standard response type. */ +}; /* Message */ + +struct ipa_per_client_stats_info_type_v01 { + + uint32_t client_id; + /* + * Id of the client on APPS processor side for which Modem processor + * needs to send uplink/downlink statistics. + */ + + uint32_t src_pipe_id; + /* + * IPA consumer pipe on which client on APPS side sent uplink + * data to modem. + */ + + uint64_t num_ul_ipv4_bytes; + /* + * Accumulated number of uplink IPv4 bytes for a client. + */ + + uint64_t num_ul_ipv6_bytes; + /* + * Accumulated number of uplink IPv6 bytes for a client. + */ + + uint64_t num_dl_ipv4_bytes; + /* + * Accumulated number of downlink IPv4 bytes for a client. + */ + + uint64_t num_dl_ipv6_bytes; + /* + * Accumulated number of downlink IPv6 bytes for a client. + */ + + + uint32_t num_ul_ipv4_pkts; + /* + * Accumulated number of uplink IPv4 packets for a client. + */ + + uint32_t num_ul_ipv6_pkts; + /* + * Accumulated number of uplink IPv6 packets for a client. + */ + + uint32_t num_dl_ipv4_pkts; + /* + * Accumulated number of downlink IPv4 packets for a client. + */ + + uint32_t num_dl_ipv6_pkts; + /* + * Accumulated number of downlink IPv6 packets for a client. + */ +}; /* Type */ + +/* + * Request Message; Requests the modem IPA driver to provide statistics + * for a given client. + */ +struct ipa_get_stats_per_client_req_msg_v01 { + + /* Mandatory */ + /* Client id */ + uint32_t client_id; + /* + * Id of the client on APPS processor side for which Modem processor + * needs to send uplink/downlink statistics. If client id is specified + * as 0xffffffff, then Q6 will send the stats for all the clients of + * the specified source pipe. + */ + + /* Mandatory */ + /* Source pipe id */ + uint32_t src_pipe_id; + /* + * IPA consumer pipe on which client on APPS side sent uplink + * data to modem. In future, this implementation can be extended + * to provide 0xffffffff as the source pipe id, where Q6 will send + * the stats of all the clients across all different tethered-pipes. 
+ */ + + /* Optional */ + /* Reset client statistics. */ + uint8_t reset_stats_valid; + /* Must be set to true if reset_stats is being passed. */ + uint8_t reset_stats; + /* + * Option to reset the statistics currently collected by modem for this + * particular client. + */ +}; /* Message */ + +/* + * Response Message; Requests the modem IPA driver to provide statistics + * for a given client. + */ +struct ipa_get_stats_per_client_resp_msg_v01 { + + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /* Standard response type. */ + + /* Optional */ + /* Per-client Statistics List */ + uint8_t per_client_stats_list_valid; + /* Must be set to true if per_client_stats_list is being passed. */ + uint32_t per_client_stats_list_len; + /* Must be set to # of elements in per_client_stats_list. */ + struct ipa_per_client_stats_info_type_v01 + per_client_stats_list[QMI_IPA_MAX_PER_CLIENTS_V01]; + /* + * List of all per client statistics that are retrieved. + */ +}; /* Message */ + +struct ipa_ul_firewall_rule_type_v01 { + + enum ipa_ip_type_enum_v01 ip_type; + /* + * IP type for which this rule is applicable. + * The driver must identify the filter table (v6 or v4), and this + * field is essential for that. Values: + * - QMI_IPA_IP_TYPE_INVALID (0) -- Invalid IP type identifier + * - QMI_IPA_IP_TYPE_V4 (1) -- IPv4 type + * - QMI_IPA_IP_TYPE_V6 (2) -- IPv6 type + */ + + struct ipa_filter_rule_type_v01 filter_rule; + /* + * Rules in the filter specification. These rules are the + * ones that are matched against fields in the packet. + * Currently we only send IPv6 whitelist rules to Q6. + */ +}; /* Type */ + +/* + * Request Message; Requests the remote IPA driver to install uplink + * firewall rules. + */ +struct ipa_configure_ul_firewall_rules_req_msg_v01 { + + /* Optional */ + /* Uplink Firewall Specification */ + uint32_t firewall_rules_list_len; + /* Must be set to # of elements in firewall_rules_list. */ + struct ipa_ul_firewall_rule_type_v01 + firewall_rules_list[QMI_IPA_MAX_UL_FIREWALL_RULES_V01]; + /* + * List of uplink firewall specifications of filters that must be + * installed. + */ + + uint32_t mux_id; + /* + * QMAP Mux ID. As a part of the QMAP protocol, + * several data calls may be multiplexed over the same physical + * transport channel. This identifier is used to identify one + * such data call. The maximum value for this identifier is 255. + */ + + /* Optional */ + uint8_t disable_valid; + /* Must be set to true if disable is being passed. */ + uint8_t disable; + /* + * Indicates whether uplink firewall needs to be enabled or disabled. + */ + + /* Optional */ + uint8_t are_blacklist_filters_valid; + /* Must be set to true if are_blacklist_filters is being passed. */ + uint8_t are_blacklist_filters; + /* + * Indicates whether the filters received as part of this message are + * blacklist filters, i.e. drop uplink packets matching these rules. + */ +}; /* Message */ + +/* + * Response Message; Requests the remote IPA driver to install + * uplink firewall rules. + */ +struct ipa_configure_ul_firewall_rules_resp_msg_v01 { + + /* Mandatory */ + /* Result Code */ + struct ipa_qmi_response_type_v01 resp; + /* + * Standard response type. Contains the following data members: + * qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE + * qmi_error_type -- Error code. Possible error code values are + * described in the error codes section of each message definition. 
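Filling one of these requests follows the QMI optional-TLV convention visible throughout the structs above: every optional payload travels with a _valid gate, and arrays carry a _len count. A hypothetical caller-side sketch (pipe and mux values are invented; the struct closes just below this comment):

    struct ipa_configure_ul_firewall_rules_req_msg_v01 req = { 0 };

    req.firewall_rules_list_len = 1;            /* one uplink rule */
    req.firewall_rules_list[0].ip_type = 2;     /* IPv6, per the values above */
    /* req.firewall_rules_list[0].filter_rule left zeroed for brevity */
    req.mux_id = 1;                             /* QMAP data call #1 */
    req.disable_valid = 1;                      /* gate: 'disable' is meaningful */
    req.disable = 0;                            /* keep the firewall enabled */
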
+ */ +}; /* Message */ + +enum ipa_ul_firewall_status_enum_v01 { + IPA_UL_FIREWALL_STATUS_ENUM_MIN_ENUM_VAL_V01 = -2147483647, + /* To force a 32 bit signed enum. Do not change or use. */ + QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01 = 0, + /* Indicates that the uplink firewall rules + * are configured successfully. + */ + QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01 = 1, + /* Indicates that the uplink firewall rules + * are not configured successfully. + */ + IPA_UL_FIREWALL_STATUS_ENUM_MAX_ENUM_VAL_V01 = 2147483647 + /* To force a 32 bit signed enum. Do not change or use. */ +}; + +struct ipa_ul_firewall_config_result_type_v01 { + + enum ipa_ul_firewall_status_enum_v01 is_success; + /* + * Indicates whether the uplink firewall rules are configured + * successfully. + */ + + uint32_t mux_id; + /* + * QMAP Mux ID. As a part of the QMAP protocol, + * several data calls may be multiplexed over the same physical + * transport channel. This identifier is used to identify one + * such data call. The maximum value for this identifier is 255. + */ +}; + +/* + * Indication Message; Reports the result of installing the + * uplink firewall rules. + */ +struct ipa_configure_ul_firewall_rules_ind_msg_v01 { + + struct ipa_ul_firewall_config_result_type_v01 result; +}; /* Message */ + + /*Service Message Definition*/ #define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020 #define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020 @@ -1655,6 +1938,13 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 { #define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01 0x0035 #define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01 0x0037 #define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_V01 0x0037 +#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01 0x0038 +#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 0x0038 +#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01 0x0039 +#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 0x0039 +#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01 0x003A +#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01 0x003A +#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01 0x003A /* add for max length*/ #define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 134 @@ -1663,7 +1953,7 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 { #define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7 #define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 22369 #define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783 -#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 834 +#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 870 #define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01 7 #define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01 7 #define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01 15 @@ -1696,6 +1986,15 @@ struct ipa_install_fltr_rule_resp_ex_msg_v01 { #define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01 22685 #define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01 523 +#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01 4 +#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01 7 + +#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01 18 +#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01 3595 + +#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01 9875 +#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01 7 +#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01 11 /* Service Object Accessor */ #endif/* IPA_QMI_SERVICE_V01_H */ diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 25b7a678f9ef..46436543ad28 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -1251,8
+1251,10 @@ retry: timeo = MAX_SCHEDULE_TIMEOUT; ret = netlink_attachskb(sock, nc, &timeo, NULL); - if (ret == 1) + if (ret == 1) { + sock = NULL; goto retry; + } if (ret) { sock = NULL; nc = NULL; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 85de5094b936..c97bce6a0e0e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -765,6 +765,11 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn) if (err) return err; + if (is_pointer_value(env, insn->src_reg)) { + verbose("R%d leaks addr into mem\n", insn->src_reg); + return -EACCES; + } + /* check whether atomic_add can read the memory */ err = check_mem_access(env, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1); diff --git a/kernel/events/core.c b/kernel/events/core.c index 7fee87daac56..69f8f683138a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1693,7 +1693,33 @@ static int __perf_remove_from_context(void *info) return 0; } -/* + +#ifdef CONFIG_SMP +static void perf_retry_remove(struct perf_event *event, + struct remove_event *rep) +{ + int up_ret; + /* + * CPU was offline. Bring it online so we can + * gracefully exit a perf context. + */ + up_ret = cpu_up(event->cpu); + if (!up_ret) + /* Try the remove call once again. */ + cpu_function_call(event->cpu, __perf_remove_from_context, + rep); + else + pr_err("Failed to bring up CPU: %d, ret: %d\n", + event->cpu, up_ret); +} +#else +static void perf_retry_remove(struct perf_event *event, + struct remove_event *rep) +{ +} +#endif + + /* * Remove the event from a task's (or a CPU's) list of events. * * CPU events are removed with a smp call. For task events we only @@ -1728,6 +1754,9 @@ static void __ref perf_remove_from_context(struct perf_event *event, */ ret = cpu_function_call(event->cpu, __perf_remove_from_context, &re); + if (ret == -ENXIO) + perf_retry_remove(event, &re); + return; } @@ -3408,22 +3437,27 @@ u64 perf_event_read_local(struct perf_event *event) static int perf_event_read(struct perf_event *event, bool group) { - int ret = 0; + int event_cpu, ret = 0; /* * If event is enabled and currently active on a CPU, update the * value in the event structure: */ + event_cpu = READ_ONCE(event->oncpu); + if (event->state == PERF_EVENT_STATE_ACTIVE && - !cpu_isolated(event->oncpu)) { + !cpu_isolated(event_cpu)) { struct perf_read_data data = { .event = event, .group = group, .ret = 0, }; + + if ((unsigned int)event_cpu >= nr_cpu_ids) + return 0; if (!event->attr.exclude_idle || - !per_cpu(is_idle, event->oncpu)) { - smp_call_function_single(event->oncpu, + !per_cpu(is_idle, event_cpu)) { + smp_call_function_single(event_cpu, __perf_event_read, &data, 1); ret = data.ret; } @@ -7109,6 +7143,8 @@ static struct pmu perf_swevent = { .start = perf_swevent_start, .stop = perf_swevent_stop, .read = perf_swevent_read, + + .events_across_hotplug = 1, }; #ifdef CONFIG_EVENT_TRACING @@ -7230,6 +7266,8 @@ static struct pmu perf_tracepoint = { .start = perf_swevent_start, .stop = perf_swevent_stop, .read = perf_swevent_read, + + .events_across_hotplug = 1, }; static inline void perf_tp_register(void) @@ -7517,6 +7555,8 @@ static struct pmu perf_cpu_clock = { .start = cpu_clock_event_start, .stop = cpu_clock_event_stop, .read = cpu_clock_event_read, + + .events_across_hotplug = 1, }; /* @@ -7598,6 +7638,8 @@ static struct pmu perf_task_clock = { .start = task_clock_event_start, .stop = task_clock_event_stop, .read = task_clock_event_read, + + .events_across_hotplug = 1, }; static void perf_pmu_nop_void(struct pmu *pmu) 
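The perf_event_read() hunk above is a hotplug-hardening pattern worth noting: the racy event->oncpu field is sampled exactly once with READ_ONCE() and bounds-checked before it is used as an IPI target, so every later use sees the same snapshot. A minimal sketch of the same pattern, assuming a hypothetical struct my_event and callback my_read_func (READ_ONCE(), nr_cpu_ids and smp_call_function_single() are the real kernel APIs the patch uses):

	#include <linux/compiler.h>	/* READ_ONCE() */
	#include <linux/cpumask.h>	/* nr_cpu_ids */
	#include <linux/smp.h>		/* smp_call_function_single() */

	struct my_event { int oncpu; };			/* hypothetical */
	static void my_read_func(void *info) { }	/* hypothetical per-CPU read */

	static int read_counter_on_cpu(struct my_event *ev, void *data)
	{
		int cpu = READ_ONCE(ev->oncpu);	/* sample the racy field once */

		/* Rejects both "not scheduled" (-1) and any out-of-range id. */
		if ((unsigned int)cpu >= nr_cpu_ids)
			return 0;

		/* Every later use relies on the snapshot, never on ev->oncpu. */
		return smp_call_function_single(cpu, my_read_func, data, 1);
	}

Note the unsigned cast: a snapshot of -1 wraps to UINT_MAX, so the single comparison covers both the never-scheduled and the offlined-CPU cases, exactly as in the patch.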
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 92ce5f4ccc26..7da5b674d16e 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -614,6 +614,8 @@ static struct pmu perf_breakpoint = { .start = hw_breakpoint_start, .stop = hw_breakpoint_stop, .read = hw_breakpoint_pmu_read, + + .events_across_hotplug = 1, }; int __init init_hw_breakpoint(void) diff --git a/kernel/extable.c b/kernel/extable.c index e820ccee9846..4f06fc34313f 100644 --- a/kernel/extable.c +++ b/kernel/extable.c @@ -66,7 +66,7 @@ static inline int init_kernel_text(unsigned long addr) return 0; } -int core_kernel_text(unsigned long addr) +int notrace core_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_stext && addr < (unsigned long)_etext) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0071785e698b..18f4fb65cd1d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6924,6 +6924,9 @@ enum s_alloc { * Build an iteration mask that can exclude certain CPUs from the upwards * domain traversal. * + * Only CPUs that can arrive at this group should be considered to continue + * balancing. + * * Asymmetric node setups can result in situations where the domain tree is of * unequal depth, make sure to skip domains that already cover the entire * range. @@ -6935,18 +6938,31 @@ enum s_alloc { */ static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) { - const struct cpumask *span = sched_domain_span(sd); + const struct cpumask *sg_span = sched_group_cpus(sg); struct sd_data *sdd = sd->private; struct sched_domain *sibling; int i; - for_each_cpu(i, span) { + for_each_cpu(i, sg_span) { sibling = *per_cpu_ptr(sdd->sd, i); - if (!cpumask_test_cpu(i, sched_domain_span(sibling))) + + /* + * Can happen in the asymmetric case, where these siblings are + * unused. The mask will not be empty because those CPUs that + * do have the top domain _should_ span the domain. + */ + if (!sibling->child) + continue; + + /* If we would not end up here, we can't continue from here */ + if (!cpumask_equal(sg_span, sched_domain_span(sibling->child))) continue; cpumask_set_cpu(i, sched_group_mask(sg)); } + + /* We must not have empty masks here */ + WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg))); } /* diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 75bfbb336722..e12309c1b07b 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -47,6 +47,7 @@ struct sugov_policy { s64 up_rate_delay_ns; s64 down_rate_delay_ns; unsigned int next_freq; + unsigned int cached_raw_freq; /* The next fields are only needed if fast switch cannot be used. */ struct irq_work irq_work; @@ -63,7 +64,6 @@ struct sugov_cpu { struct update_util_data update_util; struct sugov_policy *sg_policy; - unsigned int cached_raw_freq; unsigned long iowait_boost; unsigned long iowait_boost_max; u64 last_update; @@ -72,6 +72,11 @@ struct sugov_cpu { unsigned long util; unsigned long max; unsigned int flags; + + /* The field below is for single-CPU policies only. 
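+ * It caches the CPU's nohz idle-call count so that
+ * sugov_cpu_is_busy() below can tell whether the CPU has entered
+ * idle since the last frequency update.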
*/ +#ifdef CONFIG_NO_HZ_COMMON + unsigned long saved_idle_calls; +#endif }; static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu); @@ -127,22 +132,20 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time, if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) return; + if (sg_policy->next_freq == next_freq) + return; + + sg_policy->next_freq = next_freq; + sg_policy->last_freq_update_time = time; + if (policy->fast_switch_enabled) { - if (sg_policy->next_freq == next_freq) { - trace_cpu_frequency(policy->cur, smp_processor_id()); - return; - } - sg_policy->next_freq = next_freq; - sg_policy->last_freq_update_time = time; next_freq = cpufreq_driver_fast_switch(policy, next_freq); if (next_freq == CPUFREQ_ENTRY_INVALID) return; policy->cur = next_freq; trace_cpu_frequency(next_freq, smp_processor_id()); - } else if (sg_policy->next_freq != next_freq) { - sg_policy->next_freq = next_freq; - sg_policy->last_freq_update_time = time; + } else { sg_policy->work_in_progress = true; irq_work_queue(&sg_policy->irq_work); } @@ -150,7 +153,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time, /** * get_next_freq - Compute a new frequency for a given cpufreq policy. - * @sg_cpu: schedutil cpu object to compute the new frequency for. + * @sg_policy: schedutil policy object to compute the new frequency for. * @util: Current CPU utilization. * @max: CPU capacity. * @@ -170,19 +173,18 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time, * next_freq (as calculated above) is returned, subject to policy min/max and * cpufreq driver limitations. */ -static unsigned int get_next_freq(struct sugov_cpu *sg_cpu, unsigned long util, - unsigned long max) +static unsigned int get_next_freq(struct sugov_policy *sg_policy, + unsigned long util, unsigned long max) { - struct sugov_policy *sg_policy = sg_cpu->sg_policy; struct cpufreq_policy *policy = sg_policy->policy; unsigned int freq = arch_scale_freq_invariant() ? 
policy->cpuinfo.max_freq : policy->cur; freq = (freq + (freq >> 2)) * util / max; - if (freq == sg_cpu->cached_raw_freq && sg_policy->next_freq != UINT_MAX) + if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX) return sg_policy->next_freq; - sg_cpu->cached_raw_freq = freq; + sg_policy->cached_raw_freq = freq; return cpufreq_driver_resolve_freq(policy, freq); } @@ -248,6 +250,19 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util, sg_cpu->iowait_boost >>= 1; } +#ifdef CONFIG_NO_HZ_COMMON +static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) +{ + unsigned long idle_calls = tick_nohz_get_idle_calls(); + bool ret = idle_calls == sg_cpu->saved_idle_calls; + + sg_cpu->saved_idle_calls = idle_calls; + return ret; +} +#else +static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } +#endif /* CONFIG_NO_HZ_COMMON */ + static void sugov_update_single(struct update_util_data *hook, u64 time, unsigned int flags) { @@ -256,6 +271,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, struct cpufreq_policy *policy = sg_policy->policy; unsigned long util, max; unsigned int next_f; + bool busy; sugov_set_iowait_boost(sg_cpu, time, flags); sg_cpu->last_update = time; @@ -263,40 +279,37 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, if (!sugov_should_update_freq(sg_policy, time)) return; + busy = sugov_cpu_is_busy(sg_cpu); + if (flags & SCHED_CPUFREQ_DL) { next_f = policy->cpuinfo.max_freq; } else { sugov_get_util(&util, &max, time); sugov_iowait_boost(sg_cpu, &util, &max); - next_f = get_next_freq(sg_cpu, util, max); + next_f = get_next_freq(sg_policy, util, max); + /* + * Do not reduce the frequency if the CPU has not been idle + * recently, as the reduction is likely to be premature then. 
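+ * ("Not idle" here means the nohz idle-call count has not
+ * advanced since the previous sample; see sugov_cpu_is_busy().)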
+ */ + if (busy && next_f < sg_policy->next_freq) + next_f = sg_policy->next_freq; } sugov_update_commit(sg_policy, time, next_f); } -static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, - unsigned long util, unsigned long max, - unsigned int flags) +static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu) { struct sugov_policy *sg_policy = sg_cpu->sg_policy; struct cpufreq_policy *policy = sg_policy->policy; - unsigned int max_f = policy->cpuinfo.max_freq; u64 last_freq_update_time = sg_policy->last_freq_update_time; + unsigned long util = 0, max = 1; unsigned int j; - if (flags & SCHED_CPUFREQ_DL) - return max_f; - - sugov_iowait_boost(sg_cpu, &util, &max); - for_each_cpu(j, policy->cpus) { - struct sugov_cpu *j_sg_cpu; + struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j); unsigned long j_util, j_max; s64 delta_ns; - if (j == smp_processor_id()) - continue; - - j_sg_cpu = &per_cpu(sugov_cpu, j); /* * If the CPU utilization was last updated before the previous * frequency update and the time elapsed between the last update @@ -310,7 +323,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, continue; } if (j_sg_cpu->flags & SCHED_CPUFREQ_DL) - return max_f; + return policy->cpuinfo.max_freq; j_util = j_sg_cpu->util; j_max = j_sg_cpu->max; @@ -322,7 +335,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, sugov_iowait_boost(j_sg_cpu, &util, &max); } - return get_next_freq(sg_cpu, util, max); + return get_next_freq(sg_policy, util, max); } static void sugov_update_shared(struct update_util_data *hook, u64 time, @@ -345,7 +358,11 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time, sg_cpu->last_update = time; if (sugov_should_update_freq(sg_policy, time)) { - next_f = sugov_next_freq_shared(sg_cpu, util, max, flags); + if (flags & SCHED_CPUFREQ_DL) + next_f = sg_policy->policy->cpuinfo.max_freq; + else + next_f = sugov_next_freq_shared(sg_cpu); + sugov_update_commit(sg_policy, time, next_f); } @@ -371,15 +388,15 @@ static void sugov_irq_work(struct irq_work *irq_work) sg_policy = container_of(irq_work, struct sugov_policy, irq_work); /* - * For Real Time and Deadline tasks, schedutil governor shoots the - * frequency to maximum. And special care must be taken to ensure that - * this kthread doesn't result in that. + * For RT and deadline tasks, the schedutil governor shoots the + * frequency to maximum. Special care must be taken to ensure that this + * kthread doesn't result in the same behavior. * * This is (mostly) guaranteed by the work_in_progress flag. The flag is - * updated only at the end of the sugov_work() and before that schedutil - * rejects all other frequency scaling requests. + * updated only at the end of the sugov_work() function and before that + * the schedutil governor rejects all other frequency scaling requests. * - * Though there is a very rare case where the RT thread yields right + * There is a very rare case though, where the RT thread yields right * after the work_in_progress flag is cleared. The effects of that are * neglected for now. 
*/ @@ -489,15 +506,12 @@ static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy) return NULL; sg_policy->policy = policy; - init_irq_work(&sg_policy->irq_work, sugov_irq_work); - mutex_init(&sg_policy->work_lock); raw_spin_lock_init(&sg_policy->update_lock); return sg_policy; } static void sugov_policy_free(struct sugov_policy *sg_policy) { - mutex_destroy(&sg_policy->work_lock); kfree(sg_policy); } @@ -531,6 +545,9 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy) sg_policy->thread = thread; kthread_bind_mask(thread, policy->related_cpus); + init_irq_work(&sg_policy->irq_work, sugov_irq_work); + mutex_init(&sg_policy->work_lock); + wake_up_process(thread); return 0; @@ -544,6 +561,7 @@ static void sugov_kthread_stop(struct sugov_policy *sg_policy) flush_kthread_worker(&sg_policy->worker); kthread_stop(sg_policy->thread); + mutex_destroy(&sg_policy->work_lock); } static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy) @@ -578,9 +596,13 @@ static int sugov_init(struct cpufreq_policy *policy) if (policy->governor_data) return -EBUSY; + cpufreq_enable_fast_switch(policy); + sg_policy = sugov_policy_alloc(policy); - if (!sg_policy) - return -ENOMEM; + if (!sg_policy) { + ret = -ENOMEM; + goto disable_fast_switch; + } ret = sugov_kthread_create(sg_policy); if (ret) @@ -623,13 +645,11 @@ static int sugov_init(struct cpufreq_policy *policy) if (ret) goto fail; - out: +out: mutex_unlock(&global_tunables_lock); - - cpufreq_enable_fast_switch(policy); return 0; - fail: +fail: policy->governor_data = NULL; sugov_tunables_free(tunables); @@ -640,6 +660,10 @@ free_sg_policy: mutex_unlock(&global_tunables_lock); sugov_policy_free(sg_policy); + +disable_fast_switch: + cpufreq_disable_fast_switch(policy); + pr_err("initialization failed (error %d)\n", ret); return ret; } @@ -650,8 +674,6 @@ static int sugov_exit(struct cpufreq_policy *policy) struct sugov_tunables *tunables = sg_policy->tunables; unsigned int count; - cpufreq_disable_fast_switch(policy); - mutex_lock(&global_tunables_lock); count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook); @@ -664,6 +686,7 @@ static int sugov_exit(struct cpufreq_policy *policy) sugov_kthread_stop(sg_policy); sugov_policy_free(sg_policy); + cpufreq_disable_fast_switch(policy); return 0; } @@ -681,25 +704,19 @@ static int sugov_start(struct cpufreq_policy *policy) sg_policy->next_freq = UINT_MAX; sg_policy->work_in_progress = false; sg_policy->need_freq_update = false; + sg_policy->cached_raw_freq = 0; for_each_cpu(cpu, policy->cpus) { struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); + memset(sg_cpu, 0, sizeof(*sg_cpu)); sg_cpu->sg_policy = sg_policy; - if (policy_is_shared(policy)) { - sg_cpu->util = 0; - sg_cpu->max = 0; - sg_cpu->flags = SCHED_CPUFREQ_DL; - sg_cpu->last_update = 0; - sg_cpu->cached_raw_freq = 0; - sg_cpu->iowait_boost = 0; - sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; - cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, - sugov_update_shared); - } else { - cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, - sugov_update_single); - } + sg_cpu->flags = SCHED_CPUFREQ_DL; + sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; + cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, + policy_is_shared(policy) ? 
+ sugov_update_shared : + sugov_update_single); } return 0; } @@ -714,9 +731,10 @@ static int sugov_stop(struct cpufreq_policy *policy) synchronize_sched(); - irq_work_sync(&sg_policy->irq_work); - kthread_cancel_work_sync(&sg_policy->work); - + if (!policy->fast_switch_enabled) { + irq_work_sync(&sg_policy->irq_work); + kthread_cancel_work_sync(&sg_policy->work); + } return 0; } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index c03d51a017bf..ee095f4e7230 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1823,6 +1823,7 @@ static int find_lowest_rq_hmp(struct task_struct *task) * the best one based on our affinity and topology. */ +retry: for_each_sched_cluster(cluster) { if (boost_on_big && cluster->capacity != max_possible_capacity) continue; @@ -1830,6 +1831,15 @@ static int find_lowest_rq_hmp(struct task_struct *task) cpumask_and(&candidate_mask, &cluster->cpus, lowest_mask); cpumask_andnot(&candidate_mask, &candidate_mask, cpu_isolated_mask); + /* + * When placement boost is active, if there is no eligible CPU + * in the highest capacity cluster, we fallback to the other + * clusters. So clear the CPUs of the traversed cluster from + * the lowest_mask. + */ + if (unlikely(boost_on_big)) + cpumask_andnot(lowest_mask, lowest_mask, + &cluster->cpus); if (cpumask_empty(&candidate_mask)) continue; @@ -1869,6 +1879,11 @@ static int find_lowest_rq_hmp(struct task_struct *task) break; } + if (unlikely(boost_on_big && best_cpu == -1)) { + boost_on_big = 0; + goto retry; + } + return best_cpu; } #endif /* CONFIG_SCHED_HMP */ diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c index 6e053bd9830c..92c3aae8e056 100644 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -72,7 +72,15 @@ static cpumask_t mpc_mask = CPU_MASK_ALL; __read_mostly unsigned int walt_ravg_window = 20000000; /* Min window size (in ns) = 10ms */ +#ifdef CONFIG_HZ_300 +/* + * The tick interval becomes 3333333 ns due to + * rounding error when HZ=300. + */ +#define MIN_SCHED_RAVG_WINDOW (3333333 * 6) +#else #define MIN_SCHED_RAVG_WINDOW 10000000 +#endif /* Max window size (in ns) = 1s */ #define MAX_SCHED_RAVG_WINDOW 1000000000 diff --git a/kernel/sysctl.c b/kernel/sysctl.c index f27d2ba78d14..8576e6385d63 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2380,9 +2380,12 @@ static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp, if (write) { if (*negp) return -EINVAL; + if (*lvalp > UINT_MAX) + return -EINVAL; *valp = *lvalp; } else { unsigned int val = *valp; + *negp = false; *lvalp = (unsigned long)val; } return 0; } diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index ec2102104cb8..333f627a3a3b 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -896,6 +896,18 @@ ktime_t tick_nohz_get_sleep_length(void) return ts->sleep_length; } +/** + * tick_nohz_get_idle_calls - return the current idle calls counter value + * + * Called from the schedutil frequency scaling governor in scheduler context.
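+ *
+ * The caller compares successive return values: an unchanged count
+ * between two frequency updates means the CPU has not gone idle in
+ * between, which sugov_cpu_is_busy() treats as "busy".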
+ */ +unsigned long tick_nohz_get_idle_calls(void) +{ + struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); + + return ts->idle_calls; +} + static void tick_nohz_account_idle_ticks(struct tick_sched *ts) { #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 12ea4ea619ee..e9092a0247bf 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -659,30 +659,25 @@ static int create_trace_kprobe(int argc, char **argv) pr_info("Probe point is not specified.\n"); return -EINVAL; } - if (isdigit(argv[1][0])) { - if (is_return) { - pr_info("Return probe point must be a symbol.\n"); - return -EINVAL; - } - /* an address specified */ - ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr); - if (ret) { - pr_info("Failed to parse address.\n"); - return ret; - } - } else { + + /* Try to parse an address. If that fails, try to read the + * input as a symbol. */ + if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) { /* a symbol specified */ symbol = argv[1]; /* TODO: support .init module functions */ ret = traceprobe_split_symbol_offset(symbol, &offset); if (ret) { - pr_info("Failed to parse symbol.\n"); + pr_info("Failed to parse either an address or a symbol.\n"); return ret; } if (offset && is_return) { pr_info("Return probe must be used without offset.\n"); return -EINVAL; } + } else if (is_return) { + pr_info("Return probe point must be a symbol.\n"); + return -EINVAL; } argc -= 2; argv += 2; diff --git a/mm/mmap.c b/mm/mmap.c index 092729c2cb72..16743bf76a88 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2206,7 +2206,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) /* Guard against exceeding limits of the address space. */ address &= PAGE_MASK; - if (address >= TASK_SIZE) + if (address >= (TASK_SIZE & PAGE_MASK)) return -ENOMEM; address += PAGE_SIZE; diff --git a/mm/vmscan.c b/mm/vmscan.c index 94fecacf0ddc..5f6e29f25af9 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2676,7 +2676,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc) if (!populated_zone(zone)) continue; - classzone_idx = requested_highidx; + classzone_idx = gfp_zone(sc->gfp_mask); while (!populated_zone(zone->zone_pgdat->node_zones + classzone_idx)) classzone_idx--; diff --git a/net/core/dev.c b/net/core/dev.c index 9a3aaba15c5a..1b7fb54d5d9a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4383,6 +4383,12 @@ struct packet_offload *gro_find_complete_by_type(__be16 type) } EXPORT_SYMBOL(gro_find_complete_by_type); +static void napi_skb_free_stolen_head(struct sk_buff *skb) +{ + skb_dst_drop(skb); + kmem_cache_free(skbuff_head_cache, skb); +} + static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) { switch (ret) { @@ -4396,12 +4402,10 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) break; case GRO_MERGED_FREE: - if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) { - skb_dst_drop(skb); - kmem_cache_free(skbuff_head_cache, skb); - } else { + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) + napi_skb_free_stolen_head(skb); + else __kfree_skb(skb); - } break; case GRO_HELD: @@ -4467,10 +4471,16 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, break; case GRO_DROP: - case GRO_MERGED_FREE: napi_reuse_skb(napi, skb); break; + case GRO_MERGED_FREE: + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) + napi_skb_free_stolen_head(skb); + else + napi_reuse_skb(napi, skb); + break; + case GRO_MERGED: break; } @@
-7088,8 +7098,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, } else { netdev_stats_to_stats64(storage, &dev->stats); } - storage->rx_dropped += atomic_long_read(&dev->rx_dropped); - storage->tx_dropped += atomic_long_read(&dev->tx_dropped); + storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped); + storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped); return storage; } EXPORT_SYMBOL(dev_get_stats); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 57e5938fd669..9bdd7847ef3a 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2272,6 +2272,8 @@ int tcp_disconnect(struct sock *sk, int flags) tcp_init_send_head(sk); memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); __sk_dst_reset(sk); + dst_release(sk->sk_rx_dst); + sk->sk_rx_dst = NULL; tcp_saved_syn_free(tp); WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 710101376a76..69d52fee247e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2166,8 +2166,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; - int cnt, oldcnt; - int err; + int cnt, oldcnt, lost; unsigned int mss; /* Use SACK to deduce losses of new sequences sent during recovery */ const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq; @@ -2207,9 +2206,10 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) break; mss = tcp_skb_mss(skb); - err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, - mss, GFP_ATOMIC); - if (err < 0) + /* If needed, chop off the prefix to mark as lost. */ + lost = (packets - oldcnt) * mss; + if (lost < skb->len && + tcp_fragment(sk, skb, lost, mss, GFP_ATOMIC) < 0) break; cnt = packets; } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 55e928819846..4b707ad4ffbd 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1801,17 +1801,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) { - if (ifp->flags&IFA_F_PERMANENT) { - spin_lock_bh(&ifp->lock); - addrconf_del_dad_work(ifp); - ifp->flags |= IFA_F_TENTATIVE; - if (dad_failed) - ifp->flags |= IFA_F_DADFAILED; - spin_unlock_bh(&ifp->lock); - if (dad_failed) - ipv6_ifa_notify(0, ifp); - in6_ifa_put(ifp); - } else if (ifp->flags&IFA_F_TEMPORARY) { + if (ifp->flags&IFA_F_TEMPORARY) { struct inet6_ifaddr *ifpub; spin_lock_bh(&ifp->lock); ifpub = ifp->ifpub; @@ -1824,6 +1814,16 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) spin_unlock_bh(&ifp->lock); } ipv6_del_addr(ifp); + } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) { + spin_lock_bh(&ifp->lock); + addrconf_del_dad_work(ifp); + ifp->flags |= IFA_F_TENTATIVE; + if (dad_failed) + ifp->flags |= IFA_F_DADFAILED; + spin_unlock_bh(&ifp->lock); + if (dad_failed) + ipv6_ifa_notify(0, ifp); + in6_ifa_put(ifp); } else { ipv6_del_addr(ifp); } @@ -3212,6 +3212,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct inet6_dev *idev = __in6_dev_get(dev); + struct net *net = dev_net(dev); int run_pending = 0; int err; @@ -3227,7 +3228,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, case NETDEV_CHANGEMTU: /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. 
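 * Passing "dev != net->loopback_dev" below keeps the loopback
 * device's inet6_dev from being torn down on an MTU change.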
*/ if (dev->mtu < IPV6_MIN_MTU) { - addrconf_ifdown(dev, 1); + addrconf_ifdown(dev, dev != net->loopback_dev); break; } @@ -3340,7 +3341,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, * IPV6_MIN_MTU stop IPv6 on this interface. */ if (dev->mtu < IPV6_MIN_MTU) - addrconf_ifdown(dev, 1); + addrconf_ifdown(dev, dev != net->loopback_dev); } break; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 1ac06723f0d7..f60e8caea767 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -767,10 +767,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, goto next_iter; } - if (iter->dst.dev == rt->dst.dev && - iter->rt6i_idev == rt->rt6i_idev && - ipv6_addr_equal(&iter->rt6i_gateway, - &rt->rt6i_gateway)) { + if (rt6_duplicate_nexthop(iter, rt)) { if (rt->rt6i_nsiblings) rt->rt6i_nsiblings = 0; if (!(iter->rt6i_flags & RTF_EXPIRES)) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index e98613d2f34f..dd37fe0b6a49 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2822,17 +2822,11 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list, struct rt6_info *rt, struct fib6_config *r_cfg) { struct rt6_nh *nh; - struct rt6_info *rtnh; int err = -EEXIST; list_for_each_entry(nh, rt6_nh_list, next) { /* check if rt6_info already exists */ - rtnh = nh->rt6_info; - - if (rtnh->dst.dev == rt->dst.dev && - rtnh->rt6i_idev == rt->rt6i_idev && - ipv6_addr_equal(&rtnh->rt6i_gateway, - &rt->rt6i_gateway)) + if (rt6_duplicate_nexthop(nh->rt6_info, rt)) return err; } diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 0936a4a32b47..e353e3255206 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@ -78,7 +78,7 @@ int rds_tcp_accept_one(struct socket *sock) struct inet_sock *inet; struct rds_tcp_connection *rs_tcp; - ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family, + ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type, sock->sk->sk_protocol, &new_sock); if (ret) diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c index ae60f35b363d..b17556c346ce 100644 --- a/net/rmnet_data/rmnet_data_handlers.c +++ b/net/rmnet_data/rmnet_data_handlers.c @@ -476,10 +476,12 @@ static rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb, if (likely((ckresult == RMNET_MAP_CHECKSUM_OK) || (ckresult == RMNET_MAP_CHECKSUM_SKIPPED))) skb->ip_summed |= CHECKSUM_UNNECESSARY; - else if (ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION - && ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT - && ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET - && ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) { + else if (ckresult != + RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION && + ckresult != RMNET_MAP_CHECKSUM_VALIDATION_FAILED && + ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT && + ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET && + ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) { rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM); return RX_HANDLER_CONSUMED; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 5474dc7c125a..ca4ecc246347 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1004,6 +1004,9 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, return sch; } + /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */ + if (ops->destroy) + ops->destroy(sch); err_out3: dev_put(dev); kfree((char *) sch - sch->padded); diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 
13d6f83ec491..45d4b2f22f62 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -636,7 +636,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt) q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN * sizeof(u32)); if (!q->hhf_arrays[i]) { - hhf_destroy(sch); + /* Note: hhf_destroy() will be called + * by our caller. + */ return -ENOMEM; } } @@ -647,7 +649,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt) q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN / BITS_PER_BYTE); if (!q->hhf_valid_bits[i]) { - hhf_destroy(sch); + /* Note: hhf_destroy() will be called + * by our caller. + */ return -ENOMEM; } } diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index 3e82f047caaf..d9c84328e7eb 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c @@ -52,7 +52,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt) /* pre-allocate qdiscs, attachment can't fail */ priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), GFP_KERNEL); - if (priv->qdiscs == NULL) + if (!priv->qdiscs) return -ENOMEM; for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { @@ -60,18 +60,14 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt) qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops, TC_H_MAKE(TC_H_MAJ(sch->handle), TC_H_MIN(ntx + 1))); - if (qdisc == NULL) - goto err; + if (!qdisc) + return -ENOMEM; priv->qdiscs[ntx] = qdisc; qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; } sch->flags |= TCQ_F_MQROOT; return 0; - -err: - mq_destroy(sch); - return -ENOMEM; } static void mq_attach(struct Qdisc *sch) diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index ad70ecf57ce7..66bccc5ff4ea 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -117,20 +117,17 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) /* pre-allocate qdisc, attachment can't fail */ priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), GFP_KERNEL); - if (priv->qdiscs == NULL) { - err = -ENOMEM; - goto err; - } + if (!priv->qdiscs) + return -ENOMEM; for (i = 0; i < dev->num_tx_queues; i++) { dev_queue = netdev_get_tx_queue(dev, i); qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops, TC_H_MAKE(TC_H_MAJ(sch->handle), TC_H_MIN(i + 1))); - if (qdisc == NULL) { - err = -ENOMEM; - goto err; - } + if (!qdisc) + return -ENOMEM; + priv->qdiscs[i] = qdisc; qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; } @@ -143,7 +140,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) priv->hw_owned = 1; err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc); if (err) - goto err; + return err; } else { netdev_set_num_tc(dev, qopt->num_tc); for (i = 0; i < qopt->num_tc; i++) @@ -157,10 +154,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) sch->flags |= TCQ_F_MQROOT; return 0; - -err: - mqprio_destroy(sch); - return err; } static void mqprio_attach(struct Qdisc *sch) diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 498f0a2cb47f..4431e2833e45 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -742,9 +742,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt) q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor); q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows); if (!q->ht || !q->slots) { - sfq_destroy(sch); + /* Note: sfq_destroy() will be called by our caller */ return -ENOMEM; } + for (i = 0; i < q->divisor; i++) q->ht[i] = SFQ_EMPTY_SLOT; diff --git a/net/wireless/db.txt b/net/wireless/db.txt index 1afd88a87e0f..0727a6e9f780 100644 --- a/net/wireless/db.txt +++ b/net/wireless/db.txt @@ -539,6 
+539,12 @@ country GH: DFS-FCC (5490 - 5730 @ 160), (24), DFS (5735 - 5835 @ 80), (30) +country GI: DFS-ETSI + (2402 - 2482 @ 40), (20) + (5170 - 5250 @ 80), (23), AUTO-BW + (5250 - 5330 @ 80), (23), DFS, AUTO-BW + (5490 - 5710 @ 160), (30), DFS + country GL: DFS-ETSI (2402 - 2482 @ 40), (20) (5170 - 5250 @ 80), (23), AUTO-BW @@ -679,10 +685,6 @@ country IN: (5170 - 5330 @ 160), (23) (5735 - 5835 @ 80), (30) -country IR: - (2402 - 2482 @ 40), (20) - (5735 - 5835 @ 80), (30) - country IS: DFS-ETSI (2402 - 2482 @ 40), (20) (5170 - 5250 @ 80), (23), AUTO-BW @@ -1417,7 +1419,7 @@ country UG: DFS-FCC country US: DFS-FCC (2402 - 2472 @ 40), (30) - (5170 - 5250 @ 80), (24), AUTO-BW + (5170 - 5250 @ 80), (30), AUTO-BW (5250 - 5330 @ 80), (24), DFS, AUTO-BW (5490 - 5730 @ 160), (24), DFS (5735 - 5835 @ 80), (30) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d0d09c290ff8..2a9ec3e05c73 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -302,8 +302,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, [NL80211_ATTR_PID] = { .type = NLA_U32 }, [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, - [NL80211_ATTR_PMKID] = { .type = NLA_BINARY, - .len = WLAN_PMKID_LEN }, + [NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN }, [NL80211_ATTR_DURATION] = { .type = NLA_U32 }, [NL80211_ATTR_COOKIE] = { .type = NLA_U64 }, [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED }, @@ -359,6 +358,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 }, [NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 }, + [NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 }, [NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 }, [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 }, @@ -6124,6 +6124,10 @@ static int validate_scan_freqs(struct nlattr *freqs) struct nlattr *attr1, *attr2; int n_channels = 0, tmp1, tmp2; + nla_for_each_nested(attr1, freqs, tmp1) + if (nla_len(attr1) != sizeof(u32)) + return 0; + nla_for_each_nested(attr1, freqs, tmp1) { n_channels++; /* diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index c72bcb33496a..02996be239bc 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3410,7 +3410,7 @@ sub process { $fixedline =~ s/\s*=\s*$/ = {/; fix_insert_line($fixlinenr, $fixedline); $fixedline = $line; - $fixedline =~ s/^(.\s*){\s*/$1/; + $fixedline =~ s/^(.\s*)\{\s*/$1/; fix_insert_line($fixlinenr, $fixedline); } } @@ -3760,7 +3760,7 @@ sub process { my $fixedline = rtrim($prevrawline) . " {"; fix_insert_line($fixlinenr, $fixedline); $fixedline = $rawline; - $fixedline =~ s/^(.\s*){\s*/$1\t/; + $fixedline =~ s/^(.\s*)\{\s*/$1\t/; if ($fixedline !~ /^\+\s*$/) { fix_insert_line($fixlinenr, $fixedline); } @@ -4249,7 +4249,7 @@ sub process { if (ERROR("SPACING", "space required before the open brace '{'\n" . 
$herecurr) && $fix) { - $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/; + $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/; } } diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 696ccfa08d10..31898856682e 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c @@ -428,7 +428,7 @@ static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key, static struct key *request_master_key(struct encrypted_key_payload *epayload, const u8 **master_key, size_t *master_keylen) { - struct key *mkey = NULL; + struct key *mkey = ERR_PTR(-EINVAL); if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) { diff --git a/sound/soc/msm/apq8096-auto.c b/sound/soc/msm/apq8096-auto.c index 2ae78f75c340..b1dff8764618 100644 --- a/sound/soc/msm/apq8096-auto.c +++ b/sound/soc/msm/apq8096-auto.c @@ -3481,12 +3481,13 @@ static struct snd_soc_dai_link apq8096_common_dai_links[] = { .be_id = MSM_FRONTEND_DAI_MULTIMEDIA15, }, { - .name = "MSM8996 Compress9", - .stream_name = "Compress9", + .name = "MSM8996 ULL NOIRQ 2", + .stream_name = "MM_NOIRQ_2", .cpu_dai_name = "MultiMedia16", - .platform_name = "msm-compress-dsp", + .platform_name = "msm-pcm-dsp-noirq", .dynamic = 1, .dpcm_playback = 1, + .dpcm_capture = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c index 9be628cc2351..5facafdc7729 100644 --- a/sound/soc/msm/msm-dai-fe.c +++ b/sound/soc/msm/msm-dai-fe.c @@ -2496,8 +2496,21 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { .rate_min = 8000, .rate_max = 384000, }, + .capture = { + .stream_name = "MultiMedia16 Capture", + .aif_name = "MM_UL16", + .rates = (SNDRV_PCM_RATE_8000_48000| + SNDRV_PCM_RATE_KNOT), + .formats = (SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), + .channels_min = 1, + .channels_max = 8, + .rate_min = 8000, + .rate_max = 48000, + }, .ops = &msm_fe_Multimedia_dai_ops, - .compress_new = snd_soc_new_compress, .name = "MultiMedia16", .probe = fe_dai_probe, }, diff --git a/sound/soc/msm/msm8996.c b/sound/soc/msm/msm8996.c index 4eafd322d50d..010dfa3322a0 100644 --- a/sound/soc/msm/msm8996.c +++ b/sound/soc/msm/msm8996.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2910,12 +2910,13 @@ static struct snd_soc_dai_link msm8996_common_dai_links[] = { .be_id = MSM_FRONTEND_DAI_MULTIMEDIA15, }, { - .name = "MSM8996 Compress9", - .stream_name = "Compress9", + .name = "MSM8996 ULL NOIRQ_2", + .stream_name = "MM_NOIRQ_2", .cpu_dai_name = "MultiMedia16", - .platform_name = "msm-compress-dsp", + .platform_name = "msm-pcm-dsp-noirq", .dynamic = 1, .dpcm_playback = 1, + .dpcm_capture = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c index 87ad98ec1913..b8bd32e046a3 100644 --- a/sound/soc/msm/msm8998.c +++ b/sound/soc/msm/msm8998.c @@ -5315,12 +5315,13 @@ static struct snd_soc_dai_link msm_common_dai_links[] = { .be_id = MSM_FRONTEND_DAI_MULTIMEDIA15, }, { - .name = MSM_DAILINK_NAME(Compress9), - .stream_name = "Compress9", + .name = MSM_DAILINK_NAME(ULL_NOIRQ_2), + .stream_name = "MM_NOIRQ_2", .cpu_dai_name = "MultiMedia16", - .platform_name = "msm-compress-dsp", + .platform_name = "msm-pcm-dsp-noirq", .dynamic = 1, .dpcm_playback = 1, + .dpcm_capture = 1, .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .codec_dai_name = "snd-soc-dummy-dai", diff --git a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c index e312a879b86a..1286d3185780 100644 --- a/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-audio-effects-q6-v2.c @@ -155,7 +155,7 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "VIRT ENABLE", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_VIRTUALIZER; *updt_params++ = @@ -183,7 +183,7 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "VIRT STRENGTH", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_VIRTUALIZER; *updt_params++ = @@ -211,7 +211,7 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "VIRT OUT_TYPE", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_VIRTUALIZER; *updt_params++ = @@ -239,7 +239,7 @@ int msm_audio_effects_virtualizer_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "VIRT GAIN_ADJUST", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_VIRTUALIZER; *updt_params++ = @@ -318,7 +318,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_ENABLE", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -346,7 +346,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_MODE", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -374,7 +374,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_PRESET", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -402,7 +402,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_WET_MIX", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -430,7 +430,7 @@ int 
msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_GAIN_ADJUST", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -458,7 +458,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_ROOM_LEVEL", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -486,7 +486,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_ROOM_HF_LEVEL", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -514,7 +514,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_DECAY_TIME", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -542,7 +542,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_DECAY_HF_RATIO", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -570,7 +570,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_REFLECTIONS_LEVEL", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -598,7 +598,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_REFLECTIONS_DELAY", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -626,7 +626,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_LEVEL", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -654,7 +654,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_DELAY", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -682,7 +682,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_DIFFUSION", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -710,7 +710,7 @@ int msm_audio_effects_reverb_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "REVERB_DENSITY", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_REVERB; *updt_params++ = @@ -790,7 +790,7 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "BASS_BOOST_ENABLE", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_BASS_BOOST; *updt_params++ = @@ -818,7 +818,7 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "BASS_BOOST_MODE", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_BASS_BOOST; *updt_params++ = @@ -846,7 +846,7 @@ int msm_audio_effects_bass_boost_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "BASS_BOOST_STRENGTH", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_BASS_BOOST; *updt_params++ = @@ -924,7 +924,7 @@ int msm_audio_effects_pbe_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "PBE_ENABLE", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_PBE; *updt_params++ = @@ -950,7 +950,7 @@ int 
msm_audio_effects_pbe_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "PBE_PARAM", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_PBE; *updt_params++ = @@ -1035,7 +1035,7 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "EQ_ENABLE", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_POPLESS_EQUALIZER; *updt_params++ = @@ -1103,7 +1103,7 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "EQ_CONFIG", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_POPLESS_EQUALIZER; *updt_params++ = @@ -1154,7 +1154,7 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "EQ_BAND_INDEX", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_POPLESS_EQUALIZER; *updt_params++ = @@ -1186,7 +1186,7 @@ int msm_audio_effects_popless_eq_handler(struct audio_client *ac, MAX_INBAND_PARAM_SZ, "EQ_SINGLE_BAND_FREQ", rc); if (rc != 0) - break; + goto invalid_config; *updt_params++ = AUDPROC_MODULE_ID_POPLESS_EQUALIZER; *updt_params++ = @@ -1276,7 +1276,7 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac, "VOLUME/VOLUME2_GAIN_2CH", rc); if (rc != 0) - break; + goto invalid_config; if (instance == SOFT_VOLUME_INSTANCE_2) *updt_params++ = ASM_MODULE_ID_VOL_CTRL2; @@ -1325,7 +1325,7 @@ static int __msm_audio_effects_volume_handler(struct audio_client *ac, "VOLUME/VOLUME2_GAIN_MASTER", rc); if (rc != 0) - break; + goto invalid_config; if (instance == SOFT_VOLUME_INSTANCE_2) *updt_params++ = ASM_MODULE_ID_VOL_CTRL2; diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c index e92065d0412d..38937612dcce 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c @@ -3675,6 +3675,11 @@ static const struct snd_kcontrol_new ext_ec_ref_mux_ul9 = msm_route_ec_ref_rx_enum[0], msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put); +static const struct snd_kcontrol_new ext_ec_ref_mux_ul16 = + SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL16 MUX Mux", + msm_route_ec_ref_rx_enum[0], + msm_routing_ec_ref_rx_get, msm_routing_ec_ref_rx_put); + static const struct snd_kcontrol_new ext_ec_ref_mux_ul17 = SOC_DAPM_ENUM_EXT("AUDIO_REF_EC_UL17 MUX Mux", msm_route_ec_ref_rx_enum[0], @@ -7370,6 +7375,114 @@ static const struct snd_kcontrol_new mmul8_mixer_controls[] = { msm_routing_put_audio_mixer), }; +static const struct snd_kcontrol_new mmul16_mixer_controls[] = { + SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_MI2S_TX", MSM_BACKEND_DAI_PRI_MI2S_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("SEC_MI2S_TX", MSM_BACKEND_DAI_SECONDARY_MI2S_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("INT2_MI2S_TX", MSM_BACKEND_DAI_INT2_MI2S_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("INT3_MI2S_TX", MSM_BACKEND_DAI_INT3_MI2S_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, 
msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("INTERNAL_FM_TX", MSM_BACKEND_DAI_INT_FM_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("INTERNAL_BT_SCO_TX", MSM_BACKEND_DAI_INT_BT_SCO_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("AFE_PCM_TX", MSM_BACKEND_DAI_AFE_PCM_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("VOC_REC_DL", MSM_BACKEND_DAI_INCALL_RECORD_RX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("VOC_REC_UL", MSM_BACKEND_DAI_INCALL_RECORD_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("SLIM_6_TX", MSM_BACKEND_DAI_SLIMBUS_6_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_0", MSM_BACKEND_DAI_PRI_TDM_TX_0, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_1", MSM_BACKEND_DAI_PRI_TDM_TX_1, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_2", MSM_BACKEND_DAI_PRI_TDM_TX_2, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("PRI_TDM_TX_3", MSM_BACKEND_DAI_PRI_TDM_TX_3, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("SEC_TDM_TX_0", MSM_BACKEND_DAI_SEC_TDM_TX_0, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("SEC_TDM_TX_1", MSM_BACKEND_DAI_SEC_TDM_TX_1, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("SEC_TDM_TX_2", MSM_BACKEND_DAI_SEC_TDM_TX_2, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("SEC_TDM_TX_3", MSM_BACKEND_DAI_SEC_TDM_TX_3, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("TERT_TDM_TX_0", MSM_BACKEND_DAI_TERT_TDM_TX_0, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("TERT_TDM_TX_1", MSM_BACKEND_DAI_TERT_TDM_TX_1, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("TERT_TDM_TX_2", MSM_BACKEND_DAI_TERT_TDM_TX_2, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("TERT_TDM_TX_3", MSM_BACKEND_DAI_TERT_TDM_TX_3, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("QUAT_TDM_TX_0", MSM_BACKEND_DAI_QUAT_TDM_TX_0, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("QUAT_TDM_TX_1", MSM_BACKEND_DAI_QUAT_TDM_TX_1, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("QUAT_TDM_TX_2", 
MSM_BACKEND_DAI_QUAT_TDM_TX_2, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("QUAT_TDM_TX_3", MSM_BACKEND_DAI_QUAT_TDM_TX_3, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("SLIM_7_TX", MSM_BACKEND_DAI_SLIMBUS_7_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("USB_AUDIO_TX", MSM_BACKEND_DAI_USB_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("MI2S_TX", MSM_BACKEND_DAI_MI2S_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("AUX_PCM_TX", MSM_BACKEND_DAI_AUXPCM_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("SEC_AUX_PCM_TX", MSM_BACKEND_DAI_SEC_AUXPCM_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), + SOC_SINGLE_EXT("QUAT_AUX_PCM_TX", MSM_BACKEND_DAI_QUAT_AUXPCM_TX, + MSM_FRONTEND_DAI_MULTIMEDIA16, 1, 0, msm_routing_get_audio_mixer, + msm_routing_put_audio_mixer), +}; + static const struct snd_kcontrol_new mmul9_mixer_controls[] = { SOC_SINGLE_EXT("SLIM_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX, MSM_FRONTEND_DAI_MULTIMEDIA9, 1, 0, msm_routing_get_audio_mixer, @@ -11745,6 +11858,7 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = { SND_SOC_DAPM_AIF_OUT("MM_UL6", "MultiMedia6 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL8", "MultiMedia8 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL9", "MultiMedia9 Capture", 0, 0, 0, 0), + SND_SOC_DAPM_AIF_OUT("MM_UL16", "MultiMedia16 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL17", "MultiMedia17 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL18", "MultiMedia18 Capture", 0, 0, 0, 0), SND_SOC_DAPM_AIF_OUT("MM_UL19", "MultiMedia19 Capture", 0, 0, 0, 0), @@ -12483,6 +12597,8 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = { mmul8_mixer_controls, ARRAY_SIZE(mmul8_mixer_controls)), SND_SOC_DAPM_MIXER("MultiMedia9 Mixer", SND_SOC_NOPM, 0, 0, mmul9_mixer_controls, ARRAY_SIZE(mmul9_mixer_controls)), + SND_SOC_DAPM_MIXER("MultiMedia16 Mixer", SND_SOC_NOPM, 0, 0, + mmul16_mixer_controls, ARRAY_SIZE(mmul16_mixer_controls)), SND_SOC_DAPM_MIXER("MultiMedia17 Mixer", SND_SOC_NOPM, 0, 0, mmul17_mixer_controls, ARRAY_SIZE(mmul17_mixer_controls)), SND_SOC_DAPM_MIXER("MultiMedia18 Mixer", SND_SOC_NOPM, 0, 0, @@ -12817,6 +12933,8 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = { &ext_ec_ref_mux_ul8), SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL9 MUX", SND_SOC_NOPM, 0, 0, &ext_ec_ref_mux_ul9), + SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL16 MUX", SND_SOC_NOPM, 0, 0, + &ext_ec_ref_mux_ul16), SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL17 MUX", SND_SOC_NOPM, 0, 0, &ext_ec_ref_mux_ul17), SND_SOC_DAPM_MUX("AUDIO_REF_EC_UL18 MUX", SND_SOC_NOPM, 0, 0, @@ -13099,6 +13217,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"MultiMedia8 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"}, {"MultiMedia3 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"MultiMedia5 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, + {"MultiMedia16 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"MultiMedia5 Mixer", "SLIM_7_TX", "SLIMBUS_7_TX"}, {"MultiMedia5 Mixer", "SLIM_8_TX", "SLIMBUS_8_TX"}, {"MI2S_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, @@ -13711,6 +13830,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"MultiMedia2 
Mixer", "MI2S_TX", "MI2S_TX"}, {"MultiMedia3 Mixer", "MI2S_TX", "MI2S_TX"}, {"MultiMedia5 Mixer", "MI2S_TX", "MI2S_TX"}, + {"MultiMedia16 Mixer", "MI2S_TX", "MI2S_TX"}, {"MultiMedia1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"MultiMedia2 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, {"MultiMedia6 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"}, @@ -13731,12 +13851,15 @@ static const struct snd_soc_dapm_route intercon[] = { {"MultiMedia1 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"}, {"MultiMedia3 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"}, {"MultiMedia5 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"}, + {"MultiMedia16 Mixer", "AUX_PCM_TX", "AUX_PCM_TX"}, + {"MultiMedia16 Mixer", "SEC_AUX_PCM_TX", "SEC_AUX_PCM_TX"}, {"MultiMedia1 Mixer", "TERT_AUXPCM_UL_TX", "TERT_AUX_PCM_TX"}, {"MultiMedia3 Mixer", "TERT_AUX_PCM_TX", "TERT_AUX_PCM_TX"}, {"MultiMedia5 Mixer", "TERT_AUX_PCM_TX", "TERT_AUX_PCM_TX"}, {"MultiMedia1 Mixer", "QUAT_AUXPCM_UL_TX", "QUAT_AUX_PCM_TX"}, {"MultiMedia3 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"}, {"MultiMedia5 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"}, + {"MultiMedia16 Mixer", "QUAT_AUX_PCM_TX", "QUAT_AUX_PCM_TX"}, {"MultiMedia2 Mixer", "SLIM_0_TX", "SLIMBUS_0_TX"}, {"MultiMedia2 Mixer", "SLIM_6_TX", "SLIMBUS_6_TX"}, {"MultiMedia2 Mixer", "SLIM_1_TX", "SLIMBUS_1_TX"}, @@ -13752,9 +13875,11 @@ static const struct snd_soc_dapm_route intercon[] = { {"MultiMedia6 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"}, {"MultiMedia3 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"}, {"MultiMedia5 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"}, + {"MultiMedia16 Mixer", "INT2_MI2S_TX", "INT2_MI2S_TX"}, {"MultiMedia6 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"}, {"MultiMedia3 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"}, {"MultiMedia5 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"}, + {"MultiMedia16 Mixer", "INT3_MI2S_TX", "INT3_MI2S_TX"}, {"MultiMedia6 Mixer", "PRI_MI2S_TX", "PRI_MI2S_TX"}, {"MultiMedia6 Mixer", "AUX_PCM_UL_TX", "AUX_PCM_TX"}, {"MultiMedia6 Mixer", "SEC_AUX_PCM_UL_TX", "SEC_AUX_PCM_TX"}, @@ -13935,6 +14060,24 @@ static const struct snd_soc_dapm_route intercon[] = { {"MultiMedia6 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"}, {"MultiMedia8 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"}, + {"MultiMedia16 Mixer", "PRI_TDM_TX_0", "PRI_TDM_TX_0"}, + {"MultiMedia16 Mixer", "PRI_TDM_TX_1", "PRI_TDM_TX_1"}, + {"MultiMedia16 Mixer", "PRI_TDM_TX_2", "PRI_TDM_TX_2"}, + {"MultiMedia16 Mixer", "PRI_TDM_TX_3", "PRI_TDM_TX_3"}, + {"MultiMedia16 Mixer", "SEC_TDM_TX_0", "SEC_TDM_TX_0"}, + {"MultiMedia16 Mixer", "SEC_TDM_TX_1", "SEC_TDM_TX_1"}, + {"MultiMedia16 Mixer", "SEC_TDM_TX_2", "SEC_TDM_TX_2"}, + {"MultiMedia16 Mixer", "SEC_TDM_TX_3", "SEC_TDM_TX_3"}, + {"MultiMedia16 Mixer", "TERT_TDM_TX_0", "TERT_TDM_TX_0"}, + {"MultiMedia16 Mixer", "TERT_TDM_TX_1", "TERT_TDM_TX_1"}, + {"MultiMedia16 Mixer", "TERT_TDM_TX_2", "TERT_TDM_TX_2"}, + {"MultiMedia16 Mixer", "TERT_TDM_TX_3", "TERT_TDM_TX_3"}, + {"MultiMedia16 Mixer", "QUAT_TDM_TX_0", "QUAT_TDM_TX_0"}, + {"MultiMedia16 Mixer", "QUAT_TDM_TX_1", "QUAT_TDM_TX_1"}, + {"MultiMedia16 Mixer", "QUAT_TDM_TX_2", "QUAT_TDM_TX_2"}, + {"MultiMedia16 Mixer", "QUAT_TDM_TX_3", "QUAT_TDM_TX_3"}, + {"MultiMedia16 Mixer", "USB_AUDIO_TX", "USB_AUDIO_TX"}, + {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia1", "MM_DL1"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia2", "MM_DL2"}, {"INTERNAL_BT_SCO_RX Audio Mixer", "MultiMedia3", "MM_DL3"}, @@ -14017,8 +14160,10 @@ static const struct snd_soc_dapm_route intercon[] = { {"MultiMedia19 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"}, {"MultiMedia5 Mixer", 
"INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"}, {"MultiMedia8 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"}, + {"MultiMedia16 Mixer", "INTERNAL_BT_SCO_TX", "INT_BT_SCO_TX"}, {"MultiMedia1 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"MultiMedia4 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, + {"MultiMedia16 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"MultiMedia17 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"MultiMedia18 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"MultiMedia19 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, @@ -14034,6 +14179,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"MultiMedia19 Mixer", "AFE_PCM_TX", "PCM_TX"}, {"MultiMedia5 Mixer", "AFE_PCM_TX", "PCM_TX"}, {"MultiMedia8 Mixer", "AFE_PCM_TX", "PCM_TX"}, + {"MultiMedia16 Mixer", "AFE_PCM_TX", "PCM_TX"}, {"MM_UL1", NULL, "MultiMedia1 Mixer"}, {"MultiMedia2 Mixer", "INTERNAL_FM_TX", "INT_FM_TX"}, {"MM_UL2", NULL, "MultiMedia2 Mixer"}, @@ -14043,6 +14189,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"MM_UL6", NULL, "MultiMedia6 Mixer"}, {"MM_UL8", NULL, "MultiMedia8 Mixer"}, {"MM_UL9", NULL, "MultiMedia9 Mixer"}, + {"MM_UL16", NULL, "MultiMedia16 Mixer"}, {"MM_UL17", NULL, "MultiMedia17 Mixer"}, {"MM_UL18", NULL, "MultiMedia18 Mixer"}, {"MM_UL19", NULL, "MultiMedia19 Mixer"}, @@ -14461,6 +14608,7 @@ static const struct snd_soc_dapm_route intercon[] = { {"MM_UL6", NULL, "AUDIO_REF_EC_UL6 MUX"}, {"MM_UL8", NULL, "AUDIO_REF_EC_UL8 MUX"}, {"MM_UL9", NULL, "AUDIO_REF_EC_UL9 MUX"}, + {"MM_UL16", NULL, "AUDIO_REF_EC_UL16 MUX"}, {"MM_UL17", NULL, "AUDIO_REF_EC_UL17 MUX"}, {"MM_UL18", NULL, "AUDIO_REF_EC_UL18 MUX"}, {"MM_UL19", NULL, "AUDIO_REF_EC_UL19 MUX"}, diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c index e1bfc950d0e3..018681309f2e 100644 --- a/sound/soc/msm/qdsp6v2/q6adm.c +++ b/sound/soc/msm/qdsp6v2/q6adm.c @@ -2364,7 +2364,8 @@ int adm_open(int port_id, int path, int rate, int channel_mode, int topology, struct adm_cmd_device_open_v5 open; struct adm_cmd_device_open_v6 open_v6; int ret = 0; - int port_idx, copp_idx, flags; + int port_idx, flags; + int copp_idx = -1; int tmp_port = q6audio_get_port_id(port_id); pr_debug("%s:port %#x path:%d rate:%d mode:%d perf_mode:%d,topo_id %d\n", @@ -2418,8 +2419,17 @@ int adm_open(int port_id, int path, int rate, int channel_mode, int topology, (topology == VPM_TX_DM_RFECNS_COPP_TOPOLOGY)) rate = 16000; - copp_idx = adm_get_idx_if_copp_exists(port_idx, topology, perf_mode, - rate, bit_width, app_type); + /* + * Routing driver reuses the same adm for streams with the same + * app_type, sample_rate etc. 
+ * This isn't allowed for ULL streams as per the DSP interface + */ + if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE) + copp_idx = adm_get_idx_if_copp_exists(port_idx, topology, + perf_mode, + rate, bit_width, + app_type); + if (copp_idx < 0) { copp_idx = adm_get_next_available_copp(port_idx); if (copp_idx >= MAX_COPPS_PER_PORT) { diff --git a/sound/soc/msm/sdm660-ext-dai-links.c b/sound/soc/msm/sdm660-ext-dai-links.c index 1c03d8c9e797..30c3ffe2347d 100644 --- a/sound/soc/msm/sdm660-ext-dai-links.c +++ b/sound/soc/msm/sdm660-ext-dai-links.c @@ -1270,10 +1270,10 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .be_id = MSM_FRONTEND_DAI_MULTIMEDIA15, }, {/* hw:x,33 */ - .name = MSM_DAILINK_NAME(Compress9), - .stream_name = "Compress9", + .name = MSM_DAILINK_NAME(ULL_NOIRQ_2), + .stream_name = "MM_NOIRQ_2", .cpu_dai_name = "MultiMedia16", - .platform_name = "msm-compress-dsp", + .platform_name = "msm-pcm-dsp-noirq", .dynamic = 1, .dpcm_capture = 1, .dpcm_playback = 1, diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c index f4219148e81c..0f28e100f535 100644 --- a/sound/soc/msm/sdm660-internal.c +++ b/sound/soc/msm/sdm660-internal.c @@ -2188,10 +2188,10 @@ static struct snd_soc_dai_link msm_int_dai[] = { .be_id = MSM_FRONTEND_DAI_MULTIMEDIA15, }, {/* hw:x,33 */ - .name = MSM_DAILINK_NAME(Compress9), - .stream_name = "Compress9", + .name = MSM_DAILINK_NAME(ULL_NOIRQ_2), + .stream_name = "MM_NOIRQ_2", .cpu_dai_name = "MultiMedia16", - .platform_name = "msm-compress-dsp", + .platform_name = "msm-pcm-dsp-noirq", .dynamic = 1, .dpcm_capture = 1, .dpcm_playback = 1, diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index fa7208a32d76..8a679b21f0c4 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h @@ -115,4 +115,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s #define WRITE_ONCE(x, val) \ ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) + +#ifndef __fallthrough +# if defined(__GNUC__) && __GNUC__ >= 7 +# define __fallthrough __attribute__ ((fallthrough)) +# else +# define __fallthrough +# endif +#endif + #endif /* _TOOLS_LINUX_COMPILER_H */ diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h index c808c7d02d21..e69118b2077e 100644 --- a/tools/lib/lockdep/uinclude/linux/lockdep.h +++ b/tools/lib/lockdep/uinclude/linux/lockdep.h @@ -8,7 +8,7 @@ #include <linux/utsname.h> #include <linux/compiler.h> -#define MAX_LOCK_DEPTH 2000UL +#define MAX_LOCK_DEPTH 255UL #define asmlinkage #define __visible diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c index d28c1b6a3b54..fa5d17af88b7 100644 --- a/tools/perf/arch/x86/tests/intel-cqm.c +++ b/tools/perf/arch/x86/tests/intel-cqm.c @@ -17,7 +17,7 @@ static pid_t spawn(void) if (pid) return pid; - while(1); + while(1) sleep(5); return 0; } diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c index 9223c164e545..1f86ee8fb831 100644 --- a/tools/perf/arch/x86/util/dwarf-regs.c +++ b/tools/perf/arch/x86/util/dwarf-regs.c @@ -63,6 +63,8 @@ struct pt_regs_offset { # define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)} #endif +/* TODO: switching by dwarf address size */ +#ifndef __x86_64__ static const struct pt_regs_offset x86_32_regoffset_table[] = { REG_OFFSET_NAME_32("%ax", eax), 
REG_OFFSET_NAME_32("%cx", ecx), @@ -75,6 +77,8 @@ static const struct pt_regs_offset x86_32_regoffset_table[] = { REG_OFFSET_END, }; +#define regoffset_table x86_32_regoffset_table +#else static const struct pt_regs_offset x86_64_regoffset_table[] = { REG_OFFSET_NAME_64("%ax", rax), REG_OFFSET_NAME_64("%dx", rdx), @@ -95,11 +99,7 @@ static const struct pt_regs_offset x86_64_regoffset_table[] = { REG_OFFSET_END, }; -/* TODO: switching by dwarf address size */ -#ifdef __x86_64__ #define regoffset_table x86_64_regoffset_table -#else -#define regoffset_table x86_32_regoffset_table #endif /* Minus 1 for the ending REG_OFFSET_END */ diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index 492df2752a2d..b4eb5b679081 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c @@ -1570,13 +1570,13 @@ static int __bench_numa(const char *name) "GB/sec,", "total-speed", "GB/sec total speed"); if (g->p.show_details >= 2) { - char tname[32]; + char tname[14 + 2 * 10 + 1]; struct thread_data *td; for (p = 0; p < g->p.nr_proc; p++) { for (t = 0; t < g->p.nr_threads; t++) { - memset(tname, 0, 32); + memset(tname, 0, sizeof(tname)); td = g->threads + p*g->p.nr_threads + t; - snprintf(tname, 32, "process%d:thread%d", p, t); + snprintf(tname, sizeof(tname), "process%d:thread%d", p, t); print_res(tname, td->speed_gbs, "GB/sec", "thread-speed", "GB/sec/thread speed"); print_res(tname, td->system_time_ns / 1e9, diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 368d1e1561f7..48840556bf2d 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -1253,21 +1253,19 @@ static int is_directory(const char *base_path, const struct dirent *dent) return S_ISDIR(st.st_mode); } -#define for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next)\ - while (!readdir_r(scripts_dir, &lang_dirent, &lang_next) && \ - lang_next) \ - if ((lang_dirent.d_type == DT_DIR || \ - (lang_dirent.d_type == DT_UNKNOWN && \ - is_directory(scripts_path, &lang_dirent))) && \ - (strcmp(lang_dirent.d_name, ".")) && \ - (strcmp(lang_dirent.d_name, ".."))) - -#define for_each_script(lang_path, lang_dir, script_dirent, script_next)\ - while (!readdir_r(lang_dir, &script_dirent, &script_next) && \ - script_next) \ - if (script_dirent.d_type != DT_DIR && \ - (script_dirent.d_type != DT_UNKNOWN || \ - !is_directory(lang_path, &script_dirent))) +#define for_each_lang(scripts_path, scripts_dir, lang_dirent) \ + while ((lang_dirent = readdir(scripts_dir)) != NULL) \ + if ((lang_dirent->d_type == DT_DIR || \ + (lang_dirent->d_type == DT_UNKNOWN && \ + is_directory(scripts_path, lang_dirent))) && \ + (strcmp(lang_dirent->d_name, ".")) && \ + (strcmp(lang_dirent->d_name, ".."))) + +#define for_each_script(lang_path, lang_dir, script_dirent) \ + while ((script_dirent = readdir(lang_dir)) != NULL) \ + if (script_dirent->d_type != DT_DIR && \ + (script_dirent->d_type != DT_UNKNOWN || \ + !is_directory(lang_path, script_dirent))) #define RECORD_SUFFIX "-record" @@ -1413,7 +1411,7 @@ static int list_available_scripts(const struct option *opt __maybe_unused, const char *s __maybe_unused, int unset __maybe_unused) { - struct dirent *script_next, *lang_next, script_dirent, lang_dirent; + struct dirent *script_dirent, *lang_dirent; char scripts_path[MAXPATHLEN]; DIR *scripts_dir, *lang_dir; char script_path[MAXPATHLEN]; @@ -1428,19 +1426,19 @@ static int list_available_scripts(const struct option *opt __maybe_unused, if (!scripts_dir) return -1; - for_each_lang(scripts_path, scripts_dir, 
lang_dirent, lang_next) { + for_each_lang(scripts_path, scripts_dir, lang_dirent) { snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, - lang_dirent.d_name); + lang_dirent->d_name); lang_dir = opendir(lang_path); if (!lang_dir) continue; - for_each_script(lang_path, lang_dir, script_dirent, script_next) { - script_root = get_script_root(&script_dirent, REPORT_SUFFIX); + for_each_script(lang_path, lang_dir, script_dirent) { + script_root = get_script_root(script_dirent, REPORT_SUFFIX); if (script_root) { desc = script_desc__findnew(script_root); snprintf(script_path, MAXPATHLEN, "%s/%s", - lang_path, script_dirent.d_name); + lang_path, script_dirent->d_name); read_script_info(desc, script_path); free(script_root); } @@ -1528,7 +1526,7 @@ static int check_ev_match(char *dir_name, char *scriptname, */ int find_scripts(char **scripts_array, char **scripts_path_array) { - struct dirent *script_next, *lang_next, script_dirent, lang_dirent; + struct dirent *script_dirent, *lang_dirent; char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN]; DIR *scripts_dir, *lang_dir; struct perf_session *session; @@ -1551,9 +1549,9 @@ int find_scripts(char **scripts_array, char **scripts_path_array) return -1; } - for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) { + for_each_lang(scripts_path, scripts_dir, lang_dirent) { snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, - lang_dirent.d_name); + lang_dirent->d_name); #ifdef NO_LIBPERL if (strstr(lang_path, "perl")) continue; @@ -1567,16 +1565,16 @@ int find_scripts(char **scripts_array, char **scripts_path_array) if (!lang_dir) continue; - for_each_script(lang_path, lang_dir, script_dirent, script_next) { + for_each_script(lang_path, lang_dir, script_dirent) { /* Skip those real time scripts: xxxtop.p[yl] */ - if (strstr(script_dirent.d_name, "top.")) + if (strstr(script_dirent->d_name, "top.")) continue; sprintf(scripts_path_array[i], "%s/%s", lang_path, - script_dirent.d_name); - temp = strchr(script_dirent.d_name, '.'); + script_dirent->d_name); + temp = strchr(script_dirent->d_name, '.'); snprintf(scripts_array[i], - (temp - script_dirent.d_name) + 1, - "%s", script_dirent.d_name); + (temp - script_dirent->d_name) + 1, + "%s", script_dirent->d_name); if (check_ev_match(lang_path, scripts_array[i], session)) @@ -1594,7 +1592,7 @@ int find_scripts(char **scripts_array, char **scripts_path_array) static char *get_script_path(const char *script_root, const char *suffix) { - struct dirent *script_next, *lang_next, script_dirent, lang_dirent; + struct dirent *script_dirent, *lang_dirent; char scripts_path[MAXPATHLEN]; char script_path[MAXPATHLEN]; DIR *scripts_dir, *lang_dir; @@ -1607,21 +1605,21 @@ static char *get_script_path(const char *script_root, const char *suffix) if (!scripts_dir) return NULL; - for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) { + for_each_lang(scripts_path, scripts_dir, lang_dirent) { snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, - lang_dirent.d_name); + lang_dirent->d_name); lang_dir = opendir(lang_path); if (!lang_dir) continue; - for_each_script(lang_path, lang_dir, script_dirent, script_next) { - __script_root = get_script_root(&script_dirent, suffix); + for_each_script(lang_path, lang_dir, script_dirent) { + __script_root = get_script_root(script_dirent, suffix); if (__script_root && !strcmp(script_root, __script_root)) { free(__script_root); closedir(lang_dir); closedir(scripts_dir); snprintf(script_path, MAXPATHLEN, "%s/%s", - lang_path, script_dirent.d_name); + lang_path, 
script_dirent->d_name); return strdup(script_path); } free(__script_root); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 7e2e72e6d9d1..4a8a02c302d2 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -636,7 +636,7 @@ repeat: case -1: if (errno == EINTR) continue; - /* Fall trhu */ + __fallthrough; default: c = getc(stdin); tcsetattr(0, TCSAFLUSH, &save); diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index c783d8fd3a80..ebe7115c751a 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -1617,6 +1617,7 @@ static int trace__process_event(struct trace *trace, struct machine *machine, color_fprintf(trace->output, PERF_COLOR_RED, "LOST %" PRIu64 " events!\n", event->lost.lost); ret = machine__process_lost_event(machine, event, sample); + break; default: ret = machine__process_event(machine, event, sample); break; diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index 636d7b42d844..54af2f2e2ee4 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -1727,15 +1727,14 @@ static int test_pmu_events(void) } while (!ret && (ent = readdir(dir))) { -#define MAX_NAME 100 struct evlist_test e; - char name[MAX_NAME]; + char name[2 * NAME_MAX + 1 + 12 + 3]; if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, "..")) continue; - snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name); + snprintf(name, sizeof(name), "cpu/event=%s/u", ent->d_name); e.name = name; e.check = test__checkevent_pmu_events; @@ -1743,11 +1742,10 @@ static int test_pmu_events(void) ret = test_event(&e); if (ret) break; - snprintf(name, MAX_NAME, "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name); + snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name); e.name = name; e.check = test__checkevent_pmu_events_mix; ret = test_event(&e); -#undef MAX_NAME } closedir(dir); diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index d4d7cc27252f..718bd46d47fa 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -755,11 +755,11 @@ static int annotate_browser__run(struct annotate_browser *browser, nd = browser->curr_hot; break; case K_UNTAB: - if (nd != NULL) + if (nd != NULL) { nd = rb_next(nd); if (nd == NULL) nd = rb_first(&browser->entries); - else + } else nd = browser->curr_hot; break; case K_F1: diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 956187bf1a85..26cba64345e3 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -416,7 +416,7 @@ static int __event__synthesize_thread(union perf_event *comm_event, { char filename[PATH_MAX]; DIR *tasks; - struct dirent dirent, *next; + struct dirent *dirent; pid_t tgid, ppid; int rc = 0; @@ -445,11 +445,11 @@ static int __event__synthesize_thread(union perf_event *comm_event, return 0; } - while (!readdir_r(tasks, &dirent, &next) && next) { + while ((dirent = readdir(tasks)) != NULL) { char *end; pid_t _pid; - _pid = strtol(dirent.d_name, &end, 10); + _pid = strtol(dirent->d_name, &end, 10); if (*end) continue; @@ -558,7 +558,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool, { DIR *proc; char proc_path[PATH_MAX]; - struct dirent dirent, *next; + struct dirent *dirent; union perf_event *comm_event, *mmap_event, *fork_event; int err = -1; @@ -583,9 +583,9 @@ int perf_event__synthesize_threads(struct perf_tool *tool, if (proc == NULL) goto out_free_fork; - while (!readdir_r(proc, &dirent, &next) && next) { + while 
((dirent = readdir(proc)) != NULL) { char *end; - pid_t pid = strtol(dirent.d_name, &end, 10); + pid_t pid = strtol(dirent->d_name, &end, 10); if (*end) /* only interested in proper numerical dirents */ continue; diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 71df7acf8643..933a509a90f8 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -22,6 +22,7 @@ #include <errno.h> #include <stdint.h> #include <inttypes.h> +#include <linux/compiler.h> #include "../cache.h" #include "../util.h" @@ -1708,6 +1709,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) switch (decoder->packet.type) { case INTEL_PT_TIP_PGD: decoder->continuous_period = false; + __fallthrough; case INTEL_PT_TIP_PGE: case INTEL_PT_TIP: intel_pt_log("ERROR: Unexpected packet\n"); @@ -1762,6 +1764,8 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) decoder->pge = false; decoder->continuous_period = false; intel_pt_clear_tx_flags(decoder); + __fallthrough; + case INTEL_PT_TNT: decoder->have_tma = false; intel_pt_log("ERROR: Unexpected packet\n"); @@ -1802,6 +1806,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder) switch (decoder->packet.type) { case INTEL_PT_TIP_PGD: decoder->continuous_period = false; + __fallthrough; case INTEL_PT_TIP_PGE: case INTEL_PT_TIP: decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD; diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c index b1257c816310..9b2fce25162b 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c @@ -17,6 +17,7 @@ #include <string.h> #include <endian.h> #include <byteswap.h> +#include <linux/compiler.h> #include "intel-pt-pkt-decoder.h" @@ -488,6 +489,7 @@ int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf, case INTEL_PT_FUP: if (!(packet->count)) return snprintf(buf, buf_len, "%s no ip", name); + __fallthrough; case INTEL_PT_CYC: case INTEL_PT_VMCS: case INTEL_PT_MTC: diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 854dd2105bd5..881bbb5e7912 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -138,11 +138,11 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = { #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE) #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT) -#define for_each_subsystem(sys_dir, sys_dirent, sys_next) \ - while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \ - if (sys_dirent.d_type == DT_DIR && \ - (strcmp(sys_dirent.d_name, ".")) && \ - (strcmp(sys_dirent.d_name, ".."))) +#define for_each_subsystem(sys_dir, sys_dirent) \ + while ((sys_dirent = readdir(sys_dir)) != NULL) \ + if (sys_dirent->d_type == DT_DIR && \ + (strcmp(sys_dirent->d_name, ".")) && \ + (strcmp(sys_dirent->d_name, ".."))) static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) { @@ -159,12 +159,12 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir) return 0; } -#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \ - while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \ - if (evt_dirent.d_type == DT_DIR && \ - (strcmp(evt_dirent.d_name, ".")) && \ - (strcmp(evt_dirent.d_name, "..")) && \ - (!tp_event_has_id(&sys_dirent, &evt_dirent))) +#define 
for_each_event(sys_dirent, evt_dir, evt_dirent) \ + while ((evt_dirent = readdir(evt_dir)) != NULL) \ + if (evt_dirent->d_type == DT_DIR && \ + (strcmp(evt_dirent->d_name, ".")) && \ + (strcmp(evt_dirent->d_name, "..")) && \ + (!tp_event_has_id(sys_dirent, evt_dirent))) #define MAX_EVENT_LENGTH 512 @@ -173,7 +173,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) { struct tracepoint_path *path = NULL; DIR *sys_dir, *evt_dir; - struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; + struct dirent *sys_dirent, *evt_dirent; char id_buf[24]; int fd; u64 id; @@ -184,18 +184,18 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) if (!sys_dir) return NULL; - for_each_subsystem(sys_dir, sys_dirent, sys_next) { + for_each_subsystem(sys_dir, sys_dirent) { snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, - sys_dirent.d_name); + sys_dirent->d_name); evt_dir = opendir(dir_path); if (!evt_dir) continue; - for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { + for_each_event(sys_dirent, evt_dir, evt_dirent) { snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path, - evt_dirent.d_name); + evt_dirent->d_name); fd = open(evt_path, O_RDONLY); if (fd < 0) continue; @@ -220,9 +220,9 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config) free(path); return NULL; } - strncpy(path->system, sys_dirent.d_name, + strncpy(path->system, sys_dirent->d_name, MAX_EVENT_LENGTH); - strncpy(path->name, evt_dirent.d_name, + strncpy(path->name, evt_dirent->d_name, MAX_EVENT_LENGTH); return path; } @@ -1662,7 +1662,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob, bool name_only) { DIR *sys_dir, *evt_dir; - struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; + struct dirent *sys_dirent, *evt_dirent; char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; char **evt_list = NULL; @@ -1680,20 +1680,20 @@ restart: goto out_close_sys_dir; } - for_each_subsystem(sys_dir, sys_dirent, sys_next) { + for_each_subsystem(sys_dir, sys_dirent) { if (subsys_glob != NULL && - !strglobmatch(sys_dirent.d_name, subsys_glob)) + !strglobmatch(sys_dirent->d_name, subsys_glob)) continue; snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, - sys_dirent.d_name); + sys_dirent->d_name); evt_dir = opendir(dir_path); if (!evt_dir) continue; - for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { + for_each_event(sys_dirent, evt_dir, evt_dirent) { if (event_glob != NULL && - !strglobmatch(evt_dirent.d_name, event_glob)) + !strglobmatch(evt_dirent->d_name, event_glob)) continue; if (!evt_num_known) { @@ -1702,7 +1702,7 @@ restart: } snprintf(evt_path, MAXPATHLEN, "%s:%s", - sys_dirent.d_name, evt_dirent.d_name); + sys_dirent->d_name, evt_dirent->d_name); evt_list[evt_i] = strdup(evt_path); if (evt_list[evt_i] == NULL) @@ -1755,7 +1755,7 @@ out_close_sys_dir: int is_valid_tracepoint(const char *event_string) { DIR *sys_dir, *evt_dir; - struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; + struct dirent *sys_dirent, *evt_dirent; char evt_path[MAXPATHLEN]; char dir_path[MAXPATHLEN]; @@ -1763,17 +1763,17 @@ int is_valid_tracepoint(const char *event_string) if (!sys_dir) return 0; - for_each_subsystem(sys_dir, sys_dirent, sys_next) { + for_each_subsystem(sys_dir, sys_dirent) { snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path, - sys_dirent.d_name); + sys_dirent->d_name); evt_dir = opendir(dir_path); if (!evt_dir) continue; - for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) { + for_each_event(sys_dirent, evt_dir, evt_dirent) { 
snprintf(evt_path, MAXPATHLEN, "%s:%s", - sys_dirent.d_name, evt_dirent.d_name); + sys_dirent->d_name, evt_dirent->d_name); if (!strcmp(evt_path, event_string)) { closedir(evt_dir); closedir(sys_dir); diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 6f2a0279476c..593066c68e3d 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -153,7 +153,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n if (fd == -1) return -1; - sret = read(fd, alias->unit, UNIT_MAX_LEN); + sret = read(fd, alias->unit, UNIT_MAX_LEN); if (sret < 0) goto error; diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build index 6516e220c247..82d28c67e0f3 100644 --- a/tools/perf/util/scripting-engines/Build +++ b/tools/perf/util/scripting-engines/Build @@ -1,6 +1,6 @@ libperf-$(CONFIG_LIBPERL) += trace-event-perl.o libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o -CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default +CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c index bcae659b6546..efb53772e0ec 100644 --- a/tools/perf/util/strfilter.c +++ b/tools/perf/util/strfilter.c @@ -269,6 +269,7 @@ static int strfilter_node__sprint(struct strfilter_node *node, char *buf) len = strfilter_node__sprint_pt(node->l, buf); if (len < 0) return len; + __fallthrough; case '!': if (buf) { *(buf + len++) = *node->p; diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c index fc8781de62db..accb7ece1d3c 100644 --- a/tools/perf/util/string.c +++ b/tools/perf/util/string.c @@ -21,6 +21,8 @@ s64 perf_atoll(const char *str) case 'b': case 'B': if (*p) goto out_err; + + __fallthrough; case '\0': return length; default: diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 0a9ae8014729..829508a21448 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -227,7 +227,7 @@ void thread__find_cpumode_addr_location(struct thread *thread, struct addr_location *al) { size_t i; - const u8 const cpumodes[] = { + const u8 cpumodes[] = { PERF_RECORD_MISC_USER, PERF_RECORD_MISC_KERNEL, PERF_RECORD_MISC_GUEST_USER, diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c index 6ec3c5ca438f..4e666b95b87e 100644 --- a/tools/perf/util/thread_map.c +++ b/tools/perf/util/thread_map.c @@ -92,8 +92,8 @@ struct thread_map *thread_map__new_by_uid(uid_t uid) { DIR *proc; int max_threads = 32, items, i; - char path[256]; - struct dirent dirent, *next, **namelist = NULL; + char path[NAME_MAX + 1 + 6]; + struct dirent *dirent, **namelist = NULL; struct thread_map *threads = thread_map__alloc(max_threads); if (threads == NULL) @@ -106,16 +106,16 @@ struct thread_map *thread_map__new_by_uid(uid_t uid) threads->nr = 0; atomic_set(&threads->refcnt, 1); - while (!readdir_r(proc, &dirent, &next) && next) { + while ((dirent = readdir(proc)) != NULL) { char *end; bool grow = false; struct stat st; - pid_t pid = strtol(dirent.d_name, &end, 10); + pid_t pid = strtol(dirent->d_name, &end, 10); if (*end) /* only interested in proper numerical dirents */ continue; - snprintf(path, sizeof(path), 
"/proc/%s", dirent.d_name); + snprintf(path, sizeof(path), "/proc/%s", dirent->d_name); if (stat(path, &st) != 0) continue; diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c index 10a21a958aaf..763f37fecfb8 100644 --- a/tools/testing/selftests/capabilities/test_execve.c +++ b/tools/testing/selftests/capabilities/test_execve.c @@ -138,9 +138,6 @@ static void chdir_to_tmpfs(void) if (chdir(cwd) != 0) err(1, "chdir to private tmpfs"); - - if (umount2(".", MNT_DETACH) != 0) - err(1, "detach private tmpfs"); } static void copy_fromat_to(int fromfd, const char *fromname, const char *toname) @@ -248,7 +245,7 @@ static int do_tests(int uid, const char *our_path) err(1, "chown"); if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0) err(1, "chmod"); -} + } capng_get_caps_process(); @@ -384,7 +381,7 @@ static int do_tests(int uid, const char *our_path) } else { printf("[RUN]\tNon-root +ia, sgidnonroot => i\n"); exec_other_validate_cap("./validate_cap_sgidnonroot", - false, false, true, false); + false, false, true, false); if (fork_wait()) { printf("[RUN]\tNon-root +ia, sgidroot => i\n"); |

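A note on the q6adm.c hunk: copp_idx is now initialized to -1 and the existing-copp lookup runs only for non-ULL sessions, because, per the comment in the hunk, the DSP interface does not allow an ultra-low-latency stream to share an ADM copp even when app_type, rate, and bit width all match. Reduced to its control flow, a sketch of the decision; the helper names come from the hunk, but the stub bodies and the ULTRA_LOW_LATENCY_PCM_MODE value are placeholders, not the real ADM code:

#include <stdio.h>

#define ULTRA_LOW_LATENCY_PCM_MODE 1	/* assumed value, for the sketch only */

/* Stubs standing in for the ADM helpers called in the hunk. */
static int adm_get_idx_if_copp_exists(void) { return 2; /* pretend a match exists */ }
static int adm_get_next_available_copp(void) { return 5; }

static int pick_copp(int perf_mode)
{
	int copp_idx = -1;

	/* Reuse an existing copp only for non-ULL sessions; ULL
	 * streams always get a fresh copp. */
	if (perf_mode != ULTRA_LOW_LATENCY_PCM_MODE)
		copp_idx = adm_get_idx_if_copp_exists();
	if (copp_idx < 0)
		copp_idx = adm_get_next_available_copp();
	return copp_idx;
}

int main(void)
{
	printf("normal: %d, ull: %d\n",
	       pick_copp(0), pick_copp(ULTRA_LOW_LATENCY_PCM_MODE));
	return 0;
}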
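The __fallthrough macro added to tools/include/linux/compiler.h expands to __attribute__ ((fallthrough)) on GCC 7 and later and to nothing otherwise, so the switch cases annotated in builtin-top.c, the intel-pt decoders, strfilter.c, and string.c build cleanly under -Wimplicit-fallthrough while remaining no-ops on older compilers. A self-contained sketch of the annotation at a call site; the scale() example is illustrative, not taken from the patch:

#include <stdio.h>

#ifndef __fallthrough
# if defined(__GNUC__) && __GNUC__ >= 7
#  define __fallthrough __attribute__ ((fallthrough))
# else
#  define __fallthrough
# endif
#endif

/* Toy suffix handler: 'k' scales the value and then deliberately
 * falls through to the no-suffix case; __fallthrough tells the
 * compiler the missing break is intentional. */
static long scale(char suffix, long value)
{
	switch (suffix) {
	case 'k':
		value *= 1024;
		__fallthrough;
	case '\0':
		return value;
	default:
		return -1;
	}
}

int main(void)
{
	printf("%ld\n", scale('k', 2));	/* prints 2048 */
	return 0;
}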
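A note on the readdir_r() conversions above (builtin-script.c, util/event.c, util/parse-events.c, util/thread_map.c): readdir_r(3) is deprecated because the caller cannot portably size the dirent buffer it supplies, while readdir(3) is thread-safe in modern C libraries as long as each thread iterates its own DIR stream, which holds for every call site touched here. A minimal standalone sketch of the resulting loop shape, assuming a hypothetical list_pids() helper that walks /proc the way util/event.c and util/thread_map.c now do:

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

/* Print the numeric entries of /proc (i.e. PIDs) using plain
 * readdir(); the DIR stream is private to this function, so no
 * readdir_r() is needed for thread safety. */
static int list_pids(void)
{
	DIR *proc = opendir("/proc");
	struct dirent *dirent;

	if (proc == NULL)
		return -1;

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		long pid = strtol(dirent->d_name, &end, 10);

		if (*end)	/* only interested in proper numerical dirents */
			continue;
		printf("%ld\n", pid);
	}

	closedir(proc);
	return 0;
}

int main(void)
{
	return list_pids() ? 1 : 0;
}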
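The buffer fixes in bench/numa.c and tests/parse-events.c share one idea: spell the worst case out in the array declaration (for numa.c, 14 bytes for the literal "process" and ":thread" text, 2 * 10 for two decimal ints, 1 for the terminating NUL), then pass sizeof() to snprintf() and memset() so the bound can never drift from the buffer. A small sketch of the pattern, with a hypothetical name_thread() helper:

#include <stdio.h>

/* Worst case: "process" (7) + ":thread" (7) + two 10-digit ints + NUL. */
static void name_thread(char *buf, size_t sz, int p, int t)
{
	snprintf(buf, sz, "process%d:thread%d", p, t);
}

int main(void)
{
	char tname[14 + 2 * 10 + 1];

	name_thread(tname, sizeof(tname), 3, 7);
	puts(tname);	/* prints "process3:thread7" */
	return 0;
}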