248 files changed, 12835 insertions, 3347 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/imem.txt b/Documentation/devicetree/bindings/arm/msm/imem.txt index d1f8ce1e5ac8..a9d2a2456cfd 100644 --- a/Documentation/devicetree/bindings/arm/msm/imem.txt +++ b/Documentation/devicetree/bindings/arm/msm/imem.txt @@ -46,6 +46,12 @@ Required properties: -compatible: "qcom,msm-imem-restart_reason" -reg: start address and size of restart_reason region in imem +Download Mode Type: +------------------- +Required properties: +-compatible: "qcom,msm-imem-dload-type" +-reg: start address and size of dload type region in imem + Download Mode: -------------- Required properties: diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt new file mode 100644 index 000000000000..9f8ea0d6ef8f --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt @@ -0,0 +1,23 @@ +Qualcomm Technologies, Inc Graphics Clock & Reset Controller Binding +-------------------------------------------------------------------- + +Required properties : +- compatible : shall contain only one of the following: + + "qcom,gpucc-msmfalcon" + +- reg : shall contain base register location and length +- #clock-cells : shall contain 1 +- #reset-cells : shall contain 1 + +Optional properties : +- #power-domain-cells : shall contain 1 + +Example: + clock-controller@5065000 { + compatible = "qcom,gpucc-msmfalcon"; + reg = <0x5065000 0x10000>; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt index 90abf0305319..68b8f09238e0 100644 --- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt +++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt @@ -249,6 +249,35 @@ Optional properties: 60 = 60 frames per second (default) - qcom,mdss-dsi-panel-clockrate: A 64 bit value specifies the panel clock speed in Hz. 0 = default value. +- qcom,mdss-mdp-kickoff-threshold: This property can be used to define a region + (in terms of scanlines) where the +hardware is allowed + to trigger a data transfer from MDP to DSI. + If this property is used, the region must be defined by setting + two values, the low and the high thresholds: + <low_threshold high_threshold> + Where the following condition must be met: + low_threshold < high_threshold + These values will be used by the driver in such a way that if + the driver receives a request to kickoff a transfer (MDP to DSI), + the transfer will be triggered only if the following condition + is satisfied: + low_threshold < scanline < high_threshold + If the condition is not met, then the driver will delay the + transfer by the time defined in the following property: + "qcom,mdss-mdp-kickoff-delay". + So in order to use this property, the delay property must + be defined as well and greater than 0. +- qcom,mdss-mdp-kickoff-delay: This property defines the delay in microseconds that + the driver will wait before triggering an MDP transfer if the + thresholds defined by the following property are not met: + "qcom,mdss-mdp-kickoff-threshold". + So in order to use this property, the threshold property must + be defined as well. Note that this delay cannot be zero + and also should not be greater than +the fps window, + i.e. for 60 fps the value should not exceed +16666 uS. - qcom,mdss-mdp-transfer-time-us: Specifies the dsi transfer time for command mode panels in microseconds.
Driver uses this number to adjust the clock rate according to the expected transfer time. @@ -568,6 +597,8 @@ Example: qcom,mdss-dsi-dma-trigger = <0>; qcom,mdss-dsi-panel-framerate = <60>; qcom,mdss-dsi-panel-clockrate = <424000000>; + qcom,mdss-mdp-kickoff-threshold = <11 2430>; + qcom,mdss-mdp-kickoff-delay = <1000>; qcom,mdss-mdp-transfer-time-us = <12500>; qcom,mdss-dsi-panel-timings = [7d 25 1d 00 37 33 22 27 1e 03 04 00]; diff --git a/Documentation/devicetree/bindings/gpu/adreno-iommu.txt b/Documentation/devicetree/bindings/gpu/adreno-iommu.txt index de88a6eba7a5..b399145ea8a2 100644 --- a/Documentation/devicetree/bindings/gpu/adreno-iommu.txt +++ b/Documentation/devicetree/bindings/gpu/adreno-iommu.txt @@ -36,8 +36,6 @@ Optional properties: for secure buffer allocation - qcom,secure_align_mask: A mask for determining how secure buffers need to be aligned -- qcom,coherent-htw: A boolean specifying if coherent hardware table walks should - be enabled. - List of sub nodes, one for each of the translation context banks supported. The driver uses the names of these nodes to determine how they are used, diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt index fffb8cc39d0f..ca58f0da07ef 100644 --- a/Documentation/devicetree/bindings/gpu/adreno.txt +++ b/Documentation/devicetree/bindings/gpu/adreno.txt @@ -139,6 +139,10 @@ Optional Properties: baseAddr - base address of the gpu channels in the qdss stm memory region size - size of the gpu stm region +- qcom,tsens-name: + Specify the name of the GPU temperature sensor. This name will be used + to get the temperature from the thermal driver API. + GPU Quirks: - qcom,gpu-quirk-two-pass-use-wfi: Signal the GPU to set the TWOPASSUSEWFI bit in diff --git a/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt b/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt index 721a4f72563e..1ab49edfe30c 100644 --- a/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt +++ b/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt @@ -41,6 +41,10 @@ The channel list supported by the RRADC driver is available in the enum rradc_ch located at drivers/iio/adc/qcom-rradc.c. Clients can use this index from the enum as the channel number while requesting ADC reads. +Optional property: +- qcom,pmic-revid : Phandle pointing to the revision peripheral node. Use it to query the + PMIC fabrication ID for applying the appropriate temperature + compensation parameters. Example: /* RRADC node */ diff --git a/Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt b/Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt new file mode 100644 index 000000000000..02f21835f870 --- /dev/null +++ b/Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt @@ -0,0 +1,10 @@ +PixArt pat9125 rotating switch + +The PixArt PAT9125 controller is connected to the host processor via I2C. +It detects the rotation when the user rotates the switch and generates an +interrupt to the host processor. The host processor reads the direction and +number of steps over I2C and passes the data to the rest of the system. + +Required properties: + + - compatible : should be "pixart,pat9125".
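[Editor's note: unlike the other binding documents touched by this patch, the new pat9125 binding stops at its required property and carries no example node. A minimal sketch follows; since the binding above mandates only the compatible string, the parent bus label, I2C slave address, interrupt GPIO, and trigger flags are illustrative assumptions (modeled on the synaptics node elsewhere in this patch), not values taken from the driver.]

Example (hypothetical wiring, for illustration only):

	&i2c_5 {
		pat9125@75 {
			compatible = "pixart,pat9125";
			/* Assumed I2C slave address; take the real one
			 * from the board schematic. */
			reg = <0x75>;
			/* Assumed motion-interrupt GPIO and flags. */
			interrupt-parent = <&tlmm>;
			interrupts = <96 0x2008>;
		};
	};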
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt index a8db893f6709..80f2d8f43e35 100644 --- a/Documentation/devicetree/bindings/platform/msm/ipa.txt +++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt @@ -36,7 +36,6 @@ Optional: compatible "qcom,ipa-smmu-wlan-cb" - ipa_smmu_uc: uc SMMU device compatible "qcom,ipa-smmu-uc-cb" -- qcom,smmu-disable-htw: boolean value to turn off SMMU page table caching - qcom,use-a2-service: determine if A2 service will be used - qcom,use-ipa-tethering-bridge: determine if tethering bridge will be used - qcom,use-ipa-bamdma-a2-bridge: determine if a2/ipa hw bridge will be used diff --git a/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt b/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt index 93312df2a43b..babc4523a29a 100644 --- a/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt +++ b/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt @@ -6,6 +6,10 @@ Required properties: - compatible : should be "qcom,qpnp-revid" - reg : offset and length of the PMIC peripheral register map. +Optional property: +- qcom,fab-id-valid: Use this property when support for reading the Fab + identification from the REV ID peripheral is available. + Example: qcom,revid@100 { compatible = "qcom,qpnp-revid"; diff --git a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt index 21404dfc4b7b..12ac75a8608c 100644 --- a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt +++ b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt @@ -53,6 +53,12 @@ Charger specific properties: Definition: Specifies the USB input current limit in micro-amps. If the value is not present, 1.5Amps is used as default. +- qcom,usb-ocl-ua + Usage: optional + Value type: <u32> + Definition: Specifies the OTG output current limit in micro-amps. + If the value is not present, 1.5Amps is used as default. + - qcom,dc-icl-ua Usage: optional Value type: <u32> diff --git a/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt b/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt index c784a01d6411..8cb513b5605f 100644 --- a/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt +++ b/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt @@ -15,6 +15,7 @@ Required device bindings: - reg-names: Name for the above register. "qpnp-lpg-channel-base" = physical base address of the controller's LPG channel register. +- qcom,lpg-lut-size: LPG LUT size. - qcom,channel-id: channel Id for the PWM. - qcom,supported-sizes: Supported PWM sizes. Following three pwm sizes lists are supported by PWM/LPG controllers. diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 1d7e54f68ee4..91412a10bf65 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -175,6 +175,7 @@ parade Parade Technologies Inc. pericom Pericom Technology Inc. phytec PHYTEC Messtechnik GmbH picochip Picochip Ltd +pixart PixArt Imaging Inc plathome Plat'Home Co., Ltd.
plda PLDA pixcir PIXCIR MICROELECTRONICS Co., Ltd diff --git a/arch/arm/boot/dts/qcom/dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi new file mode 100644 index 000000000000..6c17bca64a86 --- /dev/null +++ b/arch/arm/boot/dts/qcom/dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi @@ -0,0 +1,80 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&mdss_mdp { + dsi_dual_jdi_a407_cmd: qcom,mdss_dsi_jdi_a407_wqhd_cmd { + qcom,mdss-dsi-panel-name = "JDI a407 wqhd cmd mode dsi panel"; + qcom,mdss-dsi-panel-type = "dsi_cmd_mode"; + qcom,mdss-dsi-panel-framerate = <60>; + qcom,mdss-dsi-virtual-channel-id = <0>; + qcom,mdss-dsi-stream = <0>; + qcom,mdss-dsi-panel-width = <720>; + qcom,mdss-dsi-panel-height = <2560>; + qcom,mdss-dsi-h-front-porch = <16>; + qcom,mdss-dsi-h-back-porch = <40>; + qcom,mdss-dsi-h-pulse-width = <4>; + qcom,mdss-dsi-h-sync-skew = <0>; + qcom,mdss-dsi-v-back-porch = <20>; + qcom,mdss-dsi-v-front-porch = <7>; + qcom,mdss-dsi-v-pulse-width = <1>; + qcom,mdss-dsi-h-left-border = <0>; + qcom,mdss-dsi-h-right-border = <0>; + qcom,mdss-dsi-v-top-border = <0>; + qcom,mdss-dsi-v-bottom-border = <0>; + qcom,mdss-dsi-bpp = <24>; + qcom,mdss-dsi-color-order = "rgb_swap_rgb"; + qcom,mdss-dsi-underflow-color = <0xff>; + qcom,mdss-dsi-border-color = <0>; + qcom,mdss-dsi-on-command = [ + 15 01 00 00 00 00 02 35 00 + 05 01 00 00 78 00 02 11 00 + 05 01 00 00 32 00 02 29 00]; + qcom,mdss-dsi-off-command = [ + 05 01 00 00 32 00 02 28 00 + 05 01 00 00 78 00 02 10 00]; + qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; + qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; + qcom,mdss-dsi-traffic-mode = "burst_mode"; + qcom,mdss-dsi-lane-map = "lane_map_0123"; + qcom,mdss-dsi-bllp-eof-power-mode; + qcom,mdss-dsi-bllp-power-mode; + qcom,mdss-dsi-lane-0-state; + qcom,mdss-dsi-lane-1-state; + qcom,mdss-dsi-lane-2-state; + qcom,mdss-dsi-lane-3-state; + qcom,mdss-dsi-dma-trigger = "trigger_sw"; + qcom,mdss-dsi-mdp-trigger = "none"; + + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + + qcom,mdss-dsi-te-using-te-pin; + qcom,mdss-dsi-te-pin-select = <1>; + qcom,mdss-dsi-te-v-sync-rd-ptr-irq-line = <0x2c>; + qcom,mdss-dsi-te-dcs-command = <1>; + + qcom,mdss-dsi-lp11-init; + qcom,adjust-timer-wakeup-ms = <1>; + qcom,mdss-dsi-reset-sequence = <1 20>, <0 10>, <1 20>; + + qcom,config-select = <&dsi_dual_jdi_a407_cmd_config0>; + + dsi_dual_jdi_a407_cmd_config0: config0 { + qcom,split-mode = "dualctl-split"; + }; + + dsi_dual_jdi_a407_cmd_config1: config1 { + qcom,split-mode = "pingpong-split"; + }; + }; +}; diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi index 95a8e80ccdbd..9ad9e4adce00 100644 --- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi +++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi @@ -43,7 +43,7 @@ qcom,mdss-dsi-t-clk-pre = <0x24>; qcom,mdss-dsi-dma-trigger = 
"trigger_sw"; qcom,mdss-dsi-mdp-trigger = "none"; - qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>; + qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>; qcom,mdss-dsi-te-pin-select = <1>; qcom,mdss-dsi-wr-mem-start = <0x2c>; qcom,mdss-dsi-wr-mem-continue = <0x3c>; diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi index fd11be721dbb..6b549a4af6eb 100644 --- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi +++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi @@ -76,7 +76,7 @@ qcom,mdss-dsi-t-clk-pre = <0x24>; qcom,mdss-dsi-dma-trigger = "trigger_sw"; qcom,mdss-dsi-mdp-trigger = "none"; - qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>; + qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>; qcom,compression-mode = "dsc"; qcom,config-select = <&dsi_nt35597_dsc_video_config0>; diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi index b6f19b78ea70..1e42d0846acf 100644 --- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi +++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -59,33 +59,33 @@ qcom,mdss-dsi-te-check-enable; qcom,mdss-dsi-te-using-te-pin; qcom,ulps-enabled; - qcom,mdss-dsi-on-command = [15 01 00 00 10 00 02 ff 10 - 15 01 00 00 10 00 02 fb 01 - 15 01 00 00 10 00 02 ba 03 - 15 01 00 00 10 00 02 e5 01 - 15 01 00 00 10 00 02 35 00 - 15 01 00 00 10 00 02 bb 10 - 15 01 00 00 10 00 02 b0 03 - 15 01 00 00 10 00 02 ff e0 - 15 01 00 00 10 00 02 fb 01 - 15 01 00 00 10 00 02 6b 3d - 15 01 00 00 10 00 02 6c 3d - 15 01 00 00 10 00 02 6d 3d - 15 01 00 00 10 00 02 6e 3d - 15 01 00 00 10 00 02 6f 3d - 15 01 00 00 10 00 02 35 02 - 15 01 00 00 10 00 02 36 72 - 15 01 00 00 10 00 02 37 10 - 15 01 00 00 10 00 02 08 c0 - 15 01 00 00 10 00 02 ff 24 - 15 01 00 00 10 00 02 fb 01 - 15 01 00 00 10 00 02 c6 06 - 15 01 00 00 10 00 02 ff 10 - 05 01 00 00 a0 00 02 11 00 - 05 01 00 00 a0 00 02 29 00]; + qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10 + 15 01 00 00 00 00 02 fb 01 + 15 01 00 00 00 00 02 ba 03 + 15 01 00 00 00 00 02 e5 01 + 15 01 00 00 00 00 02 35 00 + 15 01 00 00 00 00 02 bb 10 + 15 01 00 00 00 00 02 b0 03 + 15 01 00 00 00 00 02 ff e0 + 15 01 00 00 00 00 02 fb 01 + 15 01 00 00 00 00 02 6b 3d + 15 01 00 00 00 00 02 6c 3d + 15 01 00 00 00 00 02 6d 3d + 15 01 00 00 00 00 02 6e 3d + 15 01 00 00 00 00 02 6f 3d + 15 01 00 00 00 00 02 35 02 + 15 01 00 00 00 00 02 36 72 + 15 01 00 00 00 00 02 37 10 + 15 01 00 00 00 00 02 08 c0 + 15 01 00 00 00 00 02 ff 24 + 15 01 00 00 00 00 02 fb 01 + 15 01 00 00 00 00 02 c6 06 + 15 01 00 00 00 00 02 ff 10 + 05 01 00 00 78 00 02 11 00 + 05 01 00 00 32 00 02 29 00]; - qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00 - 05 01 00 00 78 00 02 10 00]; + qcom,mdss-dsi-off-command = [05 01 00 00 0a 00 02 28 00 + 05 01 00 00 3c 00 02 10 00]; qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi index 367384a8c3e5..82413bfbca89 
100644 --- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi +++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi @@ -29,30 +29,30 @@ qcom,mdss-dsi-bpp = <24>; qcom,mdss-dsi-underflow-color = <0xff>; qcom,mdss-dsi-border-color = <0>; - qcom,mdss-dsi-on-command = [15 01 00 00 10 00 02 ff 10 - 15 01 00 00 10 00 02 fb 01 - 15 01 00 00 10 00 02 ba 03 - 15 01 00 00 10 00 02 e5 01 - 15 01 00 00 10 00 02 35 00 - 15 01 00 00 10 00 02 bb 03 - 15 01 00 00 10 00 02 b0 03 - 39 01 00 00 10 00 06 3b 03 08 08 64 9a - 15 01 00 00 10 00 02 ff e0 - 15 01 00 00 10 00 02 fb 01 - 15 01 00 00 10 00 02 6b 3d - 15 01 00 00 10 00 02 6c 3d - 15 01 00 00 10 00 02 6d 3d - 15 01 00 00 10 00 02 6e 3d - 15 01 00 00 10 00 02 6f 3d - 15 01 00 00 10 00 02 35 02 - 15 01 00 00 10 00 02 36 72 - 15 01 00 00 10 00 02 37 10 - 15 01 00 00 10 00 02 08 c0 - 15 01 00 00 10 00 02 ff 10 - 05 01 00 00 a0 00 02 11 00 - 05 01 00 00 a0 00 02 29 00]; - qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00 - 05 01 00 00 78 00 02 10 00]; + qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10 + 15 01 00 00 00 00 02 fb 01 + 15 01 00 00 00 00 02 ba 03 + 15 01 00 00 00 00 02 e5 01 + 15 01 00 00 00 00 02 35 00 + 15 01 00 00 00 00 02 bb 03 + 15 01 00 00 00 00 02 b0 03 + 39 01 00 00 00 00 06 3b 03 08 08 64 9a + 15 01 00 00 00 00 02 ff e0 + 15 01 00 00 00 00 02 fb 01 + 15 01 00 00 00 00 02 6b 3d + 15 01 00 00 00 00 02 6c 3d + 15 01 00 00 00 00 02 6d 3d + 15 01 00 00 00 00 02 6e 3d + 15 01 00 00 00 00 02 6f 3d + 15 01 00 00 00 00 02 35 02 + 15 01 00 00 00 00 02 36 72 + 15 01 00 00 00 00 02 37 10 + 15 01 00 00 00 00 02 08 c0 + 15 01 00 00 00 00 02 ff 10 + 05 01 00 00 78 00 02 11 00 + 05 01 00 00 32 00 02 29 00]; + qcom,mdss-dsi-off-command = [05 01 00 00 0a 00 02 28 00 + 05 01 00 00 3c 00 02 10 00]; qcom,mdss-dsi-on-command-state = "dsi_lp_mode"; qcom,mdss-dsi-off-command-state = "dsi_hs_mode"; qcom,mdss-dsi-h-sync-pulse = <0>; @@ -69,7 +69,7 @@ qcom,mdss-dsi-t-clk-pre = <0x2d>; qcom,mdss-dsi-dma-trigger = "trigger_sw"; qcom,mdss-dsi-mdp-trigger = "none"; - qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>; + qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>; qcom,mdss-dsi-min-refresh-rate = <55>; qcom,mdss-dsi-max-refresh-rate = <60>; qcom,mdss-dsi-pan-enable-dynamic-fps; diff --git a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi index 8a8782f5f8b3..28d230dfb6bf 100644 --- a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi +++ b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi @@ -23,6 +23,7 @@ pmicobalt_revid: qcom,revid@100 { compatible = "qcom,qpnp-revid"; reg = <0x100 0x100>; + qcom,fab-id-valid; }; qcom,power-on@800 { @@ -310,6 +311,7 @@ #address-cells = <1>; #size-cells = <0>; #io-channel-cells = <1>; + qcom,pmic-revid = <&pmicobalt_revid>; }; pmicobalt_fg: qpnp,fg { @@ -385,6 +387,7 @@ <0xb042 0x7e>; reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base"; + qcom,lpg-lut-size = <0x7e>; qcom,channel-id = <1>; qcom,supported-sizes = <6>, <9>; qcom,ramp-index = <0>; @@ -398,6 +401,7 @@ <0xb042 0x7e>; reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base"; + qcom,lpg-lut-size = <0x7e>; qcom,channel-id = <2>; qcom,supported-sizes = <6>, <9>; qcom,ramp-index = <1>; @@ -411,6 +415,7 @@ <0xb042 0x7e>; reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base"; + qcom,lpg-lut-size = <0x7e>; qcom,channel-id = <3>; qcom,supported-sizes = <6>, <9>; qcom,ramp-index = <2>; @@ -423,6 +428,7 @@ <0xb042 0x7e>; reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base"; + 
qcom,lpg-lut-size = <0x7e>; qcom,channel-id = <4>; qcom,supported-sizes = <6>, <9>; qcom,ramp-index = <3>; @@ -435,6 +441,7 @@ <0xb042 0x7e>; reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base"; + qcom,lpg-lut-size = <0x7e>; qcom,channel-id = <5>; qcom,supported-sizes = <6>, <9>; qcom,ramp-index = <4>; @@ -447,6 +454,7 @@ <0xb042 0x7e>; reg-names = "qpnp-lpg-channel-base", "qpnp-lpg-lut-base"; + qcom,lpg-lut-size = <0x7e>; qcom,channel-id = <6>; qcom,supported-sizes = <6>, <9>; qcom,ramp-index = <5>; diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi index dc1bbcd13c36..7e88f524367f 100644 --- a/arch/arm/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm/boot/dts/qcom/msm8996.dtsi @@ -1355,7 +1355,7 @@ gdsc-vdd-supply = <&gdsc_pcie_0>; vreg-1.8-supply = <&pm8994_l12>; vreg-0.9-supply = <&pm8994_l28>; - vreg-cx-supply = <&pm8994_s1_corner_ao>; + vreg-cx-supply = <&pm8994_s1_corner>; qcom,vreg-0.9-voltage-level = <925000 925000 24000>; qcom,vreg-cx-voltage-level = <7 4 0>; @@ -1510,7 +1510,7 @@ gdsc-vdd-supply = <&gdsc_pcie_1>; vreg-1.8-supply = <&pm8994_l12>; vreg-0.9-supply = <&pm8994_l28>; - vreg-cx-supply = <&pm8994_s1_corner_ao>; + vreg-cx-supply = <&pm8994_s1_corner>; qcom,vreg-0.9-voltage-level = <925000 925000 24000>; qcom,vreg-cx-voltage-level = <7 5 0>; @@ -1663,10 +1663,10 @@ gdsc-vdd-supply = <&gdsc_pcie_2>; vreg-1.8-supply = <&pm8994_l12>; vreg-0.9-supply = <&pm8994_l28>; - vreg-cx-supply = <&pm8994_s1_corner_ao>; + vreg-cx-supply = <&pm8994_s1_corner>; qcom,vreg-0.9-voltage-level = <925000 925000 24000>; - qcom,vreg-cx-voltage-level = <7 4 0>; + qcom,vreg-cx-voltage-level = <7 5 0>; qcom,l1-supported; qcom,l1ss-supported; @@ -1815,6 +1815,11 @@ reg = <0x10 8>; }; + dload_type@18 { + compatible = "qcom,msm-imem-dload-type"; + reg = <0x18 4>; + }; + restart_reason@65c { compatible = "qcom,msm-imem-restart_reason"; reg = <0x65c 4>; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi index 445f32df8aa4..a1d80075abe0 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi @@ -157,6 +157,7 @@ qcom,audio-routing = "RX_BIAS", "MCLK", + "MADINPUT", "MCLK", "AMIC2", "MIC BIAS2", "MIC BIAS2", "Headset Mic", "AMIC3", "MIC BIAS2", diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi index ed29dd9e1508..ed8eb8459e51 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-cdp.dtsi @@ -87,7 +87,7 @@ cam_vdig-supply = <&pmcobalt_s3>; qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; qcom,cam-vreg-min-voltage = <0 3312000 1352000>; - qcom,cam-vreg-max-voltage = <0 3312000 1352000>; + qcom,cam-vreg-max-voltage = <0 3600000 1352000>; qcom,cam-vreg-op-mode = <0 80000 105000>; qcom,gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; @@ -132,7 +132,7 @@ cam_vana-supply = <&pmicobalt_bob>; qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana"; qcom,cam-vreg-min-voltage = <0 0 3312000>; - qcom,cam-vreg-max-voltage = <0 0 3312000>; + qcom,cam-vreg-max-voltage = <0 0 3600000>; qcom,cam-vreg-op-mode = <0 0 80000>; qcom,gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; @@ -215,7 +215,7 @@ cam_vdig-supply = <&pmcobalt_s3>; qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; qcom,cam-vreg-min-voltage = <0 3312000 1352000>; - qcom,cam-vreg-max-voltage = <0 3312000 1352000>; + qcom,cam-vreg-max-voltage 
= <0 3600000 1352000>; qcom,cam-vreg-op-mode = <0 80000 105000>; qcom,gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; @@ -259,7 +259,7 @@ cam_vana-supply = <&pmicobalt_bob>; qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana"; qcom,cam-vreg-min-voltage = <0 0 3312000>; - qcom,cam-vreg-max-voltage = <0 0 3312000>; + qcom,cam-vreg-max-voltage = <0 0 3600000>; qcom,cam-vreg-op-mode = <0 0 80000>; qcom,gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi index 485bc560eef5..2be67ab52ba7 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-mtp.dtsi @@ -87,7 +87,7 @@ cam_vdig-supply = <&pmcobalt_s3>; qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; qcom,cam-vreg-min-voltage = <0 3312000 1352000>; - qcom,cam-vreg-max-voltage = <0 3312000 1352000>; + qcom,cam-vreg-max-voltage = <0 3600000 1352000>; qcom,cam-vreg-op-mode = <0 80000 105000>; qcom,gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; @@ -132,7 +132,7 @@ cam_vana-supply = <&pmicobalt_bob>; qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana"; qcom,cam-vreg-min-voltage = <0 0 3312000>; - qcom,cam-vreg-max-voltage = <0 0 3312000>; + qcom,cam-vreg-max-voltage = <0 0 3600000>; qcom,cam-vreg-op-mode = <0 0 80000>; qcom,gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; @@ -215,7 +215,7 @@ cam_vdig-supply = <&pmcobalt_s3>; qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; qcom,cam-vreg-min-voltage = <0 3312000 1352000>; - qcom,cam-vreg-max-voltage = <0 3312000 1352000>; + qcom,cam-vreg-max-voltage = <0 3600000 1352000>; qcom,cam-vreg-op-mode = <0 80000 105000>; qcom,gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; @@ -259,7 +259,7 @@ cam_vana-supply = <&pmicobalt_bob>; qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana"; qcom,cam-vreg-min-voltage = <0 0 3312000>; - qcom,cam-vreg-max-voltage = <0 0 3312000>; + qcom,cam-vreg-max-voltage = <0 0 3600000>; qcom,cam-vreg-op-mode = <0 0 80000>; qcom,gpio-no-mux = <0>; pinctrl-names = "cam_default", "cam_suspend"; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-qrd.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-qrd.dtsi new file mode 100644 index 000000000000..4b435aee73b0 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-qrd.dtsi @@ -0,0 +1,356 @@ + +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + led_flash0: qcom,camera-flash@0 { + cell-index = <0>; + compatible = "qcom,camera-flash"; + qcom,flash-source = <&pmicobalt_flash0 &pmicobalt_flash1>; + qcom,torch-source = <&pmicobalt_torch0 &pmicobalt_torch1>; + qcom,switch-source = <&pmicobalt_switch0>; + status = "ok"; + }; + + led_flash1: qcom,camera-flash@1 { + cell-index = <1>; + compatible = "qcom,camera-flash"; + qcom,flash-source = <&pmicobalt_flash2>; + qcom,torch-source = <&pmicobalt_torch2>; + qcom,switch-source = <&pmicobalt_switch1>; + status = "ok"; + }; +}; + +&cci { + actuator0: qcom,actuator@0 { + cell-index = <0>; + reg = <0x0>; + compatible = "qcom,actuator"; + qcom,cci-master = <0>; + gpios = <&tlmm 27 0>; + qcom,gpio-vaf = <0>; + qcom,gpio-req-tbl-num = <0>; + qcom,gpio-req-tbl-flags = <0>; + qcom,gpio-req-tbl-label = "CAM_VAF"; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_actuator_vaf_active>; + pinctrl-1 = <&cam_actuator_vaf_suspend>; + }; + + actuator1: qcom,actuator@1 { + cell-index = <1>; + reg = <0x1>; + compatible = "qcom,actuator"; + qcom,cci-master = <1>; + gpios = <&tlmm 27 0>; + qcom,gpio-vaf = <0>; + qcom,gpio-req-tbl-num = <0>; + qcom,gpio-req-tbl-flags = <0>; + qcom,gpio-req-tbl-label = "CAM_VAF"; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_actuator_vaf_active>; + pinctrl-1 = <&cam_actuator_vaf_suspend>; + }; + + ois0: qcom,ois@0 { + cell-index = <0>; + reg = <0x0>; + compatible = "qcom,ois"; + qcom,cci-master = <0>; + gpios = <&tlmm 27 0>; + qcom,gpio-vaf = <0>; + qcom,gpio-req-tbl-num = <0>; + qcom,gpio-req-tbl-flags = <0>; + qcom,gpio-req-tbl-label = "CAM_VAF"; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_actuator_vaf_active>; + pinctrl-1 = <&cam_actuator_vaf_suspend>; + status = "disabled"; + }; + + eeprom0: qcom,eeprom@0 { + cell-index = <0>; + reg = <0>; + compatible = "qcom,eeprom"; + cam_vio-supply = <&pmcobalt_lvs1>; + cam_vana-supply = <&pmicobalt_bob>; + cam_vdig-supply = <&pmcobalt_s3>; + qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; + qcom,cam-vreg-min-voltage = <0 3312000 1352000>; + qcom,cam-vreg-max-voltage = <0 3312000 1352000>; + qcom,cam-vreg-op-mode = <0 80000 105000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk0_active + &cam_sensor_rear_active + &cam_actuator_vaf_active>; + pinctrl-1 = <&cam_sensor_mclk0_suspend + &cam_sensor_rear_suspend + &cam_actuator_vaf_suspend>; + gpios = <&tlmm 13 0>, + <&tlmm 30 0>, + <&pmcobalt_gpios 20 0>, + <&tlmm 29 0>, + <&tlmm 27 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vdig = <2>; + qcom,gpio-vana = <3>; + qcom,gpio-vaf = <4>; + qcom,gpio-req-tbl-num = <0 1 2 3 4>; + qcom,gpio-req-tbl-flags = <1 0 0 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK0", + "CAM_RESET0", + "CAM_VDIG", + "CAM_VANA", + "CAM_VAF"; + qcom,sensor-position = <0>; + qcom,sensor-mode = <0>; + qcom,cci-master = <0>; + status = "ok"; + clocks = <&clock_mmss clk_mclk0_clk_src>, + <&clock_mmss clk_mmss_camss_mclk0_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + eeprom1: qcom,eeprom@1 { + cell-index = <1>; + reg = <0x1>; + compatible = "qcom,eeprom"; + cam_vdig-supply = <&pmcobalt_lvs1>; + cam_vio-supply = <&pmcobalt_lvs1>; + cam_vana-supply = <&pmicobalt_bob>; + qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana"; + qcom,cam-vreg-min-voltage = <0 0 3312000>; + qcom,cam-vreg-max-voltage = <0 0 3312000>; + qcom,cam-vreg-op-mode = <0 0 80000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = 
"cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk2_active + &cam_sensor_rear2_active>; + pinctrl-1 = <&cam_sensor_mclk2_suspend + &cam_sensor_rear2_suspend>; + gpios = <&tlmm 15 0>, + <&tlmm 9 0>, + <&tlmm 8 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vana = <2>; + qcom,gpio-req-tbl-num = <0 1 2>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK1", + "CAM_RESET1", + "CAM_VANA1"; + qcom,sensor-position = <0>; + qcom,sensor-mode = <0>; + qcom,cci-master = <1>; + status = "ok"; + clocks = <&clock_mmss clk_mclk2_clk_src>, + <&clock_mmss clk_mmss_camss_mclk2_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + eeprom2: qcom,eeprom@2 { + cell-index = <2>; + reg = <0x2>; + compatible = "qcom,eeprom"; + cam_vio-supply = <&pmcobalt_lvs1>; + cam_vana-supply = <&pmcobalt_l22>; + cam_vdig-supply = <&pmcobalt_s3>; + qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; + qcom,cam-vreg-min-voltage = + <0 2864000 1352000>; + qcom,cam-vreg-max-voltage = + <0 2864000 1352000>; + qcom,cam-vreg-op-mode = <0 80000 105000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk1_active + &cam_sensor_front_active>; + pinctrl-1 = <&cam_sensor_mclk1_suspend + &cam_sensor_front_suspend>; + gpios = <&tlmm 14 0>, + <&tlmm 28 0>, + <&pmcobalt_gpios 9 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vdig = <2>; + qcom,gpio-req-tbl-num = <0 1 2>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK2", + "CAM_RESET2", + "CAM_VDIG"; + qcom,sensor-position = <1>; + qcom,sensor-mode = <0>; + qcom,cci-master = <1>; + status = "ok"; + clocks = <&clock_mmss clk_mclk1_clk_src>, + <&clock_mmss clk_mmss_camss_mclk1_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + qcom,camera@0 { + cell-index = <0>; + compatible = "qcom,camera"; + reg = <0x0>; + qcom,csiphy-sd-index = <0>; + qcom,csid-sd-index = <0>; + qcom,mount-angle = <90>; + qcom,led-flash-src = <&led_flash0>; + qcom,actuator-src = <&actuator0>; + qcom,ois-src = <&ois0>; + qcom,eeprom-src = <&eeprom0>; + cam_vio-supply = <&pmcobalt_lvs1>; + cam_vana-supply = <&pmicobalt_bob>; + cam_vdig-supply = <&pmcobalt_s3>; + qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; + qcom,cam-vreg-min-voltage = <0 3312000 1352000>; + qcom,cam-vreg-max-voltage = <0 3312000 1352000>; + qcom,cam-vreg-op-mode = <0 80000 105000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk0_active + &cam_sensor_rear_active>; + pinctrl-1 = <&cam_sensor_mclk0_suspend + &cam_sensor_rear_suspend>; + gpios = <&tlmm 13 0>, + <&tlmm 30 0>, + <&pmcobalt_gpios 20 0>, + <&tlmm 29 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vdig = <2>; + qcom,gpio-vana = <3>; + qcom,gpio-req-tbl-num = <0 1 2 3>; + qcom,gpio-req-tbl-flags = <1 0 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK0", + "CAM_RESET0", + "CAM_VDIG", + "CAM_VANA"; + qcom,sensor-position = <0>; + qcom,sensor-mode = <0>; + qcom,cci-master = <0>; + status = "ok"; + clocks = <&clock_mmss clk_mclk0_clk_src>, + <&clock_mmss clk_mmss_camss_mclk0_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + qcom,camera@1 { + cell-index = <1>; + compatible = "qcom,camera"; + reg = <0x1>; + qcom,csiphy-sd-index = <1>; + qcom,csid-sd-index = <1>; + qcom,mount-angle = <90>; + qcom,eeprom-src = <&eeprom1>; + cam_vdig-supply = <&pmcobalt_lvs1>; + cam_vio-supply = <&pmcobalt_lvs1>; + cam_vana-supply = 
<&pmicobalt_bob>; + qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana"; + qcom,cam-vreg-min-voltage = <0 0 3312000>; + qcom,cam-vreg-max-voltage = <0 0 3312000>; + qcom,cam-vreg-op-mode = <0 0 80000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk2_active + &cam_sensor_rear2_active>; + pinctrl-1 = <&cam_sensor_mclk2_suspend + &cam_sensor_rear2_suspend>; + gpios = <&tlmm 15 0>, + <&tlmm 9 0>, + <&tlmm 8 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vana = <2>; + qcom,gpio-req-tbl-num = <0 1 2>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK1", + "CAM_RESET1", + "CAM_VANA1"; + qcom,sensor-position = <0>; + qcom,sensor-mode = <0>; + qcom,cci-master = <1>; + status = "ok"; + clocks = <&clock_mmss clk_mclk2_clk_src>, + <&clock_mmss clk_mmss_camss_mclk2_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; + + qcom,camera@2 { + cell-index = <2>; + compatible = "qcom,camera"; + reg = <0x02>; + qcom,csiphy-sd-index = <2>; + qcom,csid-sd-index = <2>; + qcom,mount-angle = <270>; + qcom,eeprom-src = <&eeprom2>; + qcom,led-flash-src = <&led_flash1>; + qcom,actuator-src = <&actuator1>; + cam_vio-supply = <&pmcobalt_lvs1>; + cam_vana-supply = <&pmcobalt_l22>; + cam_vdig-supply = <&pmcobalt_s3>; + qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig"; + qcom,cam-vreg-min-voltage = + <0 2864000 1352000>; + qcom,cam-vreg-max-voltage = + <0 2864000 1352000>; + qcom,cam-vreg-op-mode = <0 80000 105000>; + qcom,gpio-no-mux = <0>; + pinctrl-names = "cam_default", "cam_suspend"; + pinctrl-0 = <&cam_sensor_mclk1_active + &cam_sensor_front_active>; + pinctrl-1 = <&cam_sensor_mclk1_suspend + &cam_sensor_front_suspend>; + gpios = <&tlmm 14 0>, + <&tlmm 28 0>, + <&pmcobalt_gpios 9 0>; + qcom,gpio-reset = <1>; + qcom,gpio-vdig = <2>; + qcom,gpio-req-tbl-num = <0 1 2>; + qcom,gpio-req-tbl-flags = <1 0 0>; + qcom,gpio-req-tbl-label = "CAMIF_MCLK2", + "CAM_RESET2", + "CAM_VDIG"; + qcom,sensor-position = <1>; + qcom,sensor-mode = <0>; + qcom,cci-master = <1>; + status = "ok"; + clocks = <&clock_mmss clk_mclk1_clk_src>, + <&clock_mmss clk_mmss_camss_mclk1_clk>; + clock-names = "cam_src_clk", "cam_clk"; + qcom,clock-rates = <24000000 0>; + }; +}; +&pmcobalt_gpios { + gpio@c800 { /* GPIO 9 - CAMERA SENSOR 2 VDIG */ + qcom,mode = <1>; /* Output */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <0>; /* VIN1 GPIO_LV */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "ok"; + }; + + gpio@d300 { /* GPIO 20 - CAMERA SENSOR 0 VDIG */ + qcom,mode = <1>; /* Output */ + qcom,pull = <5>; /* No Pull */ + qcom,vin-sel = <1>; /* VIN1 GPIO_MV */ + qcom,src-sel = <0>; /* GPIO */ + qcom,invert = <0>; /* Invert */ + qcom,master-en = <1>; /* Enable GPIO */ + status = "ok"; + }; +}; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi index 190feb5000fc..a37fa26b1055 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi @@ -147,21 +147,20 @@ <&clock_mmss clk_mmss_camss_top_ahb_clk>, <&clock_mmss clk_mmss_camss_ispif_ahb_clk>, <&clock_mmss clk_csi0_clk_src>, + <&clock_mmss clk_csiphy_clk_src>, <&clock_mmss clk_mmss_camss_csi0_clk>, - <&clock_mmss clk_mmss_camss_csiphy0_clk>, <&clock_mmss clk_mmss_camss_csi0_ahb_clk>, <&clock_mmss clk_mmss_camss_csi0rdi_clk>, <&clock_mmss clk_mmss_camss_csi0pix_clk>, - <&clock_mmss clk_mmss_camss_cphy_csid0_clk>, - 
<&clock_mmss clk_csiphy_clk_src>; + <&clock_mmss clk_mmss_camss_cphy_csid0_clk>; clock-names = "mmssnoc_axi", "mnoc_ahb", "bmic_smmu_ahb", "bmic_smmu_axi", "camss_ahb_clk", "camss_top_ahb_clk", - "ispif_ahb_clk", "csi_src_clk", "csi_clk", - "csi_phy_clk", "csi_ahb_clk", "csi_rdi_clk", - "csi_pix_clk", "cphy_csid_clk", "cphy_clk_src"; - qcom,clock-rates = <0 0 0 0 0 0 0 256000000 0 0 0 0 0 0 - 256000000>; + "ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src", + "csi_clk", "csi_ahb_clk", "csi_rdi_clk", + "csi_pix_clk", "cphy_csid_clk"; + qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000 + 0 0 0 0 0>; status = "ok"; }; @@ -186,21 +185,20 @@ <&clock_mmss clk_mmss_camss_top_ahb_clk>, <&clock_mmss clk_mmss_camss_ispif_ahb_clk>, <&clock_mmss clk_csi1_clk_src>, + <&clock_mmss clk_csiphy_clk_src>, <&clock_mmss clk_mmss_camss_csi1_clk>, - <&clock_mmss clk_mmss_camss_csiphy1_clk>, <&clock_mmss clk_mmss_camss_csi1_ahb_clk>, <&clock_mmss clk_mmss_camss_csi1rdi_clk>, <&clock_mmss clk_mmss_camss_csi1pix_clk>, - <&clock_mmss clk_mmss_camss_cphy_csid1_clk>, - <&clock_mmss clk_csiphy_clk_src>; + <&clock_mmss clk_mmss_camss_cphy_csid1_clk>; clock-names = "mmssnoc_axi", "mnoc_ahb", "bmic_smmu_ahb", "bmic_smmu_axi", "camss_ahb_clk", "camss_top_ahb_clk", - "ispif_ahb_clk", "csi_src_clk", "csi_clk", - "csi_phy_clk", "csi_ahb_clk", "csi_rdi_clk", - "csi_pix_clk", "cphy_csid_clk", "cphy_clk_src"; - qcom,clock-rates = <0 0 0 0 0 0 0 256000000 0 0 0 0 0 0 - 256000000>; + "ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src", + "csi_clk", "csi_ahb_clk", "csi_rdi_clk", + "csi_pix_clk", "cphy_csid_clk"; + qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000 + 0 0 0 0 0>; status = "ok"; }; @@ -225,21 +223,20 @@ <&clock_mmss clk_mmss_camss_top_ahb_clk>, <&clock_mmss clk_mmss_camss_ispif_ahb_clk>, <&clock_mmss clk_csi2_clk_src>, + <&clock_mmss clk_csiphy_clk_src>, <&clock_mmss clk_mmss_camss_csi2_clk>, - <&clock_mmss clk_mmss_camss_csiphy2_clk>, <&clock_mmss clk_mmss_camss_csi2_ahb_clk>, <&clock_mmss clk_mmss_camss_csi2rdi_clk>, <&clock_mmss clk_mmss_camss_csi2pix_clk>, - <&clock_mmss clk_mmss_camss_cphy_csid2_clk>, - <&clock_mmss clk_csiphy_clk_src>; + <&clock_mmss clk_mmss_camss_cphy_csid2_clk>; clock-names = "mmssnoc_axi", "mnoc_ahb", "bmic_smmu_ahb", "bmic_smmu_axi", "camss_ahb_clk", "camss_top_ahb_clk", - "ispif_ahb_clk", "csi_src_clk", "csi_clk", - "csi_phy_clk", "csi_ahb_clk", "csi_rdi_clk", - "csi_pix_clk", "cphy_csid_clk", "cphy_clk_src"; - qcom,clock-rates = <0 0 0 0 0 0 0 256000000 0 0 0 0 0 0 - 256000000>; + "ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src", + "csi_clk", "csi_ahb_clk", "csi_rdi_clk", + "csi_pix_clk", "cphy_csid_clk"; + qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000 + 0 0 0 0 0>; status = "ok"; }; @@ -264,20 +261,20 @@ <&clock_mmss clk_mmss_camss_top_ahb_clk>, <&clock_mmss clk_mmss_camss_ispif_ahb_clk>, <&clock_mmss clk_csi3_clk_src>, + <&clock_mmss clk_csiphy_clk_src>, <&clock_mmss clk_mmss_camss_csi3_clk>, <&clock_mmss clk_mmss_camss_csi3_ahb_clk>, <&clock_mmss clk_mmss_camss_csi3rdi_clk>, <&clock_mmss clk_mmss_camss_csi3pix_clk>, - <&clock_mmss clk_mmss_camss_cphy_csid1_clk>, - <&clock_mmss clk_csiphy_clk_src>; + <&clock_mmss clk_mmss_camss_cphy_csid3_clk>; clock-names = "mmssnoc_axi", "mnoc_ahb", "bmic_smmu_ahb", "bmic_smmu_axi", "camss_ahb_clk", "camss_top_ahb_clk", - "ispif_ahb_clk", "csi_src_clk", "csi_clk", - "csi_ahb_clk", "csi_rdi_clk", - "csi_pix_clk", "cphy_csid_clk", "cphy_clk_src"; - qcom,clock-rates = <0 0 0 0 0 0 0 256000000 0 0 0 0 0 - 256000000>; + "ispif_ahb_clk", 
"csi_src_clk", "csiphy_clk_src", + "csi_clk", "csi_ahb_clk", "csi_rdi_clk", + "csi_pix_clk", "cphy_csid_clk"; + qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000 + 0 0 0 0 0>; status = "ok"; }; @@ -355,14 +352,18 @@ "mmss_fd_ahb_clk", "mmss_camss_cpp_axi_clk", "mmss_camss_cpp_vbif_ahb_clk"; - qcom,clock-rates = <0 0 0 0 0 0 200000000 0 0 0 0>; + qcom,clock-rates = + <0 0 0 0 0 0 404000000 0 0 0 0>, + <0 0 0 0 0 0 100000000 0 0 0 0>, + <0 0 0 0 0 0 404000000 0 0 0 0>, + <0 0 0 0 0 0 404000000 0 0 0 0>; qcom,msm-bus,name = "msm_camera_fd"; qcom,msm-bus,num-cases = <4>; qcom,msm-bus,num-paths = <1>; qcom,msm-bus,vectors-KBps = <106 512 0 0>, - <106 512 13000000 13000000>, - <106 512 45000000 45000000>, - <106 512 90000000 90000000>; + <106 512 1625 0>, + <106 512 2995 0>, + <106 512 7200 0>; qcom,fd-vbif-reg-settings = <0x20 0x10000000 0x30000000>, <0x24 0x10000000 0x30000000>, <0x28 0x10000000 0x30000000>, diff --git a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi index ad293d9827d1..fcceac6e2469 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi @@ -21,6 +21,8 @@ qca,bt-vdd-pa-supply = <&pmcobalt_l17_pin_ctrl>; qca,bt-vdd-ldo-supply = <&pmcobalt_l25_pin_ctrl>; qca,bt-chip-pwd-supply = <&pmicobalt_bob_pin1>; + clocks = <&clock_gcc clk_rf_clk2>; + clock-names = "rf_clk2"; qca,bt-vdd-io-voltage-level = <1352000 1352000>; qca,bt-vdd-xtal-voltage-level = <2040000 2040000>; @@ -376,6 +378,8 @@ qcom,mdss-dsi-bl-max-level = <4095>; qcom,5v-boost-gpio = <&tlmm 51 0>; qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,partial-update-enabled; + qcom,panel-roi-alignment = <4 2 4 2 20 20>; }; &dsi_sharp_1080_cmd { diff --git a/arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi index 1267e578f9b4..e140074465ef 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-gpu.dtsi @@ -75,6 +75,8 @@ qcom,gpu-qdss-stm = <0x161c0000 0x40000>; // base addr, size + qcom,tsens-name = "tsens_tz_sensor12"; + clocks = <&clock_gfx clk_gpucc_gfx3d_clk>, <&clock_gcc clk_gcc_gpu_cfg_ahb_clk>, <&clock_gpu clk_gpucc_rbbmtimer_clk>, diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi index a4ba9a61cded..0278cbde90ce 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi @@ -25,6 +25,7 @@ #include "dsi-panel-sharp-1080p-cmd.dtsi" #include "dsi-panel-jdi-1080p-video.dtsi" #include "dsi-panel-sharp-dualmipi-1080p-120hz.dtsi" +#include "dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi" &soc { dsi_panel_pwr_supply: dsi_panel_pwr_supply { @@ -81,6 +82,7 @@ qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07 05 03 04 00]; qcom,mdss-dsi-t-clk-post = <0x0d>; qcom,mdss-dsi-t-clk-pre = <0x2d>; + qcom,cmd-sync-wait-broadcast; qcom,esd-check-enabled; qcom,mdss-dsi-panel-status-check-mode = "bta_check"; }; @@ -89,6 +91,7 @@ qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07 05 03 04 00]; qcom,mdss-dsi-t-clk-post = <0x0d>; qcom,mdss-dsi-t-clk-pre = <0x2d>; + qcom,cmd-sync-wait-broadcast; }; &dsi_dual_nt35597_truly_video { @@ -156,3 +159,9 @@ qcom,mdss-dsi-t-clk-post = <0x7>; qcom,mdss-dsi-t-clk-pre = <0x26>; }; + +&dsi_dual_jdi_a407_cmd { + qcom,mdss-dsi-panel-timings = [00 16 05 05 09 0e 05 05 04 03 04 00]; + qcom,mdss-dsi-t-clk-post = <0x06>; + qcom,mdss-dsi-t-clk-pre = <0x22>; +}; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi 
b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi index 99402e3033ed..f9bb6e512d33 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi @@ -22,6 +22,8 @@ qca,bt-vdd-pa-supply = <&pmcobalt_l17_pin_ctrl>; qca,bt-vdd-ldo-supply = <&pmcobalt_l25_pin_ctrl>; qca,bt-chip-pwd-supply = <&pmicobalt_bob_pin1>; + clocks = <&clock_gcc clk_rf_clk2>; + clock-names = "rf_clk2"; qca,bt-vdd-io-voltage-level = <1352000 1352000>; qca,bt-vdd-xtal-voltage-level = <2040000 2040000>; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi index 6018124caf68..a20d80fda72b 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi @@ -24,7 +24,7 @@ qcom,vctl-port = <0x0>; qcom,phase-port = <0x1>; qcom,saw2-avs-ctl = <0x1010031>; - qcom,saw2-avs-limit = <0x4000208>; + qcom,saw2-avs-limit = <0x4580458>; qcom,pfm-port = <0x2>; }; @@ -40,7 +40,7 @@ qcom,vctl-port = <0x0>; qcom,phase-port = <0x1>; qcom,saw2-avs-ctl = <0x1010031>; - qcom,saw2-avs-limit = <0x4000208>; + qcom,saw2-avs-limit = <0x4580458>; qcom,pfm-port = <0x2>; }; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi index 1ae0ab804eac..5f89985db0a3 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dtsi @@ -126,3 +126,70 @@ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft"; }; }; + +&pmx_mdss { + mdss_dsi_active: mdss_dsi_active { + mux { + pins = "gpio94"; + function = "gpio"; + }; + + config { + pins = "gpio94"; + drive-strength = <8>; /* 8 mA */ + bias-disable = <0>; /* no pull */ + output-high; + }; + }; + + mdss_dsi_suspend: mdss_dsi_suspend { + mux { + pins = "gpio94"; + function = "gpio"; + }; + + config { + pins = "gpio94"; + drive-strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + }; + }; +}; + +&mdss_mdp { + qcom,mdss-pref-prim-intf = "dsi"; +}; + +&mdss_dsi { + hw-config = "split_dsi"; +}; + +&mdss_dsi0 { + qcom,dsi-pref-prim-pan = <&dsi_dual_jdi_a407_cmd>; + pinctrl-names = "mdss_default", "mdss_sleep"; + pinctrl-0 = <&mdss_dsi_active &mdss_te_active>; + pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>; + qcom,platform-reset-gpio = <&tlmm 94 0>; + qcom,platform-te-gpio = <&tlmm 10 0>; +}; + +&mdss_dsi1 { + qcom,dsi-pref-prim-pan = <&dsi_dual_jdi_a407_cmd>; + pinctrl-names = "mdss_default", "mdss_sleep"; + pinctrl-0 = <&mdss_dsi_active &mdss_te_active>; + pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>; + qcom,platform-reset-gpio = <&tlmm 94 0>; + qcom,platform-te-gpio = <&tlmm 10 0>; +}; + +&labibb { + status = "ok"; + qpnp,qpnp-labibb-mode = "lcd"; +}; + +&dsi_dual_jdi_a407_cmd { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi index e0ae9a8873a7..51e1154beaa9 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd.dtsi @@ -10,7 +10,529 @@ * GNU General Public License for more details. 
*/ -#include "msmcobalt-mtp.dtsi" +#include <dt-bindings/interrupt-controller/irq.h> +#include "msmcobalt-pinctrl.dtsi" +#include "msmcobalt-camera-sensor-qrd.dtsi" +/ { + bluetooth: bt_wcn3990 { + compatible = "qca,wcn3990"; + qca,bt-vdd-io-supply = <&pmcobalt_s3>; + qca,bt-vdd-xtal-supply = <&pmcobalt_s5>; + qca,bt-vdd-core-supply = <&pmcobalt_l7_pin_ctrl>; + qca,bt-vdd-pa-supply = <&pmcobalt_l17_pin_ctrl>; + qca,bt-vdd-ldo-supply = <&pmcobalt_l25_pin_ctrl>; + qca,bt-chip-pwd-supply = <&pmicobalt_bob_pin1>; + + qca,bt-vdd-io-voltage-level = <1352000 1352000>; + qca,bt-vdd-xtal-voltage-level = <2040000 2040000>; + qca,bt-vdd-core-voltage-level = <1800000 1800000>; + qca,bt-vdd-pa-voltage-level = <1304000 1304000>; + qca,bt-vdd-ldo-voltage-level = <3312000 3312000>; + qca,bt-chip-pwd-voltage-level = <3600000 3600000>; + + qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */ + qca,bt-vdd-xtal-current-level = <1>; /* LPM/PFM */ + qca,bt-vdd-core-current-level = <0>; /* LPM/PFM */ + qca,bt-vdd-pa-current-level = <0>; /* LPM/PFM */ + qca,bt-vdd-ldo-current-level = <0>; /* LPM/PFM */ + }; +}; + +&blsp1_uart3_hs { + status = "ok"; +}; + +&ufsphy1 { + vdda-phy-supply = <&pmcobalt_l1>; + vdda-pll-supply = <&pmcobalt_l2>; + vddp-ref-clk-supply = <&pmcobalt_l26>; + vdda-phy-max-microamp = <51400>; + vdda-pll-max-microamp = <14600>; + vddp-ref-clk-max-microamp = <100>; + vddp-ref-clk-always-on; + status = "ok"; +}; + +&ufs1 { + vdd-hba-supply = <&gdsc_ufs>; + vdd-hba-fixed-regulator; + vcc-supply = <&pmcobalt_l20>; + vccq-supply = <&pmcobalt_l26>; + vccq2-supply = <&pmcobalt_s4>; + vcc-max-microamp = <750000>; + vccq-max-microamp = <560000>; + vccq2-max-microamp = <750000>; + status = "ok"; +}; + +&ufs_ice { + status = "ok"; +}; + +&sdhc_2 { + vdd-supply = <&pmcobalt_l21>; + qcom,vdd-voltage-level = <2950000 2960000>; + qcom,vdd-current-level = <200 800000>; + + vdd-io-supply = <&pmcobalt_l13>; + qcom,vdd-io-voltage-level = <1808000 2960000>; + qcom,vdd-io-current-level = <200 22000>; + + pinctrl-names = "active", "sleep"; + pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>; + pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>; + + qcom,clk-rates = <400000 20000000 25000000 + 50000000 100000000 200000000>; + qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104"; + + cd-gpios = <&tlmm 95 0x1>; + + status = "ok"; +}; + +&uartblsp2dm1 { + status = "ok"; + pinctrl-names = "default"; + pinctrl-0 = <&uart_console_active>; +}; + +&pmcobalt_gpios { + /* GPIO 6 for Vol+ Key */ + gpio@c500 { + status = "okay"; + qcom,mode = <0>; + qcom,pull = <0>; + qcom,vin-sel = <0>; + qcom,src-sel = <0>; + qcom,out-strength = <1>; + }; + + /* GPIO 7 for Snapshot Key */ + gpio@c600 { + status = "okay"; + qcom,mode = <0>; + qcom,pull = <0>; + qcom,vin-sel = <0>; + qcom,src-sel = <0>; + qcom,out-strength = <1>; + }; + + /* GPIO 8 for Focus Key */ + gpio@c700 { + status = "okay"; + qcom,mode = <0>; + qcom,pull = <0>; + qcom,vin-sel = <0>; + qcom,src-sel = <0>; + qcom,out-strength = <1>; + }; + + gpio@cc00 { /* GPIO 13 */ + qcom,mode = <1>; + qcom,output-type = <0>; + qcom,pull = <5>; + qcom,vin-sel = <0>; + qcom,out-strength = <1>; + qcom,src-sel = <3>; + qcom,master-en = <1>; + status = "okay"; + }; + + /* GPIO 21 (NFC_CLK_REQ) */ + gpio@d400 { + qcom,mode = <0>; + qcom,vin-sel = <1>; + qcom,src-sel = <0>; + qcom,master-en = <1>; + status = "okay"; + }; + + /* GPIO 18 SMB138X */ + gpio@d100 { + qcom,mode = <0>; + qcom,pull = <0>; + qcom,vin-sel = <0>; + qcom,src-sel = <0>; + 
qcom,master-en = <1>; + status = "okay"; + }; +}; + +&i2c_5 { + status = "okay"; + synaptics@20 { + compatible = "synaptics,dsx"; + reg = <0x20>; + interrupt-parent = <&tlmm>; + interrupts = <125 0x2008>; + vdd-supply = <&pmcobalt_l6>; + avdd-supply = <&pmcobalt_l28>; + synaptics,vdd-voltage = <1808000 1808000>; + synaptics,avdd-voltage = <3008000 3008000>; + synaptics,vdd-current = <40000>; + synaptics,avdd-current = <20000>; + pinctrl-names = "pmx_ts_active", "pmx_ts_suspend"; + pinctrl-0 = <&ts_active>; + pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>; + synaptics,display-coords = <0 0 1439 2559>; + synaptics,panel-coords = <0 0 1439 2559>; + synaptics,reset-gpio = <&tlmm 89 0x00>; + synaptics,irq-gpio = <&tlmm 125 0x2008>; + synaptics,disable-gpios; + synaptics,fw-name = "PR1702898-s3528t_60QHD_00400001.img"; + }; +}; + +&i2c_6 { /* BLSP1 QUP6 (NFC) */ + status = "okay"; + nq@28 { + compatible = "qcom,nq-nci"; + reg = <0x28>; + qcom,nq-irq = <&tlmm 92 0x00>; + qcom,nq-ven = <&tlmm 12 0x00>; + qcom,nq-firm = <&tlmm 93 0x00>; + qcom,nq-clkreq = <&pmcobalt_gpios 21 0x00>; + qcom,nq-esepwr = <&tlmm 116 0x00>; + interrupt-parent = <&tlmm>; + qcom,clk-src = "BBCLK3"; + interrupts = <92 0>; + interrupt-names = "nfc_irq"; + pinctrl-names = "nfc_active", "nfc_suspend"; + pinctrl-0 = <&nfc_int_active &nfc_enable_active>; + pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>; + clocks = <&clock_gcc clk_ln_bb_clk3_pin>; + clock-names = "ref_clk"; + }; +}; + +&mdss_hdmi_tx { + pinctrl-names = "hdmi_hpd_active", "hdmi_ddc_active", "hdmi_cec_active", + "hdmi_active", "hdmi_sleep"; + pinctrl-0 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active + &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>; + pinctrl-1 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active + &mdss_hdmi_ddc_active &mdss_hdmi_cec_suspend>; + pinctrl-2 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active + &mdss_hdmi_cec_active &mdss_hdmi_ddc_suspend>; + pinctrl-3 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active + &mdss_hdmi_ddc_active &mdss_hdmi_cec_active>; + pinctrl-4 = <&mdss_hdmi_5v_suspend &mdss_hdmi_hpd_suspend + &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>; +}; + +&mdss_dp_ctrl { + pinctrl-names = "mdss_dp_active", "mdss_dp_sleep"; + pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>; + pinctrl-1 = <&mdss_dp_aux_suspend &mdss_dp_usbplug_cc_suspend>; + qcom,aux-en-gpio = <&tlmm 77 0>; + qcom,aux-sel-gpio = <&tlmm 78 0>; + qcom,usbplug-cc-gpio = <&tlmm 38 0>; +}; + +&mdss_mdp { + qcom,mdss-pref-prim-intf = "dsi"; +}; + +&mdss_dsi { + hw-config = "split_dsi"; +}; + +&mdss_dsi0 { + qcom,dsi-pref-prim-pan = <&dsi_dual_nt35597_video>; + pinctrl-names = "mdss_default", "mdss_sleep"; + pinctrl-0 = <&mdss_dsi_active &mdss_te_active>; + pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>; + qcom,platform-reset-gpio = <&tlmm 94 0>; + qcom,platform-te-gpio = <&tlmm 10 0>; + qcom,panel-mode-gpio = <&tlmm 91 0>; +}; + +&mdss_dsi1 { + qcom,dsi-pref-prim-pan = <&dsi_dual_nt35597_video>; + pinctrl-names = "mdss_default", "mdss_sleep"; + pinctrl-0 = <&mdss_dsi_active &mdss_te_active>; + pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>; + qcom,platform-reset-gpio = <&tlmm 94 0>; + qcom,platform-te-gpio = <&tlmm 10 0>; + qcom,panel-mode-gpio = <&tlmm 91 0>; +}; + +&labibb { + status = "ok"; + qpnp,qpnp-labibb-mode = "lcd"; +}; + +&dsi_dual_nt35597_video { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; + qcom,panel-supply-entries = 
<&dsi_panel_pwr_supply>; +}; + +&dsi_dual_nt35597_cmd { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; + +&dsi_dual_nt35597_truly_video { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; + +&dsi_dual_nt35597_truly_cmd { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; + +&dsi_nt35597_dsc_video { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-mode-sel-gpio-state = "single_port"; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; + +&dsi_nt35597_dsc_cmd { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-mode-sel-gpio-state = "single_port"; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; + +&dsi_sharp_4k_dsc_video { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; + +&dsi_sharp_4k_dsc_cmd { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; + +&dsi_dual_jdi_video { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,5v-boost-gpio = <&tlmm 51 0>; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; + +&dsi_dual_jdi_cmd { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,5v-boost-gpio = <&tlmm 51 0>; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; + +&dsi_sharp_1080_cmd { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; +}; + +&dsi_jdi_1080_vid { + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,panel-supply-entries = <&dsi_panel_pwr_supply_no_labibb>; + qcom,5v-boost-gpio = <&tlmm 51 0>; +}; + +&i2c_7 { + status = "okay"; + qcom,smb138x@8 { + compatible = "qcom,i2c-pmic"; + reg = <0x8>; + #address-cells = <2>; + #size-cells = <0>; + interrupt-parent = <&spmi_bus>; + interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>; + interrupt_names = "smb138x"; + interrupt-controller; + #interrupt-cells = <3>; + qcom,periph-map = <0x10 0x11 0x12 0x13 0x14 0x16 0x36>; + + smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 { + compatible = "qcom,smb138x-parallel-slave"; + reg = <0x1000 0x700>; + }; + }; +}; + +&pmicobalt_haptics { + status = "okay"; +}; + +&pmcobalt_vadc { + chan@83 { + label = "vph_pwr"; + reg = <0x83>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <1>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + 
qcom,hw-settle-time = <0>; + qcom,fast-avg-setup = <0>; + }; + + chan@85 { + label = "vcoin"; + reg = <0x85>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <1>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + qcom,hw-settle-time = <0>; + qcom,fast-avg-setup = <0>; + }; + + chan@4c { + label = "xo_therm"; + reg = <0x4c>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <4>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; + }; + + chan@4d { + label = "msm_therm"; + reg = <0x4d>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; + }; + + chan@51 { + label = "quiet_therm"; + reg = <0x51>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; + }; +}; + +&pmcobalt_adc_tm { + chan@83 { + label = "vph_pwr"; + reg = <0x83>; + qcom,pre-div-channel-scaling = <1>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + qcom,hw-settle-time = <0>; + qcom,btm-channel-number = <0x60>; + }; + + chan@4d { + label = "msm_therm"; + reg = <0x4d>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,btm-channel-number = <0x68>; + qcom,thermal-node; + }; + + chan@51 { + label = "quiet_therm"; + reg = <0x51>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,btm-channel-number = <0x70>; + qcom,thermal-node; + }; + + chan@4c { + label = "xo_therm"; + reg = <0x4c>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <4>; + qcom,hw-settle-time = <2>; + qcom,btm-channel-number = <0x78>; + qcom,thermal-node; + }; +}; + +&wil6210 { + status = "ok"; +}; + +&soc { + sound-9335 { + qcom,wcn-btfm; + }; + + gpio_keys { + compatible = "gpio-keys"; + input-name = "gpio-keys"; + status = "okay"; + + vol_up { + label = "volume_up"; + gpios = <&pmcobalt_gpios 6 0x1>; + linux,input-type = <1>; + linux,code = <115>; + gpio-key,wakeup; + debounce-interval = <15>; + }; + + cam_snapshot { + label = "cam_snapshot"; + gpios = <&pmcobalt_gpios 7 0x1>; + linux,input-type = <1>; + linux,code = <766>; + gpio-key,wakeup; + debounce-interval = <15>; + }; + + cam_focus { + label = "cam_focus"; + gpios = <&pmcobalt_gpios 8 0x1>; + linux,input-type = <1>; + linux,code = <528>; + gpio-key,wakeup; + debounce-interval = <15>; + }; + }; +}; + +/{ + mtp_batterydata: qcom,battery-data { + qcom,batt-id-range-pct = <15>; + #include "fg-gen3-batterydata-itech-3000mah.dtsi" + #include "fg-gen3-batterydata-ascent-3450mah.dtsi" + }; +}; &mdss_mdp { qcom,mdss-pref-prim-intf = "dsi"; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi index c2d45ec3ef07..2a61cccad273 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi @@ -537,6 +537,28 @@ qcom,enable-time = <500>; }; }; + + qcom,pmcobalt@1 { + pmcobalt_s10: regulator@2f00 { + compatible = "qcom,qpnp-regulator"; + reg = <0x2f00 0x100>; + regulator-name = "pmcobalt_s10"; + regulator-min-microvolt = <572000>; + regulator-max-microvolt = <1112000>; + 
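pmcobalt_s10 here (and pmcobalt_s13 just below) are added as always-on supplies with a 572-1112 mV window so the CPR controllers can reference them through the new vdd-supply handles. A hedged consumer-side sketch of requesting a voltage inside that window; the "vdd" supply name matches the handle added to the CPR nodes, the function itself is illustrative:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int cpr_get_vdd(struct device *dev)
{
	struct regulator *vdd = devm_regulator_get(dev, "vdd");

	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	/* Any request must fall inside the DT min/max window above. */
	return regulator_set_voltage(vdd, 572000, 1112000);
}
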
qcom,enable-time = <500>; + regulator-always-on; + }; + + pmcobalt_s13: regulator@3800 { + compatible = "qcom,qpnp-regulator"; + reg = <0x3800 0x100>; + regulator-name = "pmcobalt_s13"; + regulator-min-microvolt = <572000>; + regulator-max-microvolt = <1112000>; + qcom,enable-time = <500>; + regulator-always-on; + }; + }; }; /* Stub regulators */ @@ -590,6 +612,9 @@ qcom,cpr-panic-reg-name-list = "PWR_CPRH_STATUS", "APCLUS0_L2_SAW4_PMIC_STS"; + qcom,cpr-aging-ref-voltage = <1112000>; + vdd-supply = <&pmcobalt_s10>; + thread@0 { qcom,cpr-thread-id = <0>; qcom,cpr-consecutive-up = <0>; @@ -712,6 +737,13 @@ qcom,allow-voltage-interpolation; qcom,allow-quotient-interpolation; qcom,cpr-scaled-open-loop-voltage-as-ceiling; + + qcom,cpr-aging-max-voltage-adjustment = <15000>; + qcom,cpr-aging-ref-corner = <22>; + qcom,cpr-aging-ro-scaling-factor = <2950>; + qcom,allow-aging-voltage-adjustment = <0>; + qcom,allow-aging-open-loop-voltage-adjustment = + <1>; }; }; }; @@ -752,6 +784,9 @@ qcom,cpr-panic-reg-name-list = "PERF_CPRH_STATUS", "APCLUS1_L2_SAW4_PMIC_STS"; + qcom,cpr-aging-ref-voltage = <1112000>; + vdd-supply = <&pmcobalt_s13>; + thread@0 { qcom,cpr-thread-id = <0>; qcom,cpr-consecutive-up = <0>; @@ -894,6 +929,13 @@ qcom,allow-voltage-interpolation; qcom,allow-quotient-interpolation; qcom,cpr-scaled-open-loop-voltage-as-ceiling; + + qcom,cpr-aging-max-voltage-adjustment = <15000>; + qcom,cpr-aging-ref-corner = <25>; + qcom,cpr-aging-ro-scaling-factor = <2950>; + qcom,allow-aging-voltage-adjustment = <0>; + qcom,allow-aging-open-loop-voltage-adjustment = + <1>; }; }; }; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2-camera.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-v2-camera.dtsi index 99d80a3b3848..fcc4d6d8ee2d 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-v2-camera.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-v2-camera.dtsi @@ -19,7 +19,10 @@ reg-names = "csiphy"; interrupts = <0 78 0>; interrupt-names = "csiphy"; - clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>, + gdscr-supply = <&gdsc_camss_top>; + bimc_smmu-supply = <&gdsc_bimc_smmu>; + qcom,cam-vreg-name = "gdscr", "bimc_smmu"; + clocks = <&clock_gcc clk_mmssnoc_axi_clk>, <&clock_mmss clk_mmss_mnoc_ahb_clk>, <&clock_mmss clk_mmss_bimc_smmu_ahb_clk>, <&clock_mmss clk_mmss_bimc_smmu_axi_clk>, @@ -33,13 +36,13 @@ <&clock_mmss clk_mmss_camss_ispif_ahb_clk>, <&clock_mmss clk_csiphy_clk_src>, <&clock_mmss clk_mmss_camss_csiphy0_clk>; - clock-names = "mnoc_maxi", "mnoc_ahb", + clock-names = "mmssnoc_axi", "mnoc_ahb", "bmic_smmu_ahb", "bmic_smmu_axi", "camss_ahb_clk", "camss_top_ahb_clk", "csi_src_clk", "csi_clk", "cphy_csid_clk", "csiphy_timer_src_clk", "csiphy_timer_clk", "camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk"; - qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 269333333 0 + qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 200000000 0 0 256000000 0>; status = "ok"; }; @@ -51,7 +54,10 @@ reg-names = "csiphy"; interrupts = <0 79 0>; interrupt-names = "csiphy"; - clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>, + gdscr-supply = <&gdsc_camss_top>; + bimc_smmu-supply = <&gdsc_bimc_smmu>; + qcom,cam-vreg-name = "gdscr", "bimc_smmu"; + clocks = <&clock_gcc clk_mmssnoc_axi_clk>, <&clock_mmss clk_mmss_mnoc_ahb_clk>, <&clock_mmss clk_mmss_bimc_smmu_ahb_clk>, <&clock_mmss clk_mmss_bimc_smmu_axi_clk>, @@ -65,13 +71,13 @@ <&clock_mmss clk_mmss_camss_ispif_ahb_clk>, <&clock_mmss clk_csiphy_clk_src>, <&clock_mmss clk_mmss_camss_csiphy1_clk>; - clock-names = "mnoc_maxi", "mnoc_ahb", + clock-names = "mmssnoc_axi", "mnoc_ahb", "bmic_smmu_ahb", 
"bmic_smmu_axi", "camss_ahb_clk", "camss_top_ahb_clk", "csi_src_clk", "csi_clk", "cphy_csid_clk", "csiphy_timer_src_clk", "csiphy_timer_clk", "camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk"; - qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 269333333 0 + qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 200000000 0 0 256000000 0>; status = "ok"; }; @@ -83,7 +89,10 @@ reg-names = "csiphy"; interrupts = <0 80 0>; interrupt-names = "csiphy"; - clocks = <&clock_mmss clk_mmss_mnoc_maxi_clk>, + gdscr-supply = <&gdsc_camss_top>; + bimc_smmu-supply = <&gdsc_bimc_smmu>; + qcom,cam-vreg-name = "gdscr", "bimc_smmu"; + clocks = <&clock_gcc clk_mmssnoc_axi_clk>, <&clock_mmss clk_mmss_mnoc_ahb_clk>, <&clock_mmss clk_mmss_bimc_smmu_ahb_clk>, <&clock_mmss clk_mmss_bimc_smmu_axi_clk>, @@ -97,13 +106,13 @@ <&clock_mmss clk_mmss_camss_ispif_ahb_clk>, <&clock_mmss clk_csiphy_clk_src>, <&clock_mmss clk_mmss_camss_csiphy2_clk>; - clock-names = "mnoc_maxi", "mnoc_ahb", + clock-names = "mmssnoc_axi", "mnoc_ahb", "bmic_smmu_ahb", "bmic_smmu_axi", "camss_ahb_clk", "camss_top_ahb_clk", "csi_src_clk", "csi_clk", "cphy_csid_clk", "csiphy_timer_src_clk", "csiphy_timer_clk", "camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk"; - qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 269333333 0 + qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 200000000 0 0 256000000 0>; status = "ok"; }; diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi index b255fca6a691..9b791d6b7fb0 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi @@ -227,9 +227,20 @@ qcom,max-bandwidth-per-pipe-kbps = <4700000>; }; +&pmcobalt_s10 { + regulator-min-microvolt = <568000>; + regulator-max-microvolt = <1056000>; +}; + +&pmcobalt_s13 { + regulator-min-microvolt = <568000>; + regulator-max-microvolt = <1056000>; +}; + &apc0_cpr { compatible = "qcom,cprh-msmcobalt-v2-kbss-regulator"; qcom,cpr-corner-switch-delay-time = <1042>; + qcom,cpr-aging-ref-voltage = <1056000>; }; &apc0_pwrcl_vreg { @@ -371,11 +382,16 @@ qcom,allow-voltage-interpolation; qcom,allow-quotient-interpolation; qcom,cpr-scaled-open-loop-voltage-as-ceiling; + + qcom,cpr-aging-ref-corner = <22 22>; + qcom,cpr-aging-ro-scaling-factor = <2950>; + qcom,allow-aging-voltage-adjustment = <0>; }; &apc1_cpr { compatible = "qcom,cprh-msmcobalt-v2-kbss-regulator"; qcom,cpr-corner-switch-delay-time = <1042>; + qcom,cpr-aging-ref-voltage = <1056000>; }; &apc1_perfcl_vreg { @@ -527,6 +543,10 @@ qcom,allow-voltage-interpolation; qcom,allow-quotient-interpolation; qcom,cpr-scaled-open-loop-voltage-as-ceiling; + + qcom,cpr-aging-ref-corner = <30 26>; + qcom,cpr-aging-ro-scaling-factor = <2950>; + qcom,allow-aging-voltage-adjustment = <0>; }; &pm8005_s1 { @@ -674,6 +694,35 @@ <355200000 3570000>,/* imem @ svs freq 171 Mhz */ <444000000 6750000>,/* imem @ nom freq 323 Mhz */ <533000000 8490000>;/* imem @ turbo freq 406 Mhz */ + + qcom,dcvs-tbl = /* minLoad LoadLow LoadHigh CodecCheck */ + /* Decode */ + /* Load > Nominal, Nominal <-> Turbo Eg.3840x2160@60 */ + <1728000 1728000 2211840 0x3f00000c>, + /* Encoder */ + /* Load > Nominal, Nominal <-> Turbo Eg. 4kx2304@30 */ + <972000 972000 1105920 0x04000004>, + /* Load > SVSL1, SVSL1<-> Nominal Eg. 3840x2160@30 */ + <939700 939700 972000 0x04000004>, + /* Load > SVS , SVS <-> SVSL1 Eg. 
4kx2304@24 */ + <489600 489600 939700 0x04000004>; + + qcom,dcvs-limit = /* Min Frame size, Min MBs/sec */ + <32400 30>, /* Encoder 3840x2160@30 */ + <32400 60>; /* Decoder 3840x2160@60 */ + +}; + +&soc { + /* Gold L2 SAW */ + qcom,spm@178120000 { + qcom,saw2-avs-limit = <0x4200420>; + }; + + /* Silver L2 SAW */ + qcom,spm@179120000 { + qcom,saw2-avs-limit = <0x4200420>; + }; }; /* GPU overrides */ diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi index 2600fa25b73f..96518dca0ec1 100644 --- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi +++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi @@ -2694,6 +2694,11 @@ reg = <0x10 8>; }; + dload_type@18 { + compatible = "qcom,msm-imem-dload-type"; + reg = <0x18 4>; + }; + restart_reason@65c { compatible = "qcom,msm-imem-restart_reason"; reg = <0x65c 4>; diff --git a/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi index 11f602d842bc..cb5fce378b6c 100644 --- a/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi +++ b/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi @@ -39,8 +39,8 @@ qcom,qos-off = <4096>; qcom,base-offset = <16384>; clock-names = "bus_clk", "bus_a_clk"; - clocks = <&clock_gcc RPM_AGGRE2_NOC_CLK>, - <&clock_gcc RPM_AGGRE2_NOC_A_CLK>; + clocks = <&clock_rpmcc RPM_AGGR2_NOC_CLK>, + <&clock_rpmcc RPM_AGGR2_NOC_A_CLK>; }; fab_bimc: fab-bimc { @@ -52,8 +52,8 @@ qcom,bypass-qos-prg; qcom,util-fact = <153>; clock-names = "bus_clk", "bus_a_clk"; - clocks = <&clock_gcc RPM_BIMC_MSMBUS_CLK>, - <&clock_gcc RPM_BIMC_MSMBUS_A_CLK>; + clocks = <&clock_rpmcc BIMC_MSMBUS_CLK>, + <&clock_rpmcc BIMC_MSMBUS_A_CLK>; }; fab_cnoc: fab-cnoc { @@ -64,8 +64,8 @@ qcom,bypass-qos-prg; qcom,bus-type = <1>; clock-names = "bus_clk", "bus_a_clk"; - clocks = <&clock_gcc RPM_CNOC_MSMBUS_CLK>, - <&clock_gcc RPM_CNOC_MSMBUS_A_CLK>; + clocks = <&clock_rpmcc CNOC_MSMBUS_CLK>, + <&clock_rpmcc CNOC_MSMBUS_A_CLK>; }; fab_gnoc: fab-gnoc { @@ -87,8 +87,8 @@ qcom,base-offset = <20480>; qcom,util-fact = <154>; clock-names = "bus_clk", "bus_a_clk"; - clocks = <&clock_gcc RPM_MMSSNOC_AXI_CLK>, - <&clock_gcc RPM_MMSSNOC_AXI_A_CLK>; + clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>, + <&clock_rpmcc MMSSNOC_AXI_A_CLK>; }; fab_snoc: fab-snoc { @@ -101,8 +101,8 @@ qcom,qos-off = <4096>; qcom,base-offset = <24576>; clock-names = "bus_clk", "bus_a_clk"; - clocks = <&clock_gcc RPM_SNOC_MSMBUS_CLK>, - <&clock_gcc RPM_SNOC_MSMBUS_A_CLK>; + clocks = <&clock_rpmcc SNOC_MSMBUS_CLK>, + <&clock_rpmcc SNOC_MSMBUS_A_CLK>; }; fab_mnoc_ahb: fab-mnoc-ahb { diff --git a/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi index b60d4013dad8..3826b00bf09e 100644 --- a/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi +++ b/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi @@ -30,8 +30,8 @@ coresight-name = "coresight-tmc-etr"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "apb_pclk", "core_a_clk"; port{ @@ -80,8 +80,8 @@ coresight-ctis = <&cti0 &cti8>; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "apb_pclk", "core_a_clk"; ports{ @@ -115,8 +115,8 @@ coresight-name = "coresight-funnel-merg"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "apb_pclk", "core_a_clk"; ports { @@ -150,8 +150,8 @@ 
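The qcom,dcvs-tbl rows above encode <minLoad LoadLow LoadHigh CodecCheck> per codec class. A worked sketch of the selection those rows imply: take the first row whose codec bitmask matches and whose [LoadLow, LoadHigh] window brackets the current macroblock rate. The semantics are assumed from the DT comments, not taken from the msm_vidc implementation:

#include <stddef.h>

struct dcvs_row {
	unsigned int min_load;   /* minLoad: below this, DCVS stays off  */
	unsigned int load_low;   /* LoadLow: lower edge of the window    */
	unsigned int load_high;  /* LoadHigh: upper edge of the window   */
	unsigned int codec_mask; /* CodecCheck: codecs this row governs  */
};

static const struct dcvs_row *dcvs_pick(const struct dcvs_row *tbl, int n,
					unsigned int load_mbs,
					unsigned int codec_bit)
{
	int i;

	for (i = 0; i < n; i++)
		if ((tbl[i].codec_mask & codec_bit) &&
		    load_mbs >= tbl[i].load_low &&
		    load_mbs <= tbl[i].load_high)
			return &tbl[i];
	return NULL;	/* no window matched: run at the default corner */
}
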
coresight-name = "coresight-funnel-in0"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "apb_pclk", "core_a_clk"; ports { @@ -193,8 +193,8 @@ coresight-name = "coresight-stm"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "apb_pclk", "core_a_clk"; port{ @@ -211,8 +211,8 @@ coresight-name = "coresight-cti0"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -223,8 +223,8 @@ coresight-name = "coresight-cti1"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -235,8 +235,8 @@ coresight-name = "coresight-cti2"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -247,8 +247,8 @@ coresight-name = "coresight-cti3"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -259,8 +259,8 @@ coresight-name = "coresight-cti4"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -271,8 +271,8 @@ coresight-name = "coresight-cti5"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -283,8 +283,8 @@ coresight-name = "coresight-cti6"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -295,8 +295,8 @@ coresight-name = "coresight-cti7"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -307,8 +307,8 @@ coresight-name = "coresight-cti8"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -319,8 +319,8 @@ coresight-name = "coresight-cti9"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -331,8 +331,8 @@ coresight-name = "coresight-cti10"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -343,8 +343,8 @@ coresight-name = "coresight-cti11"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -355,8 +355,8 @@ coresight-name = "coresight-cti12"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; 
clock-names = "core_clk", "core_a_clk"; }; @@ -367,8 +367,8 @@ coresight-name = "coresight-cti13"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -379,8 +379,8 @@ coresight-name = "coresight-cti14"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -391,8 +391,8 @@ coresight-name = "coresight-cti15"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; }; @@ -405,8 +405,8 @@ coresight-name = "coresight-funnel-qatb"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "apb_pclk", "core_a_clk"; ports { @@ -451,8 +451,8 @@ <5 32>, <9 64>; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; ports { @@ -483,8 +483,8 @@ coresight-name = "coresight-tpdm-dcc"; - clocks = <&clock_gcc RPM_QDSS_CLK>, - <&clock_gcc RPM_QDSS_A_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>, + <&clock_rpmcc RPM_QDSS_A_CLK>; clock-names = "core_clk", "core_a_clk"; port{ diff --git a/arch/arm/boot/dts/qcom/msmfalcon.dtsi b/arch/arm/boot/dts/qcom/msmfalcon.dtsi index 67748d6683c0..df04d1a57683 100644 --- a/arch/arm/boot/dts/qcom/msmfalcon.dtsi +++ b/arch/arm/boot/dts/qcom/msmfalcon.dtsi @@ -14,6 +14,7 @@ #include <dt-bindings/clock/qcom,gcc-msmfalcon.h> #include <dt-bindings/clock/qcom,gpu-msmfalcon.h> #include <dt-bindings/clock/qcom,mmcc-msmfalcon.h> +#include <dt-bindings/clock/qcom,rpmcc.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/regulator/qcom,rpm-smd-regulator.h> @@ -135,6 +136,22 @@ }; }; + clocks { + xo_board { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <19200000>; + clock-output-names = "xo_board"; + }; + + sleep_clk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <32764>; + clock-output-names = "sleep_clk"; + }; + }; + soc: soc { }; reserved-memory { @@ -360,19 +377,31 @@ }; }; - clock_gcc: qcom,dummycc { + clock_rpmcc: qcom,dummycc { + compatible = "qcom,dummycc"; + clock-output-names = "rpmcc_clocks"; + #clock-cells = <1>; + }; + + clock_gcc: clock-controller@100000 { compatible = "qcom,dummycc"; + clock-output-names = "gcc_clocks"; #clock-cells = <1>; + #reset-cells = <1>; }; - clock_mmss: qcom,dummycc { + clock_mmss: clock-controller@c8c0000 { compatible = "qcom,dummycc"; + clock-output-names = "mmss_clocks"; #clock-cells = <1>; + #reset-cells = <1>; }; - clock_gfx: qcom,dummycc { + clock_gfx: clock-controller@5065000 { compatible = "qcom,dummycc"; + clock-output-names = "gfx_clocks"; #clock-cells = <1>; + #reset-cells = <1>; }; qcom,ipc-spinlock@1f40000 { @@ -398,7 +427,7 @@ <0x10b4000 0x800>; reg-names = "dcc-base", "dcc-ram-base"; - clocks = <&clock_gcc RPM_QDSS_CLK>; + clocks = <&clock_rpmcc RPM_QDSS_CLK>; clock-names = "dcc_clk"; }; @@ -620,6 +649,27 @@ memory-region = <&venus_fw_mem>; status = "ok"; }; + + qcom,icnss@18800000 { + status = "disabled"; + compatible = "qcom,icnss"; + reg = <0x18800000 0x800000>, + <0x10ac000 0x20>; + reg-names = "membase", "mpm_config"; + interrupts = <0 
413 0>, /* CE0 */ + <0 414 0>, /* CE1 */ + <0 415 0>, /* CE2 */ + <0 416 0>, /* CE3 */ + <0 417 0>, /* CE4 */ + <0 418 0>, /* CE5 */ + <0 420 0>, /* CE6 */ + <0 421 0>, /* CE7 */ + <0 422 0>, /* CE8 */ + <0 423 0>, /* CE9 */ + <0 424 0>, /* CE10 */ + <0 425 0>; /* CE11 */ + qcom,wlan-msa-memory = <0x100000>; + }; }; #include "msmfalcon-ion.dtsi" diff --git a/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi b/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi new file mode 100644 index 000000000000..f6deef335844 --- /dev/null +++ b/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi @@ -0,0 +1,52 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + qcom,ion { + compatible = "qcom,msm-ion"; + #address-cells = <1>; + #size-cells = <0>; + + system_heap: qcom,ion-heap@25 { + reg = <25>; + qcom,ion-heap-type = "SYSTEM"; + }; + + system_contig_heap: qcom,ion-heap@21 { + reg = <21>; + qcom,ion-heap-type = "SYSTEM_CONTIG"; + }; + + qcom,ion-heap@22 { /* ADSP HEAP */ + reg = <22>; + memory-region = <&adsp_mem>; + qcom,ion-heap-type = "DMA"; + }; + + qcom,ion-heap@27 { /* QSEECOM HEAP */ + reg = <27>; + memory-region = <&qseecom_mem>; + qcom,ion-heap-type = "DMA"; + }; + + qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */ + reg = <10>; + memory-region = <&secure_display_memory>; + qcom,ion-heap-type = "HYP_CMA"; + }; + + qcom,ion-heap@9 { + reg = <9>; + qcom,ion-heap-type = "SYSTEM_SECURE"; + }; + }; +}; diff --git a/arch/arm/boot/dts/qcom/msmtriton.dtsi b/arch/arm/boot/dts/qcom/msmtriton.dtsi index 3f0d4cc48696..71b4da9bb2d0 100644 --- a/arch/arm/boot/dts/qcom/msmtriton.dtsi +++ b/arch/arm/boot/dts/qcom/msmtriton.dtsi @@ -14,6 +14,7 @@ #include <dt-bindings/clock/qcom,gcc-msmfalcon.h> #include <dt-bindings/clock/qcom,gpu-msmfalcon.h> #include <dt-bindings/clock/qcom,mmcc-msmfalcon.h> +#include <dt-bindings/clock/qcom,rpmcc.h> #include <dt-bindings/interrupt-controller/arm-gic.h> / { @@ -134,6 +135,22 @@ }; }; + clocks { + xo_board { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <19200000>; + clock-output-names = "xo_board"; + }; + + sleep_clk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <32764>; + clock-output-names = "sleep_clk"; + }; + }; + soc: soc { }; reserved-memory { @@ -308,19 +325,31 @@ }; }; - clock_gcc: qcom,dummycc { + clock_rpmcc: qcom,dummycc { compatible = "qcom,dummycc"; + clock-output-names = "rpmcc_clocks"; #clock-cells = <1>; }; - clock_mmss: qcom,dummycc { + clock_gcc: clock-controller@100000 { compatible = "qcom,dummycc"; + clock-output-names = "gcc_clocks"; #clock-cells = <1>; + #reset-cells = <1>; }; - clock_gfx: qcom,dummycc { + clock_mmss: clock-controller@c8c0000 { compatible = "qcom,dummycc"; + clock-output-names = "mmss_clocks"; #clock-cells = <1>; + #reset-cells = <1>; + }; + + clock_gfx: clock-controller@5065000 { + compatible = "qcom,dummycc"; + clock-output-names = "gfx_clocks"; + #clock-cells = <1>; + #reset-cells = <1>; }; qcom,ipc-spinlock@1f40000 { @@ -490,4 +519,27 @@ qcom,xprt-version = <1>; qcom,fragmented-data; }; + + qcom,icnss@18800000 { + 
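The icnss node wires twelve copy-engine interrupts (CE0-CE11) in DT order and reserves 1 MB of MSA memory. A sketch of how a driver might claim them by index with the standard platform helpers; the handler and request name are placeholders, not the icnss driver's own:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t ce_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int icnss_request_ce_irqs(struct platform_device *pdev)
{
	int i, irq, ret;

	for (i = 0; i < 12; i++) {
		irq = platform_get_irq(pdev, i);	/* CE0..CE11 */
		if (irq < 0)
			return irq;
		ret = devm_request_irq(&pdev->dev, irq, ce_isr, 0,
				       "icnss_ce", pdev);
		if (ret)
			return ret;
	}
	return 0;
}
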
status = "disabled"; + compatible = "qcom,icnss"; + reg = <0x18800000 0x800000>, + <0x10ac000 0x20>; + reg-names = "membase", "mpm_config"; + interrupts = <0 413 0>, /* CE0 */ + <0 414 0>, /* CE1 */ + <0 415 0>, /* CE2 */ + <0 416 0>, /* CE3 */ + <0 417 0>, /* CE4 */ + <0 418 0>, /* CE5 */ + <0 420 0>, /* CE6 */ + <0 421 0>, /* CE7 */ + <0 422 0>, /* CE8 */ + <0 423 0>, /* CE9 */ + <0 424 0>, /* CE10 */ + <0 425 0>; /* CE11 */ + qcom,wlan-msa-memory = <0x100000>; + }; }; + +#include "msmtriton-ion.dtsi" diff --git a/arch/arm/configs/msmfalcon_defconfig b/arch/arm/configs/msmfalcon_defconfig index db50dce9f9a4..64da50bb55b2 100644 --- a/arch/arm/configs/msmfalcon_defconfig +++ b/arch/arm/configs/msmfalcon_defconfig @@ -222,6 +222,8 @@ CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_QSEECOM=y +CONFIG_HDCP_QSEECOM=y CONFIG_UID_CPUTIME=y CONFIG_MSM_ULTRASOUND=y CONFIG_SCSI=y @@ -330,6 +332,12 @@ CONFIG_MSM_SDE_ROTATOR=y CONFIG_QCOM_KGSL=y CONFIG_FB=y CONFIG_FB_VIRTUAL=y +CONFIG_FB_MSM=y +CONFIG_FB_MSM_MDSS=y +CONFIG_FB_MSM_MDSS_WRITEBACK=y +CONFIG_FB_MSM_MDSS_HDMI_PANEL=y +CONFIG_FB_MSM_MDSS_DP_PANEL=y +CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set @@ -399,7 +407,6 @@ CONFIG_STAGING=y CONFIG_ASHMEM=y CONFIG_ANDROID_TIMED_GPIO=y CONFIG_ANDROID_LOW_MEMORY_KILLER=y -CONFIG_SYNC=y CONFIG_ION=y CONFIG_ION_MSM=y CONFIG_QPNP_REVID=y @@ -413,6 +420,7 @@ CONFIG_IPA3=y CONFIG_RMNET_IPA3=y CONFIG_GPIO_USB_DETECT=y CONFIG_USB_BAM=y +CONFIG_MSM_MDSS_PLL=y CONFIG_REMOTE_SPINLOCK_MSM=y CONFIG_ARM_SMMU=y CONFIG_IOMMU_DEBUG=y diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig index 6ba13806cf16..5f8b02904d49 100644 --- a/arch/arm64/configs/msm-perf_defconfig +++ b/arch/arm64/configs/msm-perf_defconfig @@ -547,6 +547,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT3_FS=y CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig index bc5fc905188c..c1c0ae9da001 100644 --- a/arch/arm64/configs/msm_defconfig +++ b/arch/arm64/configs/msm_defconfig @@ -554,6 +554,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT3_FS=y CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig index 938e11151050..036d6aa5c062 100644 --- a/arch/arm64/configs/msmcortex-perf_defconfig +++ b/arch/arm64/configs/msmcortex-perf_defconfig @@ -555,6 +555,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT3_FS=y CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig index 8674161f29e9..77f0129776a3 100644 --- a/arch/arm64/configs/msmcortex_defconfig +++ b/arch/arm64/configs/msmcortex_defconfig @@ -575,6 +575,9 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT3_FS=y CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_EXT4_FS_ENCRYPTION=y +CONFIG_EXT4_FS_ICE_ENCRYPTION=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git 
a/arch/arm64/configs/msmfalcon_defconfig b/arch/arm64/configs/msmfalcon_defconfig index 34f0da3c37a4..348c34a94119 100644 --- a/arch/arm64/configs/msmfalcon_defconfig +++ b/arch/arm64/configs/msmfalcon_defconfig @@ -519,6 +519,7 @@ CONFIG_QCOM_WATCHDOG_V2=y CONFIG_QCOM_IRQ_HELPER=y CONFIG_QCOM_MEMORY_DUMP_V2=y CONFIG_ICNSS=y +CONFIG_ICNSS_DEBUG=y CONFIG_MSM_GLADIATOR_ERP_V2=y CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y CONFIG_MSM_GLADIATOR_HANG_DETECT=y diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index e5389bc981ee..eff70892dada 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -49,6 +49,17 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, return prot; } +static int __get_iommu_pgprot(struct dma_attrs *attrs, int prot, + bool coherent) +{ + if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs)) + prot |= IOMMU_NOEXEC; + if (coherent) + prot |= IOMMU_CACHE; + + return prot; +} + static struct gen_pool *atomic_pool; #define NO_KERNEL_MAPPING_DUMMY 0x2222 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K @@ -1153,7 +1164,7 @@ static int arm_dma_set_mask(struct device *dev, u64 dma_mask) /* IOMMU */ static void __dma_clear_buffer(struct page *page, size_t size, - struct dma_attrs *attrs) + struct dma_attrs *attrs, bool is_coherent) { /* * Ensure that the allocated pages are zeroed, and that any data @@ -1162,7 +1173,8 @@ static void __dma_clear_buffer(struct page *page, size_t size, void *ptr = page_address(page); if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs)) memset(ptr, 0, size); - dmac_flush_range(ptr, ptr + size); + if (!is_coherent) + dmac_flush_range(ptr, ptr + size); } static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, @@ -1212,6 +1224,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, size_t count = size >> PAGE_SHIFT; size_t array_size = count * sizeof(struct page *); int i = 0; + bool is_coherent = is_device_dma_coherent(dev); if (array_size <= PAGE_SIZE) pages = kzalloc(array_size, gfp); @@ -1228,7 +1241,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, if (!page) goto error; - __dma_clear_buffer(page, size, attrs); + __dma_clear_buffer(page, size, attrs, is_coherent); for (i = 0; i < count; i++) pages[i] = page + i; @@ -1257,7 +1270,8 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, pages[i + j] = pages[i] + j; } - __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs); + __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs, + is_coherent); i += 1 << order; count -= 1 << order; } @@ -1322,9 +1336,8 @@ static dma_addr_t __iommu_create_mapping(struct device *dev, dma_addr = __alloc_iova(mapping, size); if (dma_addr == DMA_ERROR_CODE) return dma_addr; - - if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs)) - prot |= IOMMU_NOEXEC; + prot = __get_iommu_pgprot(attrs, prot, + is_device_dma_coherent(dev)); iova = dma_addr; for (i = 0; i < count; ) { @@ -1404,6 +1417,7 @@ static void *__iommu_alloc_atomic(struct device *dev, size_t size, size_t array_size = count * sizeof(struct page *); int i; void *addr; + bool coherent = is_device_dma_coherent(dev); if (array_size <= PAGE_SIZE) pages = kzalloc(array_size, gfp); @@ -1413,7 +1427,13 @@ static void *__iommu_alloc_atomic(struct device *dev, size_t size, if (!pages) return NULL; - addr = __alloc_from_pool(size, &page, gfp); + if (coherent) { + page = alloc_pages(gfp, get_order(size)); + addr = page ? 
page_address(page) : NULL; + } else { + addr = __alloc_from_pool(size, &page, gfp); + } + if (!addr) goto err_free; @@ -1428,7 +1448,10 @@ static void *__iommu_alloc_atomic(struct device *dev, size_t size, return addr; err_mapping: - __free_from_pool(addr, size); + if (coherent) + __free_pages(page, get_order(size)); + else + __free_from_pool(addr, size); err_free: kvfree(pages); return NULL; @@ -1444,7 +1467,8 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr, static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { - pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false); + bool coherent = is_device_dma_coherent(dev); + pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent); struct page **pages; void *addr = NULL; @@ -1495,8 +1519,10 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, unsigned long uaddr = vma->vm_start; unsigned long usize = vma->vm_end - vma->vm_start; struct page **pages = __iommu_get_pages(cpu_addr, attrs); + bool coherent = is_device_dma_coherent(dev); - vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false); + vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, + coherent); if (!pages) return -ENXIO; @@ -1577,121 +1603,6 @@ static int __dma_direction_to_prot(enum dma_data_direction dir) return prot; } -/* - * Map a part of the scatter-gather list into contiguous io address space - */ -static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, - size_t size, dma_addr_t *handle, - enum dma_data_direction dir, struct dma_attrs *attrs, - bool is_coherent) -{ - struct dma_iommu_mapping *mapping = dev->archdata.mapping; - dma_addr_t iova, iova_base; - int ret = 0; - unsigned int count; - struct scatterlist *s; - int prot; - - size = PAGE_ALIGN(size); - *handle = DMA_ERROR_CODE; - - iova_base = iova = __alloc_iova(mapping, size); - if (iova == DMA_ERROR_CODE) - return -ENOMEM; - - for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { - phys_addr_t phys = page_to_phys(sg_page(s)); - unsigned int len = PAGE_ALIGN(s->offset + s->length); - - if (!is_coherent && - !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) - __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, - dir); - - prot = __dma_direction_to_prot(dir); - if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs)) - prot |= IOMMU_NOEXEC; - - ret = iommu_map(mapping->domain, iova, phys, len, prot); - if (ret < 0) - goto fail; - count += len >> PAGE_SHIFT; - iova += len; - } - *handle = iova_base; - - return 0; -fail: - iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); - __free_iova(mapping, iova_base, size); - return ret; -} - -static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs, - bool is_coherent) -{ - struct scatterlist *s = sg, *dma = sg, *start = sg; - int i, count = 0; - unsigned int offset = s->offset; - unsigned int size = s->offset + s->length; - unsigned int max = dma_get_max_seg_size(dev); - - for (i = 1; i < nents; i++) { - s = sg_next(s); - - s->dma_address = DMA_ERROR_CODE; - s->dma_length = 0; - - if (s->offset || (size & ~PAGE_MASK) - || size + s->length > max) { - if (__map_sg_chunk(dev, start, size, &dma->dma_address, - dir, attrs, is_coherent) < 0) - goto bad_mapping; - - dma->dma_address += offset; - dma->dma_length = size - offset; - - size = offset = s->offset; - start = s; - dma = sg_next(dma); - count += 1; - } - size += 
s->length; - } - if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, - is_coherent) < 0) - goto bad_mapping; - - dma->dma_address += offset; - dma->dma_length = size - offset; - - return count+1; - -bad_mapping: - for_each_sg(sg, s, count, i) - __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); - return 0; -} - -/** - * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA - * @dev: valid struct device pointer - * @sg: list of buffers - * @nents: number of buffers to map - * @dir: DMA transfer direction - * - * Map a set of i/o coherent buffers described by scatterlist in streaming - * mode for DMA. The scatter gather list elements are merged together (if - * possible) and tagged with the appropriate dma address and length. They are - * obtained via sg_dma_{address,length}. - */ -int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) -{ - return __iommu_map_sg(dev, sg, nents, dir, attrs, true); -} - /** * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA * @dev: valid struct device pointer @@ -1722,9 +1633,8 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, dev_err(dev, "Couldn't allocate iova for sg %p\n", sg); return 0; } - - if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs)) - prot |= IOMMU_NOEXEC; + prot = __get_iommu_pgprot(attrs, prot, + is_device_dma_coherent(dev)); ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot); if (ret != total_length) { @@ -1741,40 +1651,6 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, return nents; } -static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, struct dma_attrs *attrs, - bool is_coherent) -{ - struct scatterlist *s; - int i; - - for_each_sg(sg, s, nents, i) { - if (sg_dma_len(s)) - __iommu_remove_mapping(dev, sg_dma_address(s), - sg_dma_len(s)); - if (!is_coherent && - !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) - __dma_page_dev_to_cpu(sg_page(s), s->offset, - s->length, dir); - } -} - -/** - * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg - * @dev: valid struct device pointer - * @sg: list of buffers - * @nents: number of buffers to unmap (same as was passed to dma_map_sg) - * @dir: DMA transfer direction (same as was passed to dma_map_sg) - * - * Unmap a set of streaming mode DMA translations. Again, CPU access - * rules concerning calls here are the same as for dma_unmap_single(). 
- */ -void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) -{ - __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); -} - /** * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg * @dev: valid struct device pointer @@ -1812,6 +1688,9 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, struct scatterlist *s; int i; + if (is_device_dma_coherent(dev)) + return; + for_each_sg(sg, s, nents, i) __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); @@ -1830,6 +1709,9 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, struct scatterlist *s; int i; + if (is_device_dma_coherent(dev)) + return; + for_each_sg(sg, s, nents, i) __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); } @@ -1858,8 +1740,8 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, return dma_addr; prot = __dma_direction_to_prot(dir); - if (!dma_get_attr(DMA_ATTR_EXEC_MAPPING, attrs)) - prot |= IOMMU_NOEXEC; + prot = __get_iommu_pgprot(attrs, prot, + is_device_dma_coherent(dev)); ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); @@ -1886,38 +1768,14 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + if (!is_device_dma_coherent(dev) && + !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __dma_page_cpu_to_dev(page, offset, size, dir); return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); } /** - * arm_coherent_iommu_unmap_page - * @dev: valid struct device pointer - * @handle: DMA address of buffer - * @size: size of buffer (same as passed to dma_map_page) - * @dir: DMA transfer direction (same as passed to dma_map_page) - * - * Coherent IOMMU aware version of arm_dma_unmap_page() - */ -static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct dma_iommu_mapping *mapping = dev->archdata.mapping; - dma_addr_t iova = handle & PAGE_MASK; - int offset = handle & ~PAGE_MASK; - int len = PAGE_ALIGN(size + offset); - - if (!iova) - return; - - iommu_unmap(mapping->domain, iova, len); - __free_iova(mapping, iova, len); -} - -/** * arm_iommu_unmap_page * @dev: valid struct device pointer * @handle: DMA address of buffer @@ -1940,7 +1798,8 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, if (!iova) return; - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + if (!(is_device_dma_coherent(dev) || + dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))) __dma_page_dev_to_cpu(page, offset, size, dir); iommu_unmap(mapping->domain, iova, len); @@ -1959,7 +1818,8 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev, if (!iova) return; - __dma_page_dev_to_cpu(page, offset, size, dir); + if (!is_device_dma_coherent(dev)) + __dma_page_dev_to_cpu(page, offset, size, dir); } static void arm_iommu_sync_single_for_device(struct device *dev, @@ -1974,7 +1834,8 @@ static void arm_iommu_sync_single_for_device(struct device *dev, if (!iova) return; - __dma_page_cpu_to_dev(page, offset, size, dir); + if (!is_device_dma_coherent(dev)) + __dma_page_cpu_to_dev(page, offset, size, dir); } static int arm_iommu_dma_supported(struct device *dev, u64 mask) @@ -2016,22 +1877,6 @@ const struct dma_map_ops iommu_ops = { .mapping_error = 
arm_iommu_mapping_error, }; -const struct dma_map_ops iommu_coherent_ops = { - .alloc = arm_iommu_alloc_attrs, - .free = arm_iommu_free_attrs, - .mmap = arm_iommu_mmap_attrs, - .get_sgtable = arm_iommu_get_sgtable, - - .map_page = arm_coherent_iommu_map_page, - .unmap_page = arm_coherent_iommu_unmap_page, - - .map_sg = arm_coherent_iommu_map_sg, - .unmap_sg = arm_coherent_iommu_unmap_sg, - - .set_dma_mask = arm_dma_set_mask, - .dma_supported = arm_iommu_dma_supported, -}; - /** * arm_iommu_create_mapping * @bus: pointer to the bus holding the client device (for IOMMU calls) diff --git a/drivers/base/regmap/regmap-swr.c b/drivers/base/regmap/regmap-swr.c index 027cbfc505ab..1641c374b189 100644 --- a/drivers/base/regmap/regmap-swr.c +++ b/drivers/base/regmap/regmap-swr.c @@ -28,11 +28,16 @@ static int regmap_swr_gather_write(void *context, struct device *dev = context; struct swr_device *swr = to_swr_device(dev); struct regmap *map = dev_get_regmap(dev, NULL); - size_t addr_bytes = map->format.reg_bytes; + size_t addr_bytes; size_t val_bytes; int i, ret = 0; u16 reg_addr = 0; + if (map == NULL) { + dev_err(dev, "%s: regmap is NULL\n", __func__); + return -EINVAL; + } + addr_bytes = map->format.reg_bytes; if (swr == NULL) { dev_err(dev, "%s: swr device is NULL\n", __func__); return -EINVAL; @@ -154,10 +159,15 @@ static int regmap_swr_read(void *context, struct device *dev = context; struct swr_device *swr = to_swr_device(dev); struct regmap *map = dev_get_regmap(dev, NULL); - size_t addr_bytes = map->format.reg_bytes; + size_t addr_bytes; int ret = 0; u16 reg_addr = 0; + if (map == NULL) { + dev_err(dev, "%s: regmap is NULL\n", __func__); + return -EINVAL; + } + addr_bytes = map->format.reg_bytes; if (swr == NULL) { dev_err(dev, "%s: swr is NULL\n", __func__); return -EINVAL; diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 13116f010e89..63dc23387133 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -2275,7 +2275,6 @@ static int fastrpc_cb_probe(struct device *dev) const char *name; unsigned int start = 0x80000000; int err = 0, i; - int disable_htw = 1; int secure_vmid = VMID_CP_PIXEL; VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL))); @@ -2311,9 +2310,6 @@ static int fastrpc_cb_probe(struct device *dev) start, 0x7fffffff))); if (err) goto bail; - iommu_domain_set_attr(sess->smmu.mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw); iommu_set_fault_handler(sess->smmu.mapping->domain, fastrpc_smmu_fault_handler, sess); if (sess->smmu.secure) @@ -2341,7 +2337,6 @@ static int fastrpc_cb_legacy_probe(struct device *dev) unsigned int *range = 0, range_size = 0; unsigned int *sids = 0, sids_size = 0; int err = 0, ret = 0, i; - int disable_htw = 1; VERIFY(err, 0 != (domains_child_node = of_get_child_by_name( dev->of_node, @@ -2395,9 +2390,6 @@ static int fastrpc_cb_legacy_probe(struct device *dev) range[0], range[1]))); if (err) goto bail; - iommu_domain_set_attr(first_sess->smmu.mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw); VERIFY(err, !arm_iommu_attach_device(first_sess->dev, first_sess->smmu.mapping)); if (err) diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c index b830334dc701..f0cd6cf3967d 100644 --- a/drivers/char/diag/diag_dci.c +++ b/drivers/char/diag/diag_dci.c @@ -3066,8 +3066,8 @@ int diag_dci_write_proc(uint8_t peripheral, int pkt_type, char *buf, int len) !(driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask)) { DIAG_LOG(DIAG_DEBUG_DCI, "buf: 0x%pK, p: %d, len: %d, 
f_mask: %d\n", - buf, peripheral, len, - driver->feature[peripheral].rcvd_feature_mask); + buf, peripheral, len, + driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask); return -EINVAL; } diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c index fea1b74aacae..a2ffabe43c86 100644 --- a/drivers/char/diag/diagfwd_glink.c +++ b/drivers/char/diag/diagfwd_glink.c @@ -413,19 +413,16 @@ static int diag_glink_write(void *ctxt, unsigned char *buf, int len) return -ENODEV; } - err = wait_event_interruptible(glink_info->wait_q, - atomic_read(&glink_info->tx_intent_ready)); - if (err) { - diagfwd_write_buffer_done(glink_info->fwd_ctxt, buf); - return -ERESTARTSYS; - } - - atomic_dec(&glink_info->tx_intent_ready); - err = glink_tx(glink_info->hdl, glink_info, buf, len, tx_flags); - if (!err) { - DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "%s wrote to glink, len: %d\n", - glink_info->name, len); - } + if (atomic_read(&glink_info->tx_intent_ready)) { + atomic_dec(&glink_info->tx_intent_ready); + err = glink_tx(glink_info->hdl, glink_info, buf, len, tx_flags); + if (!err) { + DIAG_LOG(DIAG_DEBUG_PERIPHERALS, + "%s wrote to glink, len: %d\n", + glink_info->name, len); + } + } else + err = -ENOMEM; return err; diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c index 066890aebf39..22b9e05086bd 100644 --- a/drivers/char/diag/diagfwd_peripheral.c +++ b/drivers/char/diag/diagfwd_peripheral.c @@ -751,7 +751,9 @@ int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len) if (!err) fwd_info->write_bytes += len; - + else + if (fwd_info->transport == TRANSPORT_GLINK) + diagfwd_write_buffer_done(fwd_info, buf_ptr); return err; } diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c index 0d733f49f184..d6cdbbc78827 100644 --- a/drivers/clk/msm/clock-osm.c +++ b/drivers/clk/msm/clock-osm.c @@ -79,6 +79,7 @@ enum clk_osm_trace_packet_id { #define MEM_ACC_INSTR_COMP(n) (0x67 + ((n) * 0x40)) #define MEM_ACC_SEQ_REG_VAL_START(n) (SEQ_REG(60 + (n))) #define SEQ_REG1_MSMCOBALT_V2 0x1048 +#define VERSION_REG 0x0 #define OSM_TABLE_SIZE 40 #define MAX_CLUSTER_CNT 2 @@ -182,7 +183,9 @@ enum clk_osm_trace_packet_id { #define DROOP_UNSTALL_TIMER_CTRL_REG 0x10AC #define DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG 0x10B0 #define DROOP_WAIT_TO_RELEASE_TIMER_CTRL1_REG 0x10B4 +#define OSM_PLL_SW_OVERRIDE_EN 0x10C0 +#define PLL_SW_OVERRIDE_DROOP_EN BIT(0) #define DCVS_DROOP_TIMER_CTRL 0x10B8 #define SEQ_MEM_ADDR 0x500 #define SEQ_CFG_BR_ADDR 0x170 @@ -377,6 +380,11 @@ static inline int clk_osm_read_reg_no_log(struct clk_osm *c, u32 offset) return readl_relaxed_no_log((char *)c->vbases[OSM_BASE] + offset); } +static inline int clk_osm_mb(struct clk_osm *c, int base) +{ + return readl_relaxed_no_log((char *)c->vbases[base] + VERSION_REG); +} + static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec) { u64 temp; @@ -478,7 +486,7 @@ static int clk_osm_set_rate(struct clk *c, unsigned long rate) } /* Make sure the write goes through before proceeding */ - mb(); + clk_osm_mb(cpuclk, OSM_BASE); return 0; } @@ -490,7 +498,7 @@ static int clk_osm_enable(struct clk *c) clk_osm_write_reg(cpuclk, 1, ENABLE_REG); /* Make sure the write goes through before proceeding */ - mb(); + clk_osm_mb(cpuclk, OSM_BASE); /* Wait for 5us for OSM hardware to enable */ udelay(5); @@ -1101,14 +1109,14 @@ static void clk_osm_setup_cluster_pll(struct clk_osm *c) PLL_MODE); /* Ensure writes complete before delaying */ - mb(); + clk_osm_mb(c, PLL_BASE); 
udelay(PLL_WAIT_LOCK_TIME_US); writel_relaxed(0x6, c->vbases[PLL_BASE] + PLL_MODE); /* Ensure write completes before delaying */ - mb(); + clk_osm_mb(c, PLL_BASE); usleep_range(50, 75); @@ -1153,7 +1161,7 @@ static int clk_osm_setup_hw_table(struct clk_osm *c) } /* Make sure all writes go through */ - mb(); + clk_osm_mb(c, OSM_BASE); return 0; } @@ -1272,7 +1280,7 @@ static int clk_osm_set_cc_policy(struct platform_device *pdev) } /* Wait for the writes to complete */ - mb(); + clk_osm_mb(&perfcl_clk, OSM_BASE); rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-ret-inactive"); if (rc) { @@ -1297,7 +1305,7 @@ static int clk_osm_set_cc_policy(struct platform_device *pdev) clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DCVS_DISABLE); /* Wait for the writes to complete */ - mb(); + clk_osm_mb(&perfcl_clk, OSM_BASE); devm_kfree(&pdev->dev, array); return 0; @@ -1392,7 +1400,7 @@ static int clk_osm_set_llm_freq_policy(struct platform_device *pdev) clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE); /* Wait for the write to complete */ - mb(); + clk_osm_mb(&perfcl_clk, OSM_BASE); devm_kfree(&pdev->dev, array); return 0; @@ -1467,7 +1475,7 @@ static int clk_osm_set_llm_volt_policy(struct platform_device *pdev) clk_osm_write_reg(&perfcl_clk, val, LLM_INTF_DCVS_DISABLE); /* Wait for the writes to complete */ - mb(); + clk_osm_mb(&perfcl_clk, OSM_BASE); devm_kfree(&pdev->dev, array); return 0; @@ -1668,7 +1676,7 @@ static void clk_osm_setup_osm_was(struct clk_osm *c) } /* Ensure writes complete before returning */ - mb(); + clk_osm_mb(c, OSM_BASE); } static void clk_osm_setup_fsms(struct clk_osm *c) @@ -1778,7 +1786,7 @@ static void clk_osm_setup_fsms(struct clk_osm *c) val = clk_osm_read_reg(c, DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG); - val |= BVAL(15, 0, clk_osm_count_ns(c, 500)); + val |= BVAL(15, 0, clk_osm_count_ns(c, 15000)); clk_osm_write_reg(c, val, DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG); } @@ -1792,7 +1800,7 @@ static void clk_osm_setup_fsms(struct clk_osm *c) if (c->wfx_fsm_en || c->ps_fsm_en || c->droop_fsm_en) { clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG); - clk_osm_write_reg(c, clk_osm_count_ns(c, 250), + clk_osm_write_reg(c, clk_osm_count_ns(c, 500), DROOP_RELEASE_TIMER_CTRL); clk_osm_write_reg(c, clk_osm_count_ns(c, 500), DCVS_DROOP_TIMER_CTRL); @@ -1801,6 +1809,11 @@ static void clk_osm_setup_fsms(struct clk_osm *c) BVAL(6, 0, 0x8); clk_osm_write_reg(c, val, DROOP_CTRL_REG); } + + /* Enable the PLL Droop Override */ + val = clk_osm_read_reg(c, OSM_PLL_SW_OVERRIDE_EN); + val |= PLL_SW_OVERRIDE_DROOP_EN; + clk_osm_write_reg(c, val, OSM_PLL_SW_OVERRIDE_EN); } static void clk_osm_do_additional_setup(struct clk_osm *c, @@ -1869,7 +1882,7 @@ static void clk_osm_apm_vc_setup(struct clk_osm *c) SEQ_REG(76)); /* Ensure writes complete before returning */ - mb(); + clk_osm_mb(c, OSM_BASE); } else { if (msmcobalt_v1) { scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(1), diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c index 9a080e4ee39b..a574a9cd2b5a 100644 --- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c +++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c @@ -18,6 +18,7 @@ #include <linux/iopoll.h> #include <linux/delay.h> #include <linux/clk/msm-clock-generic.h> +#include <linux/usb/usbpd.h> #include "mdss-pll.h" #include "mdss-dp-pll.h" @@ -172,9 +173,27 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate) { u32 res = 0; struct mdss_pll_resources *dp_res = vco->priv; + u8 orientation, ln_cnt; 
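dp_config_vco_rate() and dp_pll_enable() below recover the Type-C plug state parked in the DP_PHY_SPARE0 scratch register: low nibble = lane count, high nibble = plug orientation. A stand-alone decode of that packing, assuming the usbpd enum order NONE/CC1/CC2 (so ORIENTATION_CC2 == 2); the sample value is made up for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int spare = 0x24;	/* example: CC2, 4 lanes */
	unsigned int ln_cnt = spare & 0x0f;
	unsigned int orientation = (spare >> 4) & 0x0f;

	printf("lanes=%u orientation=%s\n", ln_cnt,
	       orientation == 2 ? "CC2" : "CC1/none");
	return 0;
}
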
+ u32 spare_value; + + spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0); + ln_cnt = spare_value & 0x0F; + orientation = (spare_value & 0xF0) >> 4; + pr_debug("%s: spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n", + __func__, spare_value, ln_cnt, orientation); + + if (ln_cnt != 4) { + if (orientation == ORIENTATION_CC2) + MDSS_PLL_REG_W(dp_res->phy_base, + DP_PHY_PD_CTL, 0x2d); + else + MDSS_PLL_REG_W(dp_res->phy_base, + DP_PHY_PD_CTL, 0x35); + } else { + MDSS_PLL_REG_W(dp_res->phy_base, + DP_PHY_PD_CTL, 0x3d); + } - MDSS_PLL_REG_W(dp_res->phy_base, - DP_PHY_PD_CTL, 0x3d); /* Make sure the PHY register writes are done */ wmb(); MDSS_PLL_REG_W(dp_res->pll_base, @@ -314,8 +333,13 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate) /* Make sure the PLL register writes are done */ wmb(); - MDSS_PLL_REG_W(dp_res->phy_base, - DP_PHY_MODE, 0x58); + if (orientation == ORIENTATION_CC2) + MDSS_PLL_REG_W(dp_res->phy_base, + DP_PHY_MODE, 0x48); + else + MDSS_PLL_REG_W(dp_res->phy_base, + DP_PHY_MODE, 0x58); + MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_TX0_TX1_LANE_CTL, 0x05); MDSS_PLL_REG_W(dp_res->phy_base, @@ -427,6 +451,12 @@ static int dp_pll_enable(struct clk *c) u32 status; struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c); struct mdss_pll_resources *dp_res = vco->priv; + u8 orientation, ln_cnt; + u32 spare_value, bias_en, drvr_en; + + spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0); + ln_cnt = spare_value & 0x0F; + orientation = (spare_value & 0xF0) >> 4; MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01); @@ -474,18 +504,45 @@ static int dp_pll_enable(struct clk *c) pr_debug("%s: PLL is locked\n", __func__); - MDSS_PLL_REG_W(dp_res->phy_base, + if (ln_cnt == 1) { + bias_en = 0x3e; + drvr_en = 0x13; + } else { + bias_en = 0x3f; + drvr_en = 0x10; + } + + if (ln_cnt != 4) { + if (orientation == ORIENTATION_CC1) { + MDSS_PLL_REG_W(dp_res->phy_base, QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN, - 0x3f); - MDSS_PLL_REG_W(dp_res->phy_base, + bias_en); + MDSS_PLL_REG_W(dp_res->phy_base, QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN, - 0x10); - MDSS_PLL_REG_W(dp_res->phy_base, + drvr_en); + } else { + MDSS_PLL_REG_W(dp_res->phy_base, QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN, - 0x3f); - MDSS_PLL_REG_W(dp_res->phy_base, + bias_en); + MDSS_PLL_REG_W(dp_res->phy_base, + QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN, + drvr_en); + } + } else { + MDSS_PLL_REG_W(dp_res->phy_base, + QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN, + bias_en); + MDSS_PLL_REG_W(dp_res->phy_base, QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN, - 0x10); + drvr_en); + MDSS_PLL_REG_W(dp_res->phy_base, + QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN, + bias_en); + MDSS_PLL_REG_W(dp_res->phy_base, + QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN, + drvr_en); + } + MDSS_PLL_REG_W(dp_res->phy_base, QSERDES_TX0_OFFSET + TXn_TX_POL_INV, 0x0a); @@ -615,7 +672,7 @@ int dp_vco_prepare(struct clk *c) rc = dp_pll_enable(c); if (rc) { mdss_pll_resource_enable(dp_pll_res, false); - pr_err("ndx=%d failed to enable dsi pll\n", + pr_err("ndx=%d failed to enable dp pll\n", dp_pll_res->index); goto error; } diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h index d89545b38e64..28f21ed1fe0d 100644 --- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h +++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h @@ -41,6 +41,7 @@ #define DP_PHY_TX0_TX1_LANE_CTL 0x0068 #define DP_PHY_TX2_TX3_LANE_CTL 0x0084 +#define DP_PHY_SPARE0 0x00A8 #define DP_PHY_STATUS 0x00BC /* Tx registers */ diff --git 
a/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c b/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c index f6c85cf8d9a4..5f779ec9bcc3 100644 --- a/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c +++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c @@ -685,6 +685,10 @@ static void pll_db_commit_8996(struct mdss_pll_resources *pll, MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0); wmb(); /* make sure register committed */ + MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_VCO_TUNE, 0); + MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_CODE, 0); + wmb(); /* make sure register committed */ + data = pdb->in.dsiclk_sel; /* set dsiclk_sel = 1 */ MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG1, data); diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 5b9ce12c1e02..e39686ca4feb 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -155,6 +155,7 @@ config MSM_MMCC_8996 config MSM_GCC_FALCON tristate "MSMFALCON Global Clock Controller" + select QCOM_GDSC depends on COMMON_CLK_QCOM ---help--- Support for the global clock controller on Qualcomm Technologies, Inc @@ -162,6 +163,16 @@ config MSM_GCC_FALCON Say Y if you want to use peripheral devices such as UART, SPI, I2C, USB, UFS, SD/eMMC, PCIe, etc. +config MSM_GPUCC_FALCON + tristate "MSMFALCON Graphics Clock Controller" + select MSM_GCC_FALCON + depends on COMMON_CLK_QCOM + help + Support for the graphics clock controller on Qualcomm Technologies, Inc + MSMfalcon devices. + Say Y if you want to support graphics controller devices which will + be required to enable those device. + config QCOM_HFPLL tristate "High-Frequency PLL (HFPLL) Clock Controller" depends on COMMON_CLK_QCOM diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index af58f206bc4a..7ee0294e9dc7 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_MSM_GCC_FALCON) += gcc-msmfalcon.o obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o +obj-$(CONFIG_MSM_GPUCC_FALCON) += gpucc-msmfalcon.o obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o obj-$(CONFIG_QCOM_HFPLL) += hfpll.o obj-$(CONFIG_KRAITCC) += krait-cc.o diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index a075859771d3..933a208392bd 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -870,6 +870,7 @@ EXPORT_SYMBOL_GPL(clk_gfx3d_ops); static int clk_gfx3d_src_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) { + struct clk_rcg2 *rcg = to_clk_rcg2(hw); struct clk_rate_request parent_req = { }; struct clk_hw *p1, *p3, *xo, *curr_p; const struct freq_tbl *f; diff --git a/drivers/clk/qcom/gpucc-msmfalcon.c b/drivers/clk/qcom/gpucc-msmfalcon.c new file mode 100644 index 000000000000..a2127e2629c7 --- /dev/null +++ b/drivers/clk/qcom/gpucc-msmfalcon.c @@ -0,0 +1,482 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/regmap.h> +#include <linux/reset-controller.h> +#include <dt-bindings/clock/qcom,gpu-msmfalcon.h> + +#include "clk-alpha-pll.h" +#include "common.h" +#include "clk-regmap.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-branch.h" +#include "vdd-level-falcon.h" + +#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } +#define F_GFX(f, s, h, m, n, sf) { (f), (s), (2 * (h) - 1), (m), (n), (sf) } + +static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL); +static DEFINE_VDD_REGULATORS(vdd_mx, VDD_DIG_NUM, 1, vdd_corner, NULL); +static DEFINE_VDD_REGS_INIT(vdd_gfx, 1); + +enum { + P_CORE_BI_PLL_TEST_SE, + P_GPLL0_OUT_MAIN, + P_GPLL0_OUT_MAIN_DIV, + P_GPU_PLL0_PLL_OUT_MAIN, + P_GPU_PLL1_PLL_OUT_MAIN, + P_XO, +}; + +static const struct parent_map gpucc_parent_map_0[] = { + { P_XO, 0 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_GPLL0_OUT_MAIN_DIV, 6 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gpucc_parent_names_0[] = { + "cxo_a", + "gcc_gpu_gpll0_clk", + "gcc_gpu_gpll0_div_clk", + "core_bi_pll_test_se", +}; + +static const struct parent_map gpucc_parent_map_1[] = { + { P_XO, 0 }, + { P_GPU_PLL0_PLL_OUT_MAIN, 1 }, + { P_GPU_PLL1_PLL_OUT_MAIN, 3 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gpucc_parent_names_1[] = { + "xo", + "gpu_pll0_pll_out_main", + "gpu_pll1_pll_out_main", + "gcc_gpu_gpll0_clk", + "core_bi_pll_test_se", +}; + +static struct pll_vco gpu_vco[] = { + { 1000000000, 2000000000, 0 }, + { 500000000, 1000000000, 2 }, + { 250000000, 500000000, 3 }, +}; + +/* 640MHz configuration */ +static const struct pll_config gpu_pll0_config = { + .l = 0x21, + .config_ctl_val = 0x4001055b, + .alpha = 0x55555600, + .alpha_u = 0x55, + .alpha_en_mask = BIT(24), + .vco_val = 0x2 << 20, + .vco_mask = 0x3 << 20, + .main_output_mask = 0x1, +}; + +static struct pll_vco_data pll_data[] = { + /* Frequency post-div */ + { 640000000, 0x1 }, +}; + +static struct clk_alpha_pll gpu_pll0_pll_out_main = { + .offset = 0x0, + .vco_table = gpu_vco, + .num_vco = ARRAY_SIZE(gpu_vco), + .vco_data = pll_data, + .num_vco_data = ARRAY_SIZE(pll_data), + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gpu_pll0_pll_out_main", + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + VDD_GPU_PLL_FMAX_MAP6( + MIN, 266000000, + LOWER, 432000000, + LOW, 640000000, + LOW_L1, 800000000, + NOMINAL, 1020000000, + HIGH, 1500000000), + }, + }, +}; + +static struct clk_alpha_pll gpu_pll1_pll_out_main = { + .offset = 0x40, + .vco_table = gpu_vco, + .num_vco = ARRAY_SIZE(gpu_vco), + .vco_data = pll_data, + .num_vco_data = ARRAY_SIZE(pll_data), + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gpu_pll1_pll_out_main", + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_ops, + VDD_GPU_PLL_FMAX_MAP6( + MIN, 266000000, + LOWER, 432000000, + LOW, 640000000, + LOW_L1, 800000000, + NOMINAL, 1020000000, + HIGH, 1500000000), + }, + }, +}; + +/* GFX clock init data */ +static struct clk_init_data gpu_clks_init[] = { + [0] = { + .name = "gfx3d_clk_src", + .parent_names = gpucc_parent_names_1, + .num_parents = 3, + .ops = &clk_gfx3d_src_ops, + .flags = CLK_SET_RATE_PARENT, + }, + [1] = { + 
.name = "gpucc_gfx3d_clk", + .parent_names = (const char *[]){ + "gfx3d_clk_src", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + .flags = CLK_SET_RATE_PARENT, + .vdd_class = &vdd_gfx, + }, +}; + +/* + * Frequencies and PLL configuration + * The PLL source would be to ping-pong between GPU-PLL0 + * and GPU-PLL1. + * ==================================================== + * | F | PLL SRC Freq | PLL postdiv | RCG Div | + * ==================================================== + * | 160000000 | 640000000 | 2 | 2 | + * | 266000000 | 532000000 | 1 | 2 | + * | 370000000 | 740000000 | 1 | 2 | + * | 465000000 | 930000000 | 1 | 2 | + * | 588000000 | 1176000000 | 1 | 2 | + * | 647000000 | 1294000000 | 1 | 2 | + * | 750000000 | 1500000000 | 1 | 2 | + * ==================================================== +*/ + +static const struct freq_tbl ftbl_gfx3d_clk_src[] = { + F_GFX( 19200000, 0, 1, 0, 0, 0), + F_GFX(160000000, 0, 2, 0, 0, 640000000), + F_GFX(266000000, 0, 2, 0, 0, 532000000), + F_GFX(370000000, 0, 2, 0, 0, 740000000), + F_GFX(465000000, 0, 2, 0, 0, 930000000), + F_GFX(588000000, 0, 2, 0, 0, 1176000000), + F_GFX(647000000, 0, 2, 0, 0, 1294000000), + F_GFX(750000000, 0, 2, 0, 0, 1500000000), + { } +}; + +static struct clk_rcg2 gfx3d_clk_src = { + .cmd_rcgr = 0x1070, + .mnd_width = 0, + .hid_width = 5, + .freq_tbl = ftbl_gfx3d_clk_src, + .parent_map = gpucc_parent_map_1, + .flags = FORCE_ENABLE_RCGR, + .clkr.hw.init = &gpu_clks_init[0], +}; + +static const struct freq_tbl ftbl_rbbmtimer_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 rbbmtimer_clk_src = { + .cmd_rcgr = 0x10b0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpucc_parent_map_0, + .freq_tbl = ftbl_rbbmtimer_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "rbbmtimer_clk_src", + .parent_names = gpucc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + VDD_DIG_FMAX_MAP1(MIN, 19200000), + }, +}; + +static const struct freq_tbl ftbl_rbcpr_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(50000000, P_GPLL0_OUT_MAIN_DIV, 6, 0, 0), + { } +}; + +static struct clk_rcg2 rbcpr_clk_src = { + .cmd_rcgr = 0x1030, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpucc_parent_map_0, + .freq_tbl = ftbl_rbcpr_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "rbcpr_clk_src", + .parent_names = gpucc_parent_names_0, + .num_parents = 4, + .ops = &clk_rcg2_ops, + VDD_DIG_FMAX_MAP2( + MIN, 19200000, + NOMINAL, 50000000), + }, +}; + +static struct clk_branch gpucc_cxo_clk = { + .halt_reg = 0x1020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpucc_cxo_clk", + .parent_names = (const char *[]) { + "cxo_a", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpucc_gfx3d_clk = { + .halt_reg = 0x1098, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1098, + .enable_mask = BIT(0), + .hw.init = &gpu_clks_init[1], + }, +}; + +static struct clk_branch gpucc_rbbmtimer_clk = { + .halt_reg = 0x10d0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10d0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpucc_rbbmtimer_clk", + .parent_names = (const char *[]){ + "rbbmtimer_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpucc_rbcpr_clk = { + .halt_reg = 0x1054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1054, + 
.enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpucc_rbcpr_clk", + .parent_names = (const char *[]){ + "rbcpr_clk_src", + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *gpucc_falcon_clocks[] = { + [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr, + [GPU_PLL0_PLL] = &gpu_pll0_pll_out_main.clkr, + [GPU_PLL1_PLL] = &gpu_pll1_pll_out_main.clkr, + [GPUCC_CXO_CLK] = &gpucc_cxo_clk.clkr, + [GPUCC_GFX3D_CLK] = &gpucc_gfx3d_clk.clkr, + [GPUCC_RBBMTIMER_CLK] = &gpucc_rbbmtimer_clk.clkr, + [GPUCC_RBCPR_CLK] = &gpucc_rbcpr_clk.clkr, + [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr, + [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr, +}; + +static const struct regmap_config gpucc_falcon_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x9034, + .fast_io = true, +}; + +static const struct qcom_cc_desc gpucc_falcon_desc = { + .config = &gpucc_falcon_regmap_config, + .clks = gpucc_falcon_clocks, + .num_clks = ARRAY_SIZE(gpucc_falcon_clocks), +}; + +static const struct of_device_id gpucc_falcon_match_table[] = { + { .compatible = "qcom,gpucc-msmfalcon" }, + { } +}; +MODULE_DEVICE_TABLE(of, gpucc_falcon_match_table); + +static int of_get_fmax_vdd_class(struct platform_device *pdev, + struct clk_hw *hw, char *prop_name, u32 index) +{ + struct device_node *of = pdev->dev.of_node; + int prop_len, i, j; + struct clk_vdd_class *vdd = hw->init->vdd_class; + int num = vdd->num_regulators + 1; + u32 *array; + + if (!of_find_property(of, prop_name, &prop_len)) { + dev_err(&pdev->dev, "missing %s\n", prop_name); + return -EINVAL; + } + + prop_len /= sizeof(u32); + if (prop_len % num) { + dev_err(&pdev->dev, "bad length %d\n", prop_len); + return -EINVAL; + } + + prop_len /= num; + vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int), + GFP_KERNEL); + if (!vdd->level_votes) + return -ENOMEM; + + vdd->vdd_uv = devm_kzalloc(&pdev->dev, + prop_len * sizeof(int) * (num - 1), GFP_KERNEL); + if (!vdd->vdd_uv) + return -ENOMEM; + + gpu_clks_init[index].fmax = devm_kzalloc(&pdev->dev, prop_len * + sizeof(unsigned long), GFP_KERNEL); + if (!gpu_clks_init[index].fmax) + return -ENOMEM; + + array = devm_kzalloc(&pdev->dev, prop_len * sizeof(u32) * num, + GFP_KERNEL); + if (!array) + return -ENOMEM; + + of_property_read_u32_array(of, prop_name, array, prop_len * num); + for (i = 0; i < prop_len; i++) { + gpu_clks_init[index].fmax[i] = array[num * i]; + for (j = 1; j < num; j++) { + vdd->vdd_uv[(num - 1) * i + (j - 1)] = + array[num * i + j]; + } + } + + devm_kfree(&pdev->dev, array); + vdd->num_levels = prop_len; + vdd->cur_level = prop_len; + gpu_clks_init[index].num_fmax = prop_len; + + return 0; +} + +static int gpucc_falcon_probe(struct platform_device *pdev) +{ + int ret = 0; + struct regmap *regmap; + + regmap = qcom_cc_map(pdev, &gpucc_falcon_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + /* CX Regulator for RBBMTimer and RBCPR clock */ + vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig_gfx"); + if (IS_ERR(vdd_dig.regulator[0])) { + if (!(PTR_ERR(vdd_dig.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get vdd_dig regulator\n"); + return PTR_ERR(vdd_dig.regulator[0]); + } + + /* Mx Regulator for GPU-PLLs */ + vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx_gfx"); + if (IS_ERR(vdd_mx.regulator[0])) { + if (!(PTR_ERR(vdd_mx.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get vdd_mx regulator\n"); + return 
PTR_ERR(vdd_mx.regulator[0]); + } + + /* GFX Rail Regulator for GFX3D clock */ + vdd_gfx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_gfx"); + if (IS_ERR(vdd_gfx.regulator[0])) { + if (!(PTR_ERR(vdd_gfx.regulator[0]) == -EPROBE_DEFER)) + dev_err(&pdev->dev, + "Unable to get vdd_gfx regulator\n"); + return PTR_ERR(vdd_gfx.regulator[0]); + } + + /* GFX rail fmax data linked to branch clock */ + of_get_fmax_vdd_class(pdev, &gpucc_gfx3d_clk.clkr.hw, + "qcom,gfxfreq-corner", 1); + + clk_alpha_pll_configure(&gpu_pll0_pll_out_main, regmap, + &gpu_pll0_config); + clk_alpha_pll_configure(&gpu_pll1_pll_out_main, regmap, + &gpu_pll0_config); + + ret = qcom_cc_really_probe(pdev, &gpucc_falcon_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register GPUCC clocks\n"); + return ret; + } + + clk_prepare_enable(gpucc_cxo_clk.clkr.hw.clk); + + dev_info(&pdev->dev, "Registered GPUCC clocks\n"); + + return ret; +} + +static struct platform_driver gpucc_falcon_driver = { + .probe = gpucc_falcon_probe, + .driver = { + .name = "gpucc-msmfalcon", + .of_match_table = gpucc_falcon_match_table, + }, +}; + +static int __init gpucc_falcon_init(void) +{ + return platform_driver_register(&gpucc_falcon_driver); +} +core_initcall_sync(gpucc_falcon_init); + +static void __exit gpucc_falcon_exit(void) +{ + platform_driver_unregister(&gpucc_falcon_driver); +} +module_exit(gpucc_falcon_exit); diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c index f4ae70ac9315..b40231dd8dd1 100644 --- a/drivers/cpuidle/lpm-levels-of.c +++ b/drivers/cpuidle/lpm-levels-of.c @@ -38,34 +38,138 @@ static const struct lpm_type_str lpm_types[] = { {SUSPEND, "suspend_enabled"}, }; +static DEFINE_PER_CPU(uint32_t *, max_residency); +static DEFINE_PER_CPU(uint32_t *, min_residency); static struct lpm_level_avail *cpu_level_available[NR_CPUS]; static struct platform_device *lpm_pdev; -static void *get_avail_val(struct kobject *kobj, struct kobj_attribute *attr) +static void *get_enabled_ptr(struct kobj_attribute *attr, + struct lpm_level_avail *avail) { void *arg = NULL; + + if (!strcmp(attr->attr.name, lpm_types[IDLE].str)) + arg = (void *) &avail->idle_enabled; + else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str)) + arg = (void *) &avail->suspend_enabled; + + return arg; +} + +static struct lpm_level_avail *get_avail_ptr(struct kobject *kobj, + struct kobj_attribute *attr) +{ struct lpm_level_avail *avail = NULL; - if (!strcmp(attr->attr.name, lpm_types[IDLE].str)) { + if (!strcmp(attr->attr.name, lpm_types[IDLE].str)) avail = container_of(attr, struct lpm_level_avail, idle_enabled_attr); - arg = (void *) &avail->idle_enabled; - } else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str)) { + else if (!strcmp(attr->attr.name, lpm_types[SUSPEND].str)) avail = container_of(attr, struct lpm_level_avail, suspend_enabled_attr); - arg = (void *) &avail->suspend_enabled; + + return avail; +} + +static void set_optimum_cpu_residency(struct lpm_cpu *cpu, int cpu_id, + bool probe_time) +{ + int i, j; + bool mode_avail; + uint32_t *maximum_residency = per_cpu(max_residency, cpu_id); + uint32_t *minimum_residency = per_cpu(min_residency, cpu_id); + + for (i = 0; i < cpu->nlevels; i++) { + struct power_params *pwr = &cpu->levels[i].pwr; + + mode_avail = probe_time || + lpm_cpu_mode_allow(cpu_id, i, true); + + if (!mode_avail) { + maximum_residency[i] = 0; + minimum_residency[i] = 0; + continue; + } + + maximum_residency[i] = ~0; + for (j = i + 1; j < cpu->nlevels; j++) { + mode_avail = probe_time || + 
lpm_cpu_mode_allow(cpu_id, j, true); + + if (mode_avail && + (maximum_residency[i] > pwr->residencies[j]) && + (pwr->residencies[j] != 0)) + maximum_residency[i] = pwr->residencies[j]; + } + + minimum_residency[i] = pwr->time_overhead_us; + for (j = i-1; j >= 0; j--) { + if (probe_time || lpm_cpu_mode_allow(cpu_id, j, true)) { + minimum_residency[i] = maximum_residency[j] + 1; + break; + } + } } +} - return arg; +static void set_optimum_cluster_residency(struct lpm_cluster *cluster, + bool probe_time) +{ + int i, j; + bool mode_avail; + + for (i = 0; i < cluster->nlevels; i++) { + struct power_params *pwr = &cluster->levels[i].pwr; + + mode_avail = probe_time || + lpm_cluster_mode_allow(cluster, i, + true); + + if (!mode_avail) { + pwr->max_residency = 0; + pwr->min_residency = 0; + continue; + } + + pwr->max_residency = ~0; + for (j = i+1; j < cluster->nlevels; j++) { + mode_avail = probe_time || + lpm_cluster_mode_allow(cluster, j, + true); + if (mode_avail && + (pwr->max_residency > pwr->residencies[j]) && + (pwr->residencies[j] != 0)) + pwr->max_residency = pwr->residencies[j]; + } + + pwr->min_residency = pwr->time_overhead_us; + for (j = i-1; j >= 0; j--) { + if (probe_time || + lpm_cluster_mode_allow(cluster, j, true)) { + pwr->min_residency = + cluster->levels[j].pwr.max_residency + 1; + break; + } + } + } } +uint32_t *get_per_cpu_max_residency(int cpu) +{ + return per_cpu(max_residency, cpu); +} + +uint32_t *get_per_cpu_min_residency(int cpu) +{ + return per_cpu(min_residency, cpu); +} ssize_t lpm_enable_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { int ret = 0; struct kernel_param kp; - kp.arg = get_avail_val(kobj, attr); + kp.arg = get_enabled_ptr(attr, get_avail_ptr(kobj, attr)); ret = param_get_bool(buf, &kp); if (ret > 0) { strlcat(buf, "\n", PAGE_SIZE); @@ -80,15 +184,25 @@ ssize_t lpm_enable_store(struct kobject *kobj, struct kobj_attribute *attr, { int ret = 0; struct kernel_param kp; + struct lpm_level_avail *avail; - kp.arg = get_avail_val(kobj, attr); + avail = get_avail_ptr(kobj, attr); + if (WARN_ON(!avail)) + return -EINVAL; + kp.arg = get_enabled_ptr(attr, avail); ret = param_set_bool(buf, &kp); + if (avail->cpu_node) + set_optimum_cpu_residency(avail->data, avail->idx, false); + else + set_optimum_cluster_residency(avail->data, false); + return ret ? 
ret : len; } static int create_lvl_avail_nodes(const char *name, - struct kobject *parent, struct lpm_level_avail *avail) + struct kobject *parent, struct lpm_level_avail *avail, + void *data, int index, bool cpu_node) { struct attribute_group *attr_group = NULL; struct attribute **attr = NULL; @@ -139,6 +253,9 @@ static int create_lvl_avail_nodes(const char *name, avail->idle_enabled = true; avail->suspend_enabled = true; avail->kobj = kobj; + avail->data = data; + avail->idx = index; + avail->cpu_node = cpu_node; return ret; @@ -181,7 +298,8 @@ static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent) for (i = 0; i < p->cpu->nlevels; i++) { ret = create_lvl_avail_nodes(p->cpu->levels[i].name, - cpu_kobj[cpu_idx], &level_list[i]); + cpu_kobj[cpu_idx], &level_list[i], + (void *)p->cpu, cpu, true); if (ret) goto release_kobj; } @@ -215,7 +333,8 @@ int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj) for (i = 0; i < p->nlevels; i++) { ret = create_lvl_avail_nodes(p->levels[i].level_name, - cluster_kobj, &p->levels[i].available); + cluster_kobj, &p->levels[i].available, + (void *)p, 0, false); if (ret) return ret; } @@ -421,6 +540,9 @@ static int parse_power_params(struct device_node *node, key = "qcom,time-overhead"; ret = of_property_read_u32(node, key, &pwr->time_overhead_us); + if (ret) + goto fail; + fail: if (ret) pr_err("%s(): %s Error reading %s\n", __func__, node->name, @@ -615,11 +737,31 @@ static int get_cpumask_for_node(struct device_node *node, struct cpumask *mask) return 0; } +static int calculate_residency(struct power_params *base_pwr, + struct power_params *next_pwr) +{ + int32_t residency = (int32_t)(next_pwr->energy_overhead - + base_pwr->energy_overhead) - + ((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us) + - (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us)); + + residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power); + + if (residency < 0) { + __WARN_printf("%s: Incorrect power attributes for LPM\n", + __func__); + return next_pwr->time_overhead_us; + } + + return residency < next_pwr->time_overhead_us ? 
+ next_pwr->time_overhead_us : residency; +} + static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c) { struct device_node *n; int ret = -ENOMEM; - int i; + int i, j; char *key; c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL); @@ -676,6 +818,22 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c) else if (ret) goto failed; } + for (i = 0; i < c->cpu->nlevels; i++) { + for (j = 0; j < c->cpu->nlevels; j++) { + if (i >= j) { + c->cpu->levels[i].pwr.residencies[j] = 0; + continue; + } + + c->cpu->levels[i].pwr.residencies[j] = + calculate_residency(&c->cpu->levels[i].pwr, + &c->cpu->levels[j].pwr); + + pr_err("%s: idx %d %u\n", __func__, j, + c->cpu->levels[i].pwr.residencies[j]); + } + } + return 0; failed: for (i = 0; i < c->cpu->nlevels; i++) { @@ -732,6 +890,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node, struct device_node *n; char *key; int ret = 0; + int i, j; c = devm_kzalloc(&lpm_pdev->dev, sizeof(*c), GFP_KERNEL); if (!c) @@ -789,6 +948,22 @@ struct lpm_cluster *parse_cluster(struct device_node *node, goto failed_parse_cluster; c->aff_level = 1; + + for_each_cpu(i, &c->child_cpus) { + per_cpu(max_residency, i) = devm_kzalloc( + &lpm_pdev->dev, + sizeof(uint32_t) * c->cpu->nlevels, + GFP_KERNEL); + if (!per_cpu(max_residency, i)) + return ERR_PTR(-ENOMEM); + per_cpu(min_residency, i) = devm_kzalloc( + &lpm_pdev->dev, + sizeof(uint32_t) * c->cpu->nlevels, + GFP_KERNEL); + if (!per_cpu(min_residency, i)) + return ERR_PTR(-ENOMEM); + set_optimum_cpu_residency(c->cpu, i, true); + } } } @@ -797,6 +972,17 @@ struct lpm_cluster *parse_cluster(struct device_node *node, else c->last_level = c->nlevels-1; + for (i = 0; i < c->nlevels; i++) { + for (j = 0; j < c->nlevels; j++) { + if (i >= j) { + c->levels[i].pwr.residencies[j] = 0; + continue; + } + c->levels[i].pwr.residencies[j] = calculate_residency( + &c->levels[i].pwr, &c->levels[j].pwr); + } + } + set_optimum_cluster_residency(c, true); return c; failed_parse_cluster: diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index 4f880fdd1478..37e504381313 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -1,4 +1,6 @@ /* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. 
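[Note on calculate_residency() above: it solves the standard break-even equation between two modes, the sleep time t at which E_base + P_base*t = E_next + P_next*t, and clamps the result to at least the deeper mode's enter+exit time (falling back to it with a warning when the power attributes are inconsistent). A self-contained sketch of the same arithmetic; the struct name and the numeric parameters are invented for illustration, not taken from any devicetree:]

#include <stdint.h>
#include <stdio.h>

struct pwr { uint32_t ss_power, energy_overhead, time_overhead_us; };

/* Break-even sleep time beyond which 'next' saves energy over 'base'. */
static int32_t break_even_us(const struct pwr *base, const struct pwr *next)
{
        int32_t t = (int32_t)(next->energy_overhead - base->energy_overhead) -
                ((int32_t)(next->ss_power * next->time_overhead_us) -
                 (int32_t)(base->ss_power * base->time_overhead_us));

        t /= (int32_t)(base->ss_power - next->ss_power);
        /* Never report less than the deeper mode's enter+exit overhead. */
        return t < (int32_t)next->time_overhead_us ?
                (int32_t)next->time_overhead_us : t;
}

int main(void)
{
        /* Illustrative numbers only: a shallow and a deeper CPU mode. */
        struct pwr wfi = { .ss_power = 530, .energy_overhead = 75000,
                           .time_overhead_us = 100 };
        struct pwr pc  = { .ss_power = 325, .energy_overhead = 475000,
                           .time_overhead_us = 600 };

        printf("break-even: %d us\n", break_even_us(&wfi, &pc));
        /* prints 1258 */
        return 0;
}

[With these made-up parameters the deeper mode only pays off beyond 1258 us of sleep, which is the kind of value parse_cpu_levels() stores into pwr.residencies[].]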
+ * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com> + * Copyright (C) 2009 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -83,9 +85,36 @@ struct lpm_debug { struct lpm_cluster *lpm_root_node; +static bool lpm_prediction; +module_param_named(lpm_prediction, + lpm_prediction, bool, S_IRUGO | S_IWUSR | S_IWGRP); + +static uint32_t ref_stddev = 100; +module_param_named( + ref_stddev, ref_stddev, uint, S_IRUGO | S_IWUSR | S_IWGRP +); + +static uint32_t tmr_add = 100; +module_param_named( + tmr_add, tmr_add, uint, S_IRUGO | S_IWUSR | S_IWGRP +); + +struct lpm_history { + uint32_t resi[MAXSAMPLES]; + int mode[MAXSAMPLES]; + int nsamp; + uint32_t hptr; + uint32_t hinvalid; + uint32_t htmr_wkup; + int64_t stime; +}; + +static DEFINE_PER_CPU(struct lpm_history, hist); + static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster); static bool suspend_in_progress; static struct hrtimer lpm_hrtimer; +static struct hrtimer histtimer; static struct lpm_debug *lpm_debug; static phys_addr_t lpm_debug_phys; static const int num_dbg_elements = 0x100; @@ -327,10 +356,79 @@ static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h) return HRTIMER_NORESTART; } +static void histtimer_cancel(void) +{ + hrtimer_try_to_cancel(&histtimer); +} + +static enum hrtimer_restart histtimer_fn(struct hrtimer *h) +{ + int cpu = raw_smp_processor_id(); + struct lpm_history *history = &per_cpu(hist, cpu); + + history->hinvalid = 1; + return HRTIMER_NORESTART; +} + +static void histtimer_start(uint32_t time_us) +{ + uint64_t time_ns = time_us * NSEC_PER_USEC; + ktime_t hist_ktime = ns_to_ktime(time_ns); + + histtimer.function = histtimer_fn; + hrtimer_start(&histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED); +} + +static void cluster_timer_init(struct lpm_cluster *cluster) +{ + struct list_head *list; + + if (!cluster) + return; + + hrtimer_init(&cluster->histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + + list_for_each(list, &cluster->child) { + struct lpm_cluster *n; + + n = list_entry(list, typeof(*n), list); + cluster_timer_init(n); + } +} + +static void clusttimer_cancel(void) +{ + int cpu = raw_smp_processor_id(); + struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); + + hrtimer_try_to_cancel(&cluster->histtimer); + hrtimer_try_to_cancel(&cluster->parent->histtimer); +} + +static enum hrtimer_restart clusttimer_fn(struct hrtimer *h) +{ + struct lpm_cluster *cluster = container_of(h, + struct lpm_cluster, histtimer); + + cluster->history.hinvalid = 1; + return HRTIMER_NORESTART; +} + +static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us) +{ + uint64_t time_ns = time_us * NSEC_PER_USEC; + ktime_t clust_ktime = ns_to_ktime(time_ns); + + cluster->histtimer.function = clusttimer_fn; + hrtimer_start(&cluster->histtimer, clust_ktime, + HRTIMER_MODE_REL_PINNED); +} + static void msm_pm_set_timer(uint32_t modified_time_us) { u64 modified_time_ns = modified_time_us * NSEC_PER_USEC; ktime_t modified_ktime = ns_to_ktime(modified_time_ns); + lpm_hrtimer.function = lpm_hrtimer_cb; hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED); } @@ -415,22 +513,168 @@ static int set_device_mode(struct lpm_cluster *cluster, int ndevice, return -EINVAL; } +static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev, + struct lpm_cpu *cpu, int *idx_restrict, + uint32_t *idx_restrict_time) +{ + int i, j, divisor; + uint64_t max, avg, stddev; + int64_t thresh = LLONG_MAX; + struct 
lpm_history *history = &per_cpu(hist, dev->cpu); + uint32_t *min_residency = get_per_cpu_min_residency(dev->cpu); + + if (!lpm_prediction) + return 0; + + /* + * Samples are marked invalid when the CPU is woken up by a + * timer, so do not predict. + */ + if (history->hinvalid) { + history->hinvalid = 0; + history->htmr_wkup = 1; + history->stime = 0; + return 0; + } + + /* + * Predict only when all the samples are collected. + */ + if (history->nsamp < MAXSAMPLES) { + history->stime = 0; + return 0; + } + + /* + * Check whether the samples show little deviation; if so, use + * their average as the predicted sleep time. Otherwise, if any + * specific mode shows more premature exits, return the index of + * that mode. + */ + +again: + max = avg = divisor = stddev = 0; + for (i = 0; i < MAXSAMPLES; i++) { + int64_t value = history->resi[i]; + + if (value <= thresh) { + avg += value; + divisor++; + if (value > max) + max = value; + } + } + do_div(avg, divisor); + + for (i = 0; i < MAXSAMPLES; i++) { + int64_t value = history->resi[i]; + + if (value <= thresh) { + int64_t diff = value - avg; + + stddev += diff * diff; + } + } + do_div(stddev, divisor); + stddev = int_sqrt(stddev); + + /* + * If the deviation is small, return the average; otherwise + * drop the maximum sample and retry + */ + if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1))) + || stddev <= ref_stddev) { + history->stime = ktime_to_us(ktime_get()) + avg; + return avg; + } else if (divisor > (MAXSAMPLES - 1)) { + thresh = max - 1; + goto again; + } + + /* + * Count the premature exits for each mode, excluding the + * clock-gating mode; if they exceed fifty percent, restrict + * that mode and all deeper modes. + */ + if (history->htmr_wkup != 1) { + for (j = 1; j < cpu->nlevels; j++) { + uint32_t failed = 0; + uint64_t total = 0; + + for (i = 0; i < MAXSAMPLES; i++) { + if ((history->mode[i] == j) && + (history->resi[i] < min_residency[j])) { + failed++; + total += history->resi[i]; + } + } + if (failed > (MAXSAMPLES/2)) { + *idx_restrict = j; + do_div(total, failed); + *idx_restrict_time = total; + history->stime = ktime_to_us(ktime_get()) + + *idx_restrict_time; + break; + } + } + } + return 0; +} + +static inline void invalidate_predict_history(struct cpuidle_device *dev) +{ + struct lpm_history *history = &per_cpu(hist, dev->cpu); + + if (!lpm_prediction) + return; + + if (history->hinvalid) { + history->hinvalid = 0; + history->htmr_wkup = 1; + history->stime = 0; + } +} + +static void clear_predict_history(void) +{ + struct lpm_history *history; + int i; + unsigned int cpu; + + if (!lpm_prediction) + return; + + for_each_possible_cpu(cpu) { + history = &per_cpu(hist, cpu); + for (i = 0; i < MAXSAMPLES; i++) { + history->resi[i] = 0; + history->mode[i] = -1; + history->hptr = 0; + history->nsamp = 0; + history->stime = 0; + } + } +} + +static void update_history(struct cpuidle_device *dev, int idx); + static int cpu_power_select(struct cpuidle_device *dev, struct lpm_cpu *cpu) { int best_level = -1; - uint32_t best_level_pwr = ~0U; uint32_t latency_us = pm_qos_request_for_cpu(PM_QOS_CPU_DMA_LATENCY, dev->cpu); uint32_t sleep_us = (uint32_t)(ktime_to_us(tick_nohz_get_sleep_length())); uint32_t modified_time_us = 0; uint32_t next_event_us = 0; - uint32_t pwr; - int i; + int i, idx_restrict; uint32_t lvl_latency_us = 0; - uint32_t lvl_overhead_us = 0; - uint32_t lvl_overhead_energy = 0; + uint64_t predicted = 0; + uint32_t htime = 0, idx_restrict_time = 0; + uint32_t next_wakeup_us = sleep_us; + uint32_t *min_residency =
get_per_cpu_min_residency(dev->cpu); + uint32_t *max_residency = get_per_cpu_max_residency(dev->cpu); if (!cpu) return -EINVAL; @@ -438,12 +682,13 @@ static int cpu_power_select(struct cpuidle_device *dev, if (sleep_disabled) return 0; + idx_restrict = cpu->nlevels + 1; + next_event_us = (uint32_t)(ktime_to_us(get_next_event_time(dev->cpu))); for (i = 0; i < cpu->nlevels; i++) { struct lpm_cpu_level *level = &cpu->levels[i]; struct power_params *pwr_params = &level->pwr; - uint32_t next_wakeup_us = sleep_us; enum msm_pm_sleep_mode mode = level->mode; bool allow; @@ -454,66 +699,88 @@ static int cpu_power_select(struct cpuidle_device *dev, lvl_latency_us = pwr_params->latency_us; - lvl_overhead_us = pwr_params->time_overhead_us; - - lvl_overhead_energy = pwr_params->energy_overhead; - if (latency_us < lvl_latency_us) - continue; + break; if (next_event_us) { if (next_event_us < lvl_latency_us) - continue; + break; if (((next_event_us - lvl_latency_us) < sleep_us) || (next_event_us < sleep_us)) next_wakeup_us = next_event_us - lvl_latency_us; } - if (next_wakeup_us <= pwr_params->time_overhead_us) - continue; - - /* - * If wakeup time greater than overhead by a factor of 1000 - * assume that core steady state power dominates the power - * equation - */ - if ((next_wakeup_us >> 10) > lvl_overhead_us) { - pwr = pwr_params->ss_power; - } else { - pwr = pwr_params->ss_power; - pwr -= (lvl_overhead_us * pwr_params->ss_power) / - next_wakeup_us; - pwr += pwr_params->energy_overhead / next_wakeup_us; + if (!i) { + /* + * If next_wakeup_us itself is not long enough for + * low power modes deeper than clock gating, do not + * call prediction. + */ + if (next_wakeup_us > max_residency[i]) { + predicted = lpm_cpuidle_predict(dev, cpu, + &idx_restrict, &idx_restrict_time); + if (predicted < min_residency[i]) + predicted = 0; + } else + invalidate_predict_history(dev); } - if (best_level_pwr >= pwr) { - best_level = i; - best_level_pwr = pwr; - if (next_event_us && next_event_us < sleep_us && - (mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)) - modified_time_us - = next_event_us - lvl_latency_us; - else - modified_time_us = 0; - } + if (i >= idx_restrict) + break; + + best_level = i; + + if (next_event_us && next_event_us < sleep_us && + (mode != MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT)) + modified_time_us + = next_event_us - lvl_latency_us; + else + modified_time_us = 0; + + if (predicted ? (predicted <= max_residency[i]) + : (next_wakeup_us <= max_residency[i])) + break; } if (modified_time_us) msm_pm_set_timer(modified_time_us); + /* + * Start a timer to avoid staying in a shallower mode forever + * in case of misprediction + */ + if ((predicted || (idx_restrict != (cpu->nlevels + 1))) + && ((best_level >= 0) + && (best_level < (cpu->nlevels-1)))) { + htime = predicted + tmr_add; + if (htime == tmr_add) + htime = idx_restrict_time; + else if (htime > max_residency[best_level]) + htime = max_residency[best_level]; + + if ((next_wakeup_us > htime) && + ((next_wakeup_us - htime) > max_residency[best_level])) + histtimer_start(htime); + } + trace_cpu_power_select(best_level, sleep_us, latency_us, next_event_us); + trace_cpu_pred_select(idx_restrict_time ? 2 : (predicted ?
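[Note: the heart of lpm_cpuidle_predict() used above is a mean/standard-deviation filter over the last MAXSAMPLES residencies: trust the average when the spread is tight, otherwise discard the single largest sample and try once more. A distilled user-space sketch of just that loop, assuming the ref_stddev default of 100; the function name and sample values are invented for illustration:]

#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define MAXSAMPLES 5
#define REF_STDDEV 100 /* mirrors the ref_stddev module param default */

/* Predicted sleep time in us, or 0 when no confident guess exists. */
static uint64_t predict(const uint32_t resi[MAXSAMPLES])
{
        uint64_t thresh = UINT64_MAX;

        for (;;) {
                uint64_t max = 0, sum = 0, var = 0, avg, stddev;
                int i, n = 0;

                for (i = 0; i < MAXSAMPLES; i++) {
                        if (resi[i] > thresh)
                                continue; /* discarded outlier */
                        sum += resi[i];
                        if (resi[i] > max)
                                max = resi[i];
                        n++;
                }
                avg = sum / n;

                for (i = 0; i < MAXSAMPLES; i++) {
                        if (resi[i] <= thresh) {
                                int64_t d = (int64_t)resi[i] - (int64_t)avg;

                                var += (uint64_t)(d * d);
                        }
                }
                stddev = (uint64_t)sqrt((double)var / n);

                /* Confident when the samples cluster around the mean. */
                if ((avg > stddev * 6 && n >= MAXSAMPLES - 1) ||
                    stddev <= REF_STDDEV)
                        return avg;
                if (n <= MAXSAMPLES - 1)
                        return 0; /* one outlier already dropped; give up */
                thresh = max - 1; /* drop the largest sample and retry */
        }
}

int main(void)
{
        /* Four tight samples plus one outlier; prints 497. */
        const uint32_t resi[MAXSAMPLES] = { 480, 510, 495, 20000, 505 };

        printf("%llu\n", (unsigned long long)predict(resi));
        return 0;
}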
1 : 0), + predicted, htime); + return best_level; } static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster, - struct cpumask *mask, bool from_idle) + struct cpumask *mask, bool from_idle, uint32_t *pred_time) { int cpu; int next_cpu = raw_smp_processor_id(); ktime_t next_event; struct cpumask online_cpus_in_cluster; + struct lpm_history *history; + int64_t prediction = LONG_MAX; next_event.tv64 = KTIME_MAX; if (!suspend_wake_time) @@ -538,11 +805,21 @@ static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster, next_event.tv64 = next_event_c->tv64; next_cpu = cpu; } + + if (from_idle && lpm_prediction) { + history = &per_cpu(hist, cpu); + if (history->stime && (history->stime < prediction)) + prediction = history->stime; + } } if (mask) cpumask_copy(mask, cpumask_of(next_cpu)); + if (from_idle && lpm_prediction) { + if (prediction > ktime_to_us(ktime_get())) + *pred_time = prediction - ktime_to_us(ktime_get()); + } if (ktime_to_us(next_event) > ktime_to_us(ktime_get())) return ktime_to_us(ktime_sub(next_event, ktime_get())); @@ -550,20 +827,193 @@ static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster, return 0; } -static int cluster_select(struct lpm_cluster *cluster, bool from_idle) +static int cluster_predict(struct lpm_cluster *cluster, + uint32_t *pred_us) +{ + int i, j; + int ret = 0; + struct cluster_history *history = &cluster->history; + int64_t cur_time = ktime_to_us(ktime_get()); + + if (!lpm_prediction) + return 0; + + if (history->hinvalid) { + history->hinvalid = 0; + history->htmr_wkup = 1; + history->flag = 0; + return ret; + } + + if (history->nsamp == MAXSAMPLES) { + for (i = 0; i < MAXSAMPLES; i++) { + if ((cur_time - history->stime[i]) + > CLUST_SMPL_INVLD_TIME) + history->nsamp--; + } + } + + if (history->nsamp < MAXSAMPLES) { + history->flag = 0; + return ret; + } + + if (history->flag == 2) + history->flag = 0; + + if (history->htmr_wkup != 1) { + uint64_t total = 0; + + if (history->flag == 1) { + for (i = 0; i < MAXSAMPLES; i++) + total += history->resi[i]; + do_div(total, MAXSAMPLES); + *pred_us = total; + return 2; + } + + for (j = 1; j < cluster->nlevels; j++) { + uint32_t failed = 0; + + total = 0; + for (i = 0; i < MAXSAMPLES; i++) { + if ((history->mode[i] == j) && (history->resi[i] + < cluster->levels[j].pwr.min_residency)) { + failed++; + total += history->resi[i]; + } + } + + if (failed > (MAXSAMPLES-2)) { + do_div(total, failed); + *pred_us = total; + history->flag = 1; + return 1; + } + } + } + + return ret; +} + +static void update_cluster_history_time(struct cluster_history *history, + int idx, uint64_t start) +{ + history->entry_idx = idx; + history->entry_time = start; +} + +static void update_cluster_history(struct cluster_history *history, int idx) +{ + uint32_t tmr = 0; + uint32_t residency = 0; + struct lpm_cluster *cluster = + container_of(history, struct lpm_cluster, history); + + if (!lpm_prediction) + return; + + if ((history->entry_idx == -1) || (history->entry_idx == idx)) { + residency = ktime_to_us(ktime_get()) - history->entry_time; + history->stime[history->hptr] = history->entry_time; + } else + return; + + if (history->htmr_wkup) { + if (!history->hptr) + history->hptr = MAXSAMPLES-1; + else + history->hptr--; + + history->resi[history->hptr] += residency; + + history->htmr_wkup = 0; + tmr = 1; + } else { + history->resi[history->hptr] = residency; + } + + history->mode[history->hptr] = idx; + + history->entry_idx = INT_MIN; + history->entry_time = 0; + + if (history->nsamp < MAXSAMPLES) + 
history->nsamp++; + + trace_cluster_pred_hist(cluster->cluster_name, + history->mode[history->hptr], history->resi[history->hptr], + history->hptr, tmr); + + (history->hptr)++; + + if (history->hptr >= MAXSAMPLES) + history->hptr = 0; +} + +static void clear_cl_history_each(struct cluster_history *history) +{ + int i; + + for (i = 0; i < MAXSAMPLES; i++) { + history->resi[i] = 0; + history->mode[i] = -1; + history->stime[i] = 0; + } + history->hptr = 0; + history->nsamp = 0; + history->flag = 0; + history->hinvalid = 0; + history->htmr_wkup = 0; +} + +static void clear_cl_predict_history(void) +{ + struct lpm_cluster *cluster = lpm_root_node; + struct list_head *list; + + if (!lpm_prediction) + return; + + clear_cl_history_each(&cluster->history); + + list_for_each(list, &cluster->child) { + struct lpm_cluster *n; + + n = list_entry(list, typeof(*n), list); + clear_cl_history_each(&n->history); + } +} + +static int cluster_select(struct lpm_cluster *cluster, bool from_idle, + int *ispred) { int best_level = -1; int i; - uint32_t best_level_pwr = ~0U; - uint32_t pwr; struct cpumask mask; uint32_t latency_us = ~0U; uint32_t sleep_us; + uint32_t cpupred_us = 0, pred_us = 0; + int pred_mode = 0, predicted = 0; if (!cluster) return -EINVAL; - sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, from_idle); + sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, + from_idle, &cpupred_us); + + if (from_idle) { + pred_mode = cluster_predict(cluster, &pred_us); + + if (cpupred_us && pred_mode && (cpupred_us < pred_us)) + pred_us = cpupred_us; + + if (pred_us && pred_mode && (pred_us < sleep_us)) + predicted = 1; + + if (predicted && (pred_us == cpupred_us)) + predicted = 2; + } if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus)) latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY, @@ -596,10 +1046,10 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle) continue; if (from_idle && latency_us < pwr_params->latency_us) - continue; + break; if (sleep_us < pwr_params->time_overhead_us) - continue; + break; if (suspend_in_progress && from_idle && level->notify_rpm) continue; @@ -607,21 +1057,21 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle) if (level->notify_rpm && msm_rpm_waiting_for_ack()) continue; - if ((sleep_us >> 10) > pwr_params->time_overhead_us) { - pwr = pwr_params->ss_power; - } else { - pwr = pwr_params->ss_power; - pwr -= (pwr_params->time_overhead_us * - pwr_params->ss_power) / sleep_us; - pwr += pwr_params->energy_overhead / sleep_us; - } + best_level = i; - if (best_level_pwr >= pwr) { - best_level = i; - best_level_pwr = pwr; - } + if (predicted ? 
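[Note: update_history() for CPUs and update_cluster_history() above share one ring-buffer rule: when the wakeup was forced by the history timer (htmr_wkup), the earlier sample under-measured the available sleep, so the new residency is folded back into the previous slot instead of consuming a new one. A minimal sketch of just that indexing rule; the struct is a simplified stand-in for the kernel's lpm_history:]

#define MAXSAMPLES 5

struct hist {
        unsigned int resi[MAXSAMPLES];
        int mode[MAXSAMPLES];
        unsigned int hptr;      /* next slot to write */
        unsigned int htmr_wkup; /* last wakeup came from the history timer */
};

static void record(struct hist *h, unsigned int residency, int idx)
{
        if (h->htmr_wkup) {
                /* Step back and accumulate into the previous sample. */
                h->hptr = h->hptr ? h->hptr - 1 : MAXSAMPLES - 1;
                h->resi[h->hptr] += residency;
                h->htmr_wkup = 0;
        } else {
                h->resi[h->hptr] = residency;
        }
        h->mode[h->hptr] = idx;
        h->hptr = (h->hptr + 1) % MAXSAMPLES;
}

int main(void)
{
        struct hist h = { .hptr = 0 };

        record(&h, 300, 1);     /* normal sample -> slot 0, hptr -> 1 */
        h.htmr_wkup = 1;        /* next wakeup forced by the timer */
        record(&h, 200, 1);     /* folded into slot 0: resi[0] == 500 */
        return 0;
}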
(pred_us <= pwr_params->max_residency) + : (sleep_us <= pwr_params->max_residency)) + break; } + if ((best_level == (cluster->nlevels - 1)) && (pred_mode == 2)) + cluster->history.flag = 2; + + *ispred = predicted; + + trace_cluster_pred_select(cluster->cluster_name, best_level, sleep_us, + latency_us, predicted, pred_us); + return best_level; } @@ -635,7 +1085,7 @@ static void cluster_notify(struct lpm_cluster *cluster, } static int cluster_configure(struct lpm_cluster *cluster, int idx, - bool from_idle) + bool from_idle, int predicted) { struct lpm_cluster_level *level = &cluster->levels[idx]; int ret, i; @@ -653,6 +1103,10 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx, cluster->num_children_in_sync.bits[0], cluster->child_cpus.bits[0], from_idle); lpm_stats_cluster_enter(cluster->stats, idx); + + if (from_idle && lpm_prediction) + update_cluster_history_time(&cluster->history, idx, + ktime_to_us(ktime_get())); } for (i = 0; i < cluster->ndevices; i++) { @@ -664,8 +1118,10 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx, if (level->notify_rpm) { struct cpumask nextcpu, *cpumask; uint64_t us; + uint32_t pred_us; - us = get_cluster_sleep_time(cluster, &nextcpu, from_idle); + us = get_cluster_sleep_time(cluster, &nextcpu, + from_idle, &pred_us); cpumask = level->disable_dynamic_routing ? NULL : &nextcpu; ret = msm_rpm_enter_sleep(0, cpumask); @@ -675,6 +1131,9 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx, } us = us + 1; + clear_predict_history(); + clear_cl_predict_history(); + do_div(us, USEC_PER_SEC/SCLK_HZ); msm_mpm_enter_sleep(us, from_idle, cpumask); } @@ -685,6 +1144,15 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx, sched_set_cluster_dstate(&cluster->child_cpus, idx, 0, 0); cluster->last_level = idx; + + if (predicted && (idx < (cluster->nlevels - 1))) { + struct power_params *pwr_params = &cluster->levels[idx].pwr; + + tick_broadcast_exit(); + clusttimer_start(cluster, pwr_params->max_residency + tmr_add); + tick_broadcast_enter(); + } + return 0; failed_set_mode: @@ -703,6 +1171,7 @@ static void cluster_prepare(struct lpm_cluster *cluster, int64_t start_time) { int i; + int predicted = 0; if (!cluster) return; @@ -733,12 +1202,28 @@ static void cluster_prepare(struct lpm_cluster *cluster, &cluster->child_cpus)) goto failed; - i = cluster_select(cluster, from_idle); + i = cluster_select(cluster, from_idle, &predicted); + + if (((i < 0) || (i == cluster->default_level)) + && predicted && from_idle) { + update_cluster_history_time(&cluster->history, + -1, ktime_to_us(ktime_get())); + + if (i < 0) { + struct power_params *pwr_params = + &cluster->levels[0].pwr; + + tick_broadcast_exit(); + clusttimer_start(cluster, + pwr_params->max_residency + tmr_add); + tick_broadcast_enter(); + } + } if (i < 0) goto failed; - if (cluster_configure(cluster, i, from_idle)) + if (cluster_configure(cluster, i, from_idle, predicted)) goto failed; cluster->stats->sleep_time = start_time; @@ -782,6 +1267,10 @@ static void cluster_unprepare(struct lpm_cluster *cluster, &lvl->num_cpu_votes, cpu); } + if (from_idle && first_cpu && + (cluster->last_level == cluster->default_level)) + update_cluster_history(&cluster->history, cluster->last_level); + if (!first_cpu || cluster->last_level == cluster->default_level) goto unlock_return; @@ -823,6 +1312,10 @@ static void cluster_unprepare(struct lpm_cluster *cluster, sched_set_cluster_dstate(&cluster->child_cpus, 0, 0, 0); cluster_notify(cluster, &cluster->levels[last_level], 
false); + + if (from_idle) + update_cluster_history(&cluster->history, last_level); + cluster_unprepare(cluster->parent, &cluster->child_cpus, last_level, from_idle, end_time); unlock_return: @@ -1009,6 +1502,39 @@ static int lpm_cpuidle_select(struct cpuidle_driver *drv, return idx; } +static void update_history(struct cpuidle_device *dev, int idx) +{ + struct lpm_history *history = &per_cpu(hist, dev->cpu); + uint32_t tmr = 0; + + if (!lpm_prediction) + return; + + if (history->htmr_wkup) { + if (!history->hptr) + history->hptr = MAXSAMPLES-1; + else + history->hptr--; + + history->resi[history->hptr] += dev->last_residency; + history->htmr_wkup = 0; + tmr = 1; + } else + history->resi[history->hptr] = dev->last_residency; + + history->mode[history->hptr] = idx; + + trace_cpu_pred_hist(history->mode[history->hptr], + history->resi[history->hptr], history->hptr, tmr); + + if (history->nsamp < MAXSAMPLES) + history->nsamp++; + + (history->hptr)++; + if (history->hptr >= MAXSAMPLES) + history->hptr = 0; +} + static int lpm_cpuidle_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int idx) { @@ -1043,12 +1569,16 @@ exit: cluster_unprepare(cluster, cpumask, idx, true, end_time); cpu_unprepare(cluster, idx, true); sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0); - - trace_cpu_idle_exit(idx, success); end_time = ktime_to_ns(ktime_get()) - start_time; - dev->last_residency = do_div(end_time, 1000); + do_div(end_time, 1000); + dev->last_residency = end_time; + update_history(dev, idx); + trace_cpu_idle_exit(idx, success); local_irq_enable(); - + if (lpm_prediction) { + histtimer_cancel(); + clusttimer_cancel(); + } return idx; } @@ -1320,6 +1850,8 @@ static int lpm_probe(struct platform_device *pdev) */ suspend_set_ops(&lpm_suspend_ops); hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cluster_timer_init(lpm_root_node); ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID); if (ret) { diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h index 8e05336be21a..3c9665ea8981 100644 --- a/drivers/cpuidle/lpm-levels.h +++ b/drivers/cpuidle/lpm-levels.h @@ -14,6 +14,8 @@ #include <soc/qcom/spm.h> #define NR_LPM_LEVELS 8 +#define MAXSAMPLES 5 +#define CLUST_SMPL_INVLD_TIME 40000 extern bool use_psci; @@ -27,6 +29,9 @@ struct power_params { uint32_t ss_power; /* Steady state power */ uint32_t energy_overhead; /* Enter + exit over head */ uint32_t time_overhead_us; /* Enter + exit overhead */ + uint32_t residencies[NR_LPM_LEVELS]; + uint32_t min_residency; + uint32_t max_residency; }; struct lpm_cpu_level { @@ -55,6 +60,9 @@ struct lpm_level_avail { struct kobject *kobj; struct kobj_attribute idle_enabled_attr; struct kobj_attribute suspend_enabled_attr; + void *data; + int idx; + bool cpu_node; }; struct lpm_cluster_level { @@ -79,6 +87,19 @@ struct low_power_ops { enum msm_pm_l2_scm_flag tz_flag; }; +struct cluster_history { + uint32_t resi[MAXSAMPLES]; + int mode[MAXSAMPLES]; + int64_t stime[MAXSAMPLES]; + uint32_t hptr; + uint32_t hinvalid; + uint32_t htmr_wkup; + uint64_t entry_time; + int entry_idx; + int nsamp; + int flag; +}; + struct lpm_cluster { struct list_head list; struct list_head child; @@ -103,6 +124,8 @@ struct lpm_cluster { unsigned int psci_mode_shift; unsigned int psci_mode_mask; bool no_saw_devices; + struct cluster_history history; + struct hrtimer histtimer; }; int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm); @@ -119,7 +142,8 @@ bool 
lpm_cpu_mode_allow(unsigned int cpu, unsigned int mode, bool from_idle); bool lpm_cluster_mode_allow(struct lpm_cluster *cluster, unsigned int mode, bool from_idle); - +uint32_t *get_per_cpu_max_residency(int cpu); +uint32_t *get_per_cpu_min_residency(int cpu); extern struct lpm_cluster *lpm_root_node; #ifdef CONFIG_SMP diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile index 90aee3cad5ad..625a2640b4c4 100644 --- a/drivers/gpu/msm/Makefile +++ b/drivers/gpu/msm/Makefile @@ -3,7 +3,7 @@ ccflags-y := -Idrivers/staging/android msm_kgsl_core-y = \ kgsl.o \ kgsl_trace.o \ - kgsl_cmdbatch.o \ + kgsl_drawobj.o \ kgsl_ioctl.o \ kgsl_sharedmem.o \ kgsl_pwrctrl.o \ diff --git a/drivers/gpu/msm/a5xx_reg.h b/drivers/gpu/msm/a5xx_reg.h index 3b29452ce8bd..f3b4e6622043 100644 --- a/drivers/gpu/msm/a5xx_reg.h +++ b/drivers/gpu/msm/a5xx_reg.h @@ -640,6 +640,7 @@ /* UCHE registers */ #define A5XX_UCHE_ADDR_MODE_CNTL 0xE80 +#define A5XX_UCHE_MODE_CNTL 0xE81 #define A5XX_UCHE_WRITE_THRU_BASE_LO 0xE87 #define A5XX_UCHE_WRITE_THRU_BASE_HI 0xE88 #define A5XX_UCHE_TRAP_BASE_LO 0xE89 diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index a3b25b3d8dd1..3615be45b6d9 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -244,6 +244,28 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .core = 5, .major = 4, .minor = 0, + .patchid = 0, + .features = ADRENO_PREEMPTION | ADRENO_64BIT | + ADRENO_CONTENT_PROTECTION | + ADRENO_GPMU | ADRENO_SPTP_PC, + .pm4fw_name = "a530_pm4.fw", + .pfpfw_name = "a530_pfp.fw", + .zap_name = "a540_zap", + .gpudev = &adreno_a5xx_gpudev, + .gmem_size = SZ_1M, + .num_protected_regs = 0x20, + .busy_mask = 0xFFFFFFFE, + .gpmufw_name = "a540_gpmu.fw2", + .gpmu_major = 3, + .gpmu_minor = 0, + .gpmu_tsens = 0x000C000D, + .max_power = 5448, + }, + { + .gpurev = ADRENO_REV_A540, + .core = 5, + .major = 4, + .minor = 0, .patchid = ANY_ID, .features = ADRENO_PREEMPTION | ADRENO_64BIT | ADRENO_CONTENT_PROTECTION | diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 498386903936..e9d16426d4a5 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -40,6 +40,7 @@ /* Include the master list of GPU cores that are supported */ #include "adreno-gpulist.h" +#include "adreno_dispatch.h" #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "adreno." 
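[Note: given the min_residency/max_residency fields added to struct power_params above, level selection in the new cpu_power_select()/cluster_select() reduces to a residency-window walk rather than the old per-level power estimate: keep descending while the expected (or predicted) sleep still exceeds the current level's break-even bound. A simplified sketch of that rule, with the QoS and latency checks elided; names and numbers are invented for illustration:]

#include <stdint.h>

/*
 * Pick the deepest worthwhile level: bounds come from
 * set_optimum_cpu_residency() at probe or sysfs-toggle time.
 */
static int pick_level(const uint32_t *max_res, int nlevels,
                      uint32_t expected_sleep_us)
{
        int i, best = -1;

        for (i = 0; i < nlevels; i++) {
                /* latency/QoS checks elided for brevity */
                best = i;
                if (expected_sleep_us <= max_res[i])
                        break;  /* deeper levels would not pay off here */
        }
        return best;
}

int main(void)
{
        const uint32_t max_res[3] = { 200, 5000, ~0u };

        return pick_level(max_res, 3, 1500);    /* selects level 1 */
}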
@@ -1015,8 +1016,8 @@ static void _adreno_free_memories(struct adreno_device *adreno_dev) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv)) - kgsl_free_global(device, &adreno_dev->cmdbatch_profile_buffer); + if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv)) + kgsl_free_global(device, &adreno_dev->profile_buffer); /* Free local copies of firmware and other command streams */ kfree(adreno_dev->pfp_fw); @@ -1187,22 +1188,22 @@ static int adreno_init(struct kgsl_device *device) } /* - * Allocate a small chunk of memory for precise cmdbatch profiling for + * Allocate a small chunk of memory for precise drawobj profiling for * those targets that have the always on timer */ if (!adreno_is_a3xx(adreno_dev)) { int r = kgsl_allocate_global(device, - &adreno_dev->cmdbatch_profile_buffer, PAGE_SIZE, + &adreno_dev->profile_buffer, PAGE_SIZE, 0, 0, "alwayson"); - adreno_dev->cmdbatch_profile_index = 0; + adreno_dev->profile_index = 0; if (r == 0) { - set_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, + set_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv); kgsl_sharedmem_set(device, - &adreno_dev->cmdbatch_profile_buffer, 0, 0, + &adreno_dev->profile_buffer, 0, 0, PAGE_SIZE); } @@ -1653,14 +1654,9 @@ static inline bool adreno_try_soft_reset(struct kgsl_device *device, int fault) int adreno_reset(struct kgsl_device *device, int fault) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); int ret = -EINVAL; int i = 0; - /* broadcast to HW - reset is coming */ - if (gpudev->pre_reset) - gpudev->pre_reset(adreno_dev); - /* Try soft reset first */ if (adreno_try_soft_reset(device, fault)) { /* Make sure VBIF is cleared before resetting */ @@ -2340,12 +2336,12 @@ int adreno_idle(struct kgsl_device *device) * adreno_drain() - Drain the dispatch queue * @device: Pointer to the KGSL device structure for the GPU * - * Drain the dispatcher of existing command batches. This halts + * Drain the dispatcher of existing drawobjs. This halts * additional commands from being issued until the gate is completed. 
*/ static int adreno_drain(struct kgsl_device *device) { - reinit_completion(&device->cmdbatch_gate); + reinit_completion(&device->halt_gate); return 0; } @@ -2799,6 +2795,18 @@ static void adreno_regulator_disable_poll(struct kgsl_device *device) adreno_iommu_sync(device, false); } +static void adreno_gpu_model(struct kgsl_device *device, char *str, + size_t bufsz) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + + snprintf(str, bufsz, "Adreno%d%d%dv%d", + ADRENO_CHIPID_CORE(adreno_dev->chipid), + ADRENO_CHIPID_MAJOR(adreno_dev->chipid), + ADRENO_CHIPID_MINOR(adreno_dev->chipid), + ADRENO_CHIPID_PATCH(adreno_dev->chipid) + 1); +} + static const struct kgsl_functable adreno_functable = { /* Mandatory functions */ .regread = adreno_regread, @@ -2813,7 +2821,7 @@ static const struct kgsl_functable adreno_functable = { .getproperty_compat = adreno_getproperty_compat, .waittimestamp = adreno_waittimestamp, .readtimestamp = adreno_readtimestamp, - .issueibcmds = adreno_ringbuffer_issueibcmds, + .queue_cmds = adreno_dispatcher_queue_cmds, .ioctl = adreno_ioctl, .compat_ioctl = adreno_compat_ioctl, .power_stats = adreno_power_stats, @@ -2835,7 +2843,8 @@ static const struct kgsl_functable adreno_functable = { .regulator_disable = adreno_regulator_disable, .pwrlevel_change_settings = adreno_pwrlevel_change_settings, .regulator_disable_poll = adreno_regulator_disable_poll, - .clk_set_options = adreno_clk_set_options + .clk_set_options = adreno_clk_set_options, + .gpu_model = adreno_gpu_model, }; static struct platform_driver adreno_platform_driver = { diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 286f7d63c8fe..295a3d80d476 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -76,13 +76,13 @@ KGSL_CONTEXT_PREEMPT_STYLE_SHIFT) /* - * return the dispatcher cmdqueue in which the given cmdbatch should + * return the dispatcher drawqueue in which the given drawobj should * be submitted */ -#define ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(c) \ +#define ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(c) \ (&((ADRENO_CONTEXT(c->context))->rb->dispatch_q)) -#define ADRENO_CMDBATCH_RB(c) \ +#define ADRENO_DRAWOBJ_RB(c) \ ((ADRENO_CONTEXT(c->context))->rb) /* Adreno core features */ @@ -346,8 +346,8 @@ struct adreno_gpu_core { * @halt: Atomic variable to check whether the GPU is currently halted * @ctx_d_debugfs: Context debugfs node * @pwrctrl_flag: Flag to hold adreno specific power attributes - * @cmdbatch_profile_buffer: Memdesc holding the cmdbatch profiling buffer - * @cmdbatch_profile_index: Index to store the start/stop ticks in the profiling + * @profile_buffer: Memdesc holding the drawobj profiling buffer + * @profile_index: Index to store the start/stop ticks in the profiling * buffer * @sp_local_gpuaddr: Base GPU virtual address for SP local memory * @sp_pvt_gpuaddr: Base GPU virtual address for SP private memory @@ -404,8 +404,8 @@ struct adreno_device { struct dentry *ctx_d_debugfs; unsigned long pwrctrl_flag; - struct kgsl_memdesc cmdbatch_profile_buffer; - unsigned int cmdbatch_profile_index; + struct kgsl_memdesc profile_buffer; + unsigned int profile_index; uint64_t sp_local_gpuaddr; uint64_t sp_pvt_gpuaddr; const struct firmware *lm_fw; @@ -441,7 +441,7 @@ struct adreno_device { * @ADRENO_DEVICE_STARTED - Set if the device start sequence is in progress * @ADRENO_DEVICE_FAULT - Set if the device is currently in fault (and shouldn't * send any more commands to the ringbuffer) - * @ADRENO_DEVICE_CMDBATCH_PROFILE - Set if the device supports command batch + 
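[Note: adreno_gpu_model() above renders the chip ID fields into a conventional GPU name, printing the stored patch level plus one. A tiny standalone example of the same format string; the field values are invented for illustration:]

#include <stdio.h>

int main(void)
{
        char str[32];
        int core = 5, major = 3, minor = 0, patchid = 0;

        snprintf(str, sizeof(str), "Adreno%d%d%dv%d",
                 core, major, minor, patchid + 1);
        printf("%s\n", str);    /* prints "Adreno530v1" */
        return 0;
}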
* @ADRENO_DEVICE_DRAWOBJ_PROFILE - Set if the device supports drawobj * profiling via the ALWAYSON counter * @ADRENO_DEVICE_PREEMPTION - Turn on/off preemption * @ADRENO_DEVICE_SOFT_FAULT_DETECT - Set if soft fault detect is enabled @@ -459,7 +459,7 @@ enum adreno_device_flags { ADRENO_DEVICE_HANG_INTR = 4, ADRENO_DEVICE_STARTED = 5, ADRENO_DEVICE_FAULT = 6, - ADRENO_DEVICE_CMDBATCH_PROFILE = 7, + ADRENO_DEVICE_DRAWOBJ_PROFILE = 7, ADRENO_DEVICE_GPU_REGULATOR_ENABLED = 8, ADRENO_DEVICE_PREEMPTION = 9, ADRENO_DEVICE_SOFT_FAULT_DETECT = 10, @@ -469,22 +469,22 @@ enum adreno_device_flags { }; /** - * struct adreno_cmdbatch_profile_entry - a single command batch entry in the + * struct adreno_drawobj_profile_entry - a single drawobj entry in the * kernel profiling buffer - * @started: Number of GPU ticks at start of the command batch - * @retired: Number of GPU ticks at the end of the command batch + * @started: Number of GPU ticks at start of the drawobj + * @retired: Number of GPU ticks at the end of the drawobj */ -struct adreno_cmdbatch_profile_entry { +struct adreno_drawobj_profile_entry { uint64_t started; uint64_t retired; }; -#define ADRENO_CMDBATCH_PROFILE_COUNT \ - (PAGE_SIZE / sizeof(struct adreno_cmdbatch_profile_entry)) +#define ADRENO_DRAWOBJ_PROFILE_COUNT \ + (PAGE_SIZE / sizeof(struct adreno_drawobj_profile_entry)) -#define ADRENO_CMDBATCH_PROFILE_OFFSET(_index, _member) \ - ((_index) * sizeof(struct adreno_cmdbatch_profile_entry) \ - + offsetof(struct adreno_cmdbatch_profile_entry, _member)) +#define ADRENO_DRAWOBJ_PROFILE_OFFSET(_index, _member) \ + ((_index) * sizeof(struct adreno_drawobj_profile_entry) \ + + offsetof(struct adreno_drawobj_profile_entry, _member)) /** @@ -765,7 +765,6 @@ struct adreno_gpudev { int (*preemption_init)(struct adreno_device *); void (*preemption_schedule)(struct adreno_device *); void (*enable_64bit)(struct adreno_device *); - void (*pre_reset)(struct adreno_device *); void (*clk_set_options)(struct adreno_device *, const char *, struct clk *); }; @@ -776,7 +775,7 @@ struct adreno_gpudev { * @KGSL_FT_REPLAY: Replay the faulting command * @KGSL_FT_SKIPIB: Skip the faulting indirect buffer * @KGSL_FT_SKIPFRAME: Skip the frame containing the faulting IB - * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command batch + * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command obj * @KGSL_FT_TEMP_DISABLE: Disables FT for all commands * @KGSL_FT_THROTTLE: Disable the context if it faults too often * @KGSL_FT_SKIPCMD: Skip the command containing the faulting IB @@ -793,7 +792,7 @@ enum kgsl_ft_policy_bits { /* KGSL_FT_MAX_BITS is used to calculate the mask */ KGSL_FT_MAX_BITS, /* Internal bits - set during GFT */ - /* Skip the PM dump on replayed command batches */ + /* Skip the PM dump on replayed command obj's */ KGSL_FT_SKIP_PMDUMP = 31, }; @@ -882,7 +881,7 @@ int adreno_reset(struct kgsl_device *device, int fault); void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev, struct adreno_context *drawctxt, - struct kgsl_cmdbatch *cmdbatch); + struct kgsl_drawobj *drawobj); int adreno_coresight_init(struct adreno_device *adreno_dev); @@ -1015,6 +1014,12 @@ static inline int adreno_is_a540v1(struct adreno_device *adreno_dev) (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 0); } +static inline int adreno_is_a540v2(struct adreno_device *adreno_dev) +{ + return (ADRENO_GPUREV(adreno_dev) == ADRENO_REV_A540) && + (ADRENO_CHIPID_PATCH(adreno_dev->chipid) == 1); +} + /* * adreno_checkreg_off() - Checks the validity of a 
register enum * @adreno_dev: Pointer to adreno device diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 1782d1d54946..e67bb92c0c28 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -1406,105 +1406,10 @@ static void a530_lm_enable(struct adreno_device *adreno_dev) adreno_is_a530v2(adreno_dev) ? 0x00060011 : 0x00000011); } -static bool llm_is_enabled(struct adreno_device *adreno_dev) -{ - unsigned int r; - struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - - kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r); - return r & (GPMU_BCL_ENABLED | GPMU_LLM_ENABLED); -} - - -static void sleep_llm(struct adreno_device *adreno_dev) -{ - unsigned int r, retry; - struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - - if (!llm_is_enabled(adreno_dev)) - return; - - kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL, &r); - - if ((r & STATE_OF_CHILD) == 0) { - /* If both children are on, sleep CHILD_O1 first */ - kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL, - STATE_OF_CHILD, STATE_OF_CHILD_01 | IDLE_FULL_LM_SLEEP); - /* Wait for IDLE_FULL_ACK before continuing */ - for (retry = 0; retry < 5; retry++) { - udelay(1); - kgsl_regread(device, - A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r); - if (r & IDLE_FULL_ACK) - break; - } - - if (retry == 5) - KGSL_CORE_ERR("GPMU: LLM failed to idle: 0x%X\n", r); - } - - /* Now turn off both children */ - kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL, - 0, STATE_OF_CHILD | IDLE_FULL_LM_SLEEP); - - /* wait for WAKEUP_ACK to be zero */ - for (retry = 0; retry < 5; retry++) { - udelay(1); - kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r); - if ((r & WAKEUP_ACK) == 0) - break; - } - - if (retry == 5) - KGSL_CORE_ERR("GPMU: LLM failed to sleep: 0x%X\n", r); -} - -static void wake_llm(struct adreno_device *adreno_dev) -{ - unsigned int r, retry; - struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - - if (!llm_is_enabled(adreno_dev)) - return; - - kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL, - STATE_OF_CHILD, STATE_OF_CHILD_01); - - if (((device->pwrctrl.num_pwrlevels - 2) - - device->pwrctrl.active_pwrlevel) <= LM_DCVS_LIMIT) - return; - - udelay(1); - - /* Turn on all children */ - kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL, - STATE_OF_CHILD | IDLE_FULL_LM_SLEEP, 0); - - /* Wait for IDLE_FULL_ACK to be zero and WAKEUP_ACK to be set */ - for (retry = 0; retry < 5; retry++) { - udelay(1); - kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r); - if ((r & (WAKEUP_ACK | IDLE_FULL_ACK)) == WAKEUP_ACK) - break; - } - - if (retry == 5) - KGSL_CORE_ERR("GPMU: LLM failed to wake: 0x%X\n", r); -} - -static bool llm_is_awake(struct adreno_device *adreno_dev) -{ - unsigned int r; - struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - - kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r); - return r & WAKEUP_ACK; -} - static void a540_lm_init(struct adreno_device *adreno_dev) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - uint32_t agc_lm_config = + uint32_t agc_lm_config = AGC_BCL_DISABLED | ((ADRENO_CHIPID_PATCH(adreno_dev->chipid) & 0x3) << AGC_GPU_VERSION_SHIFT); unsigned int r; @@ -1518,11 +1423,6 @@ static void a540_lm_init(struct adreno_device *adreno_dev) AGC_LM_CONFIG_ISENSE_ENABLE; kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r); - if (!(r & GPMU_BCL_ENABLED)) - agc_lm_config |= AGC_BCL_DISABLED; - - if (r & GPMU_LLM_ENABLED) - agc_lm_config |= AGC_LLM_ENABLED; if ((r & GPMU_ISENSE_STATUS) == 
GPMU_ISENSE_END_POINT_CAL_ERR) { KGSL_CORE_ERR( @@ -1551,9 +1451,6 @@ static void a540_lm_init(struct adreno_device *adreno_dev) kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, VOLTAGE_INTR_EN); - - if (lm_on(adreno_dev)) - wake_llm(adreno_dev); } @@ -1665,14 +1562,6 @@ static void a5xx_enable_64bit(struct adreno_device *adreno_dev) kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1); } -static void a5xx_pre_reset(struct adreno_device *adreno_dev) -{ - if (adreno_is_a540(adreno_dev) && lm_on(adreno_dev)) { - if (llm_is_awake(adreno_dev)) - sleep_llm(adreno_dev); - } -} - /* * a5xx_gpmu_reset() - Re-enable GPMU based power features and restart GPMU * @work: Pointer to the work struct for gpmu reset @@ -1707,8 +1596,6 @@ static void a5xx_gpmu_reset(struct work_struct *work) if (a5xx_regulator_enable(adreno_dev)) goto out; - a5xx_pre_reset(adreno_dev); - /* Soft reset of the GPMU block */ kgsl_regwrite(device, A5XX_RBBM_BLOCK_SW_RESET_CMD, BIT(16)); @@ -1875,6 +1762,11 @@ static void a5xx_start(struct adreno_device *adreno_dev) */ kgsl_regrmw(device, A5XX_RB_DBG_ECO_CNT, 0, (1 << 9)); } + /* + * Disable UCHE global filter as SP can invalidate/flush + * independently + */ + kgsl_regwrite(device, A5XX_UCHE_MODE_CNTL, BIT(29)); /* Set the USE_RETENTION_FLOPS chicken bit */ kgsl_regwrite(device, A5XX_CP_CHICKEN_DBG, 0x02000000); @@ -2147,9 +2039,11 @@ static int _me_init_ucode_workarounds(struct adreno_device *adreno_dev) case ADRENO_REV_A540: /* * WFI after every direct-render 3D mode draw and - * WFI after every 2D Mode 3 draw. + * WFI after every 2D Mode 3 draw. This is needed + * only on a540v1. */ - return 0x0000000A; + if (adreno_is_a540v1(adreno_dev)) + return 0x0000000A; default: return 0x00000000; /* No ucode workarounds enabled */ } @@ -3535,6 +3429,5 @@ struct adreno_gpudev adreno_a5xx_gpudev = { .preemption_init = a5xx_preemption_init, .preemption_schedule = a5xx_preemption_schedule, .enable_64bit = a5xx_enable_64bit, - .pre_reset = a5xx_pre_reset, .clk_set_options = a5xx_clk_set_options, }; diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c index 4baee4a5c0b1..09c550c9f58c 100644 --- a/drivers/gpu/msm/adreno_a5xx_preempt.c +++ b/drivers/gpu/msm/adreno_a5xx_preempt.c @@ -37,7 +37,7 @@ static void _update_wptr(struct adreno_device *adreno_dev) rb->wptr); rb->dispatch_q.expires = jiffies + - msecs_to_jiffies(adreno_cmdbatch_timeout); + msecs_to_jiffies(adreno_drawobj_timeout); } spin_unlock_irqrestore(&rb->preempt_lock, flags); diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c index 680827e5b848..fffe08038bcd 100644 --- a/drivers/gpu/msm/adreno_debugfs.c +++ b/drivers/gpu/msm/adreno_debugfs.c @@ -129,7 +129,7 @@ typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i, static void sync_event_print(struct seq_file *s, - struct kgsl_cmdbatch_sync_event *sync_event) + struct kgsl_drawobj_sync_event *sync_event) { switch (sync_event->type) { case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: { @@ -153,12 +153,12 @@ struct flag_entry { const char *str; }; -static const struct flag_entry cmdbatch_flags[] = {KGSL_CMDBATCH_FLAGS}; +static const struct flag_entry drawobj_flags[] = {KGSL_DRAWOBJ_FLAGS}; -static const struct flag_entry cmdbatch_priv[] = { - { CMDBATCH_FLAG_SKIP, "skip"}, - { CMDBATCH_FLAG_FORCE_PREAMBLE, "force_preamble"}, - { CMDBATCH_FLAG_WFI, "wait_for_idle" }, +static const struct flag_entry cmdobj_priv[] = { + { CMDOBJ_SKIP, "skip"}, + { CMDOBJ_FORCE_PREAMBLE, "force_preamble"}, + { 
CMDOBJ_WFI, "wait_for_idle" }, }; static const struct flag_entry context_flags[] = {KGSL_CONTEXT_FLAGS}; @@ -199,42 +199,54 @@ static void print_flags(struct seq_file *s, const struct flag_entry *table, seq_puts(s, "None"); } -static void cmdbatch_print(struct seq_file *s, struct kgsl_cmdbatch *cmdbatch) +static void syncobj_print(struct seq_file *s, + struct kgsl_drawobj_sync *syncobj) { - struct kgsl_cmdbatch_sync_event *event; + struct kgsl_drawobj_sync_event *event; unsigned int i; - /* print fences first, since they block this cmdbatch */ + seq_puts(s, " syncobj "); - for (i = 0; i < cmdbatch->numsyncs; i++) { - event = &cmdbatch->synclist[i]; + for (i = 0; i < syncobj->numsyncs; i++) { + event = &syncobj->synclist[i]; - if (!kgsl_cmdbatch_event_pending(cmdbatch, i)) + if (!kgsl_drawobj_event_pending(syncobj, i)) continue; - /* - * Timestamp is 0 for KGSL_CONTEXT_SYNC, but print it anyways - * so that it is clear if the fence was a separate submit - * or part of an IB submit. - */ - seq_printf(s, "\t%d ", cmdbatch->timestamp); sync_event_print(s, event); seq_puts(s, "\n"); } +} - /* if this flag is set, there won't be an IB */ - if (cmdbatch->flags & KGSL_CONTEXT_SYNC) - return; +static void cmdobj_print(struct seq_file *s, + struct kgsl_drawobj_cmd *cmdobj) +{ + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); - seq_printf(s, "\t%d: ", cmdbatch->timestamp); + if (drawobj->type == CMDOBJ_TYPE) + seq_puts(s, " cmdobj "); + else + seq_puts(s, " markerobj "); - seq_puts(s, " flags: "); - print_flags(s, cmdbatch_flags, ARRAY_SIZE(cmdbatch_flags), - cmdbatch->flags); + seq_printf(s, "\t %d ", drawobj->timestamp); seq_puts(s, " priv: "); - print_flags(s, cmdbatch_priv, ARRAY_SIZE(cmdbatch_priv), - cmdbatch->priv); + print_flags(s, cmdobj_priv, ARRAY_SIZE(cmdobj_priv), + cmdobj->priv); +} + +static void drawobj_print(struct seq_file *s, + struct kgsl_drawobj *drawobj) +{ + if (drawobj->type == SYNCOBJ_TYPE) + syncobj_print(s, SYNCOBJ(drawobj)); + else if ((drawobj->type == CMDOBJ_TYPE) || + (drawobj->type == MARKEROBJ_TYPE)) + cmdobj_print(s, CMDOBJ(drawobj)); + + seq_puts(s, " flags: "); + print_flags(s, drawobj_flags, ARRAY_SIZE(drawobj_flags), + drawobj->flags); seq_puts(s, "\n"); } @@ -285,13 +297,13 @@ static int ctx_print(struct seq_file *s, void *unused) queued, consumed, retired, drawctxt->internal_timestamp); - seq_puts(s, "cmdqueue:\n"); + seq_puts(s, "drawqueue:\n"); spin_lock(&drawctxt->lock); - for (i = drawctxt->cmdqueue_head; - i != drawctxt->cmdqueue_tail; - i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE)) - cmdbatch_print(s, drawctxt->cmdqueue[i]); + for (i = drawctxt->drawqueue_head; + i != drawctxt->drawqueue_tail; + i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE)) + drawobj_print(s, drawctxt->drawqueue[i]); spin_unlock(&drawctxt->lock); seq_puts(s, "events:\n"); diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c index 522c32743d3d..cb4108b4e1f9 100644 --- a/drivers/gpu/msm/adreno_dispatch.c +++ b/drivers/gpu/msm/adreno_dispatch.c @@ -25,7 +25,7 @@ #include "adreno_trace.h" #include "kgsl_sharedmem.h" -#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s)) +#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s)) /* Time in ms after which the dispatcher tries to schedule an unscheduled RB */ unsigned int adreno_dispatch_starvation_time = 2000; @@ -43,13 +43,13 @@ unsigned int adreno_dispatch_time_slice = 25; unsigned int adreno_disp_preempt_fair_sched; /* Number of commands that can be queued in a context before it sleeps */ -static unsigned int 
_context_cmdqueue_size = 50; +static unsigned int _context_drawqueue_size = 50; /* Number of milliseconds to wait for the context queue to clear */ static unsigned int _context_queue_wait = 10000; -/* Number of command batches sent at a time from a single context */ -static unsigned int _context_cmdbatch_burst = 5; +/* Number of drawobjs sent at a time from a single context */ +static unsigned int _context_drawobj_burst = 5; /* * GFT throttle parameters. If GFT recovered more than @@ -73,24 +73,25 @@ static unsigned int _dispatcher_q_inflight_hi = 15; static unsigned int _dispatcher_q_inflight_lo = 4; /* Command batch timeout (in milliseconds) */ -unsigned int adreno_cmdbatch_timeout = 2000; +unsigned int adreno_drawobj_timeout = 2000; /* Interval for reading and comparing fault detection registers */ static unsigned int _fault_timer_interval = 200; -#define CMDQUEUE_RB(_cmdqueue) \ +#define DRAWQUEUE_RB(_drawqueue) \ ((struct adreno_ringbuffer *) \ - container_of((_cmdqueue), struct adreno_ringbuffer, dispatch_q)) + container_of((_drawqueue),\ + struct adreno_ringbuffer, dispatch_q)) -#define CMDQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q) +#define DRAWQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q) -static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev, - struct adreno_dispatcher_cmdqueue *cmdqueue); +static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev, + struct adreno_dispatcher_drawqueue *drawqueue); -static inline bool cmdqueue_is_current( - struct adreno_dispatcher_cmdqueue *cmdqueue) +static inline bool drawqueue_is_current( + struct adreno_dispatcher_drawqueue *drawqueue) { - struct adreno_ringbuffer *rb = CMDQUEUE_RB(cmdqueue); + struct adreno_ringbuffer *rb = DRAWQUEUE_RB(drawqueue); struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb); return (adreno_dev->cur_rb == rb); @@ -114,7 +115,8 @@ static int __count_context(struct adreno_context *drawctxt, void *data) return time_after(jiffies, expires) ? 0 : 1; } -static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data) +static int __count_drawqueue_context(struct adreno_context *drawctxt, + void *data) { unsigned long expires = drawctxt->active_time + msecs_to_jiffies(100); @@ -122,7 +124,7 @@ static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data) return 0; return (&drawctxt->rb->dispatch_q == - (struct adreno_dispatcher_cmdqueue *) data) ? 1 : 0; + (struct adreno_dispatcher_drawqueue *) data) ? 
1 : 0; } static int _adreno_count_active_contexts(struct adreno_device *adreno_dev, @@ -142,7 +144,7 @@ static int _adreno_count_active_contexts(struct adreno_device *adreno_dev, } static void _track_context(struct adreno_device *adreno_dev, - struct adreno_dispatcher_cmdqueue *cmdqueue, + struct adreno_dispatcher_drawqueue *drawqueue, struct adreno_context *drawctxt) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); @@ -154,9 +156,9 @@ static void _track_context(struct adreno_device *adreno_dev, device->active_context_count = _adreno_count_active_contexts(adreno_dev, __count_context, NULL); - cmdqueue->active_context_count = + drawqueue->active_context_count = _adreno_count_active_contexts(adreno_dev, - __count_cmdqueue_context, cmdqueue); + __count_drawqueue_context, drawqueue); spin_unlock(&adreno_dev->active_list_lock); } @@ -169,9 +171,9 @@ static void _track_context(struct adreno_device *adreno_dev, */ static inline int -_cmdqueue_inflight(struct adreno_dispatcher_cmdqueue *cmdqueue) +_drawqueue_inflight(struct adreno_dispatcher_drawqueue *drawqueue) { - return (cmdqueue->active_context_count > 1) + return (drawqueue->active_context_count > 1) ? _dispatcher_q_inflight_lo : _dispatcher_q_inflight_hi; } @@ -271,20 +273,20 @@ static void start_fault_timer(struct adreno_device *adreno_dev) } /** - * _retire_marker() - Retire a marker command batch without sending it to the - * hardware - * @cmdbatch: Pointer to the cmdbatch to retire + * _retire_timestamp() - Retire object without sending it + * to the hardware + * @drawobj: Pointer to the object to retire * - * In some cases marker commands can be retired by the software without going to - * the GPU. In those cases, update the memstore from the CPU, kick off the - * event engine to handle expired events and destroy the command batch. + * In some cases ibs can be retired by the software + * without going to the GPU. In those cases, update the + * memstore from the CPU, kick off the event engine to handle + * expired events and destroy the ib. */ -static void _retire_marker(struct kgsl_cmdbatch *cmdbatch) +static void _retire_timestamp(struct kgsl_drawobj *drawobj) { - struct kgsl_context *context = cmdbatch->context; - struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context); + struct kgsl_context *context = drawobj->context; + struct adreno_context *drawctxt = ADRENO_CONTEXT(context); struct kgsl_device *device = context->device; - struct adreno_device *adreno_dev = ADRENO_DEVICE(device); /* * Write the start and end timestamp to the memstore to keep the @@ -292,11 +294,11 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch) */ kgsl_sharedmem_writel(device, &device->memstore, KGSL_MEMSTORE_OFFSET(context->id, soptimestamp), - cmdbatch->timestamp); + drawobj->timestamp); kgsl_sharedmem_writel(device, &device->memstore, KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp), - cmdbatch->timestamp); + drawobj->timestamp); /* Retire pending GPU events for the object */ @@ -307,13 +309,13 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch) * rptr scratch out address. At this point GPU clocks turned off. * So avoid reading GPU register directly for A3xx. 
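The hi/lo inflight values above are the dispatcher's fairness valve: a ringbuffer dispatch queue shared by several recently active contexts gets the low watermark so one context cannot monopolize it, while a lone context may fill to the high watermark. A minimal standalone sketch of the selection in _drawqueue_inflight(); values and names are illustrative, not the driver code:

#include <stdio.h>

/* Defaults mirror _dispatcher_q_inflight_hi/_lo in this file */
static unsigned int inflight_hi = 15;
static unsigned int inflight_lo = 4;

/* Shared queues get the low limit; a single-context queue may fill deeper */
static unsigned int drawqueue_inflight_limit(int active_context_count)
{
	return (active_context_count > 1) ? inflight_lo : inflight_hi;
}

int main(void)
{
	printf("1 active context  -> limit %u\n", drawqueue_inflight_limit(1));
	printf("3 active contexts -> limit %u\n", drawqueue_inflight_limit(3));
	return 0;
}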
*/ - if (adreno_is_a3xx(adreno_dev)) - trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb, - 0); + if (adreno_is_a3xx(ADRENO_DEVICE(device))) + trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb, + 0, 0); else - trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb, - adreno_get_rptr(drawctxt->rb)); - kgsl_cmdbatch_destroy(cmdbatch); + trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb, + adreno_get_rptr(drawctxt->rb), 0); + kgsl_drawobj_destroy(drawobj); } static int _check_context_queue(struct adreno_context *drawctxt) @@ -330,7 +332,7 @@ static int _check_context_queue(struct adreno_context *drawctxt) if (kgsl_context_invalid(&drawctxt->base)) ret = 1; else - ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0; + ret = drawctxt->queued < _context_drawqueue_size ? 1 : 0; spin_unlock(&drawctxt->lock); @@ -341,176 +343,151 @@ static int _check_context_queue(struct adreno_context *drawctxt) * return true if this is a marker command and the dependent timestamp has * retired */ -static bool _marker_expired(struct kgsl_cmdbatch *cmdbatch) -{ - return (cmdbatch->flags & KGSL_CMDBATCH_MARKER) && - kgsl_check_timestamp(cmdbatch->device, cmdbatch->context, - cmdbatch->marker_timestamp); -} - -static inline void _pop_cmdbatch(struct adreno_context *drawctxt) +static bool _marker_expired(struct kgsl_drawobj_cmd *markerobj) { - drawctxt->cmdqueue_head = CMDQUEUE_NEXT(drawctxt->cmdqueue_head, - ADRENO_CONTEXT_CMDQUEUE_SIZE); - drawctxt->queued--; -} -/** - * Removes all expired marker and sync cmdbatches from - * the context queue when marker command and dependent - * timestamp are retired. This function is recursive. - * returns cmdbatch if context has command, NULL otherwise. - */ -static struct kgsl_cmdbatch *_expire_markers(struct adreno_context *drawctxt) -{ - struct kgsl_cmdbatch *cmdbatch; - - if (drawctxt->cmdqueue_head == drawctxt->cmdqueue_tail) - return NULL; - - cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head]; - - if (cmdbatch == NULL) - return NULL; + struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj); - /* Check to see if this is a marker we can skip over */ - if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) && - _marker_expired(cmdbatch)) { - _pop_cmdbatch(drawctxt); - _retire_marker(cmdbatch); - return _expire_markers(drawctxt); - } - - if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) { - if (!kgsl_cmdbatch_events_pending(cmdbatch)) { - _pop_cmdbatch(drawctxt); - kgsl_cmdbatch_destroy(cmdbatch); - return _expire_markers(drawctxt); - } - } - - return cmdbatch; + return (drawobj->flags & KGSL_DRAWOBJ_MARKER) && + kgsl_check_timestamp(drawobj->device, drawobj->context, + markerobj->marker_timestamp); } -static void expire_markers(struct adreno_context *drawctxt) +static inline void _pop_drawobj(struct adreno_context *drawctxt) { - spin_lock(&drawctxt->lock); - _expire_markers(drawctxt); - spin_unlock(&drawctxt->lock); + drawctxt->drawqueue_head = DRAWQUEUE_NEXT(drawctxt->drawqueue_head, + ADRENO_CONTEXT_DRAWQUEUE_SIZE); + drawctxt->queued--; } -static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt) +static int _retire_markerobj(struct kgsl_drawobj_cmd *cmdobj, + struct adreno_context *drawctxt) { - struct kgsl_cmdbatch *cmdbatch; - bool pending = false; - - cmdbatch = _expire_markers(drawctxt); - - if (cmdbatch == NULL) - return NULL; + if (_marker_expired(cmdobj)) { + _pop_drawobj(drawctxt); + _retire_timestamp(DRAWOBJ(cmdobj)); + return 0; + } /* - * If the marker isn't expired but the SKIP bit is set - * then there are real 
commands following this one in - the queue. This means that we need to dispatch the - command so that we can keep the timestamp accounting - correct. If skip isn't set then we block this queue + * If the marker isn't expired but the SKIP bit + * is set then there are real commands following + * this one in the queue. This means that we + * need to dispatch the command so that we can + * keep the timestamp accounting correct. If + * skip isn't set then we block this queue * until the dependent timestamp expires */ - if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) && - (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv))) - pending = true; + return test_bit(CMDOBJ_SKIP, &cmdobj->priv) ? 1 : -EAGAIN; +} - if (kgsl_cmdbatch_events_pending(cmdbatch)) - pending = true; +static int _retire_syncobj(struct kgsl_drawobj_sync *syncobj, + struct adreno_context *drawctxt) +{ + if (!kgsl_drawobj_events_pending(syncobj)) { + _pop_drawobj(drawctxt); + kgsl_drawobj_destroy(DRAWOBJ(syncobj)); + return 0; + } /* - * If changes are pending and the canary timer hasn't been - * started yet, start it + * If we got here, there are pending events for the sync object. + * Start the canary timer if it hasn't been started already. */ - if (pending) { - /* - * If syncpoints are pending start the canary timer if - * it hasn't already been started - */ - if (!cmdbatch->timeout_jiffies) { - cmdbatch->timeout_jiffies = - jiffies + msecs_to_jiffies(5000); - mod_timer(&cmdbatch->timer, cmdbatch->timeout_jiffies); - } - - return ERR_PTR(-EAGAIN); + if (!syncobj->timeout_jiffies) { + syncobj->timeout_jiffies = jiffies + msecs_to_jiffies(5000); + mod_timer(&syncobj->timer, syncobj->timeout_jiffies); } - _pop_cmdbatch(drawctxt); - return cmdbatch; + return -EAGAIN; } -/** - * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue - * @drawctxt: Pointer to the adreno draw context - * - * Dequeue a new command batch from the context list +/* + * Retires all expired marker and sync objs from the context + * queue and returns one of the below + * a) next drawobj that needs to be sent to ringbuffer + * b) -EAGAIN for syncobj with syncpoints pending. + * c) -EAGAIN for markerobj whose marker timestamp has not expired yet. + * d) NULL for no commands remaining in drawqueue. */ -static struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch( - struct adreno_context *drawctxt) +static struct kgsl_drawobj *_process_drawqueue_get_next_drawobj( + struct adreno_context *drawctxt) { - struct kgsl_cmdbatch *cmdbatch; + struct kgsl_drawobj *drawobj; + unsigned int i = drawctxt->drawqueue_head; + int ret = 0; - spin_lock(&drawctxt->lock); - cmdbatch = _get_cmdbatch(drawctxt); - spin_unlock(&drawctxt->lock); + if (drawctxt->drawqueue_head == drawctxt->drawqueue_tail) + return NULL; - /* - * Delete the timer and wait for timer handler to finish executing - * on another core before queueing the buffer.
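The 5-second timer that _retire_syncobj() arms above is a classic canary: it fires only if the sync points never signal, and it must be killed with del_timer_sync() before the object is dispatched because the handler may be running on another CPU. A hedged sketch of the pattern, assuming the pre-4.15 kernel timer API this tree uses (setup_timer()/mod_timer()/del_timer_sync()); the canary_* names are illustrative:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct canary {
	struct timer_list timer;
	unsigned long timeout_jiffies;
};

/* Fires only if the sync points never signalled within the timeout */
static void canary_expired(unsigned long data)
{
	struct canary *c = (struct canary *)data;

	pr_err("sync object %p: sync points still pending after timeout\n", c);
}

static void canary_init(struct canary *c)
{
	c->timeout_jiffies = 0;
	setup_timer(&c->timer, canary_expired, (unsigned long)c);
}

/* Arm once, when the first pending sync point is seen */
static void canary_arm(struct canary *c)
{
	if (!c->timeout_jiffies) {
		c->timeout_jiffies = jiffies + msecs_to_jiffies(5000);
		mod_timer(&c->timer, c->timeout_jiffies);
	}
}

/* Must run before dispatch/free: waits out a handler on another CPU */
static void canary_cancel(struct canary *c)
{
	del_timer_sync(&c->timer);
}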
We must do this - without holding any spin lock that the timer handler might be using */ - if (!IS_ERR_OR_NULL(cmdbatch)) - del_timer_sync(&cmdbatch->timer); + for (i = drawctxt->drawqueue_head; i != drawctxt->drawqueue_tail; + i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE)) { + + drawobj = drawctxt->drawqueue[i]; + + if (drawobj == NULL) + return NULL; + + if (drawobj->type == CMDOBJ_TYPE) + return drawobj; + else if (drawobj->type == MARKEROBJ_TYPE) { + ret = _retire_markerobj(CMDOBJ(drawobj), drawctxt); + /* Special case where marker needs to be sent to GPU */ + if (ret == 1) + return drawobj; + } else if (drawobj->type == SYNCOBJ_TYPE) + ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt); + + if (ret == -EAGAIN) + return ERR_PTR(-EAGAIN); + + continue; + } - return cmdbatch; + return NULL; } /** - * adreno_dispatcher_requeue_cmdbatch() - Put a command back on the context + * adreno_dispatcher_requeue_cmdobj() - Put a command back on the context * queue * @drawctxt: Pointer to the adreno draw context - * @cmdbatch: Pointer to the KGSL cmdbatch to requeue + * @cmdobj: Pointer to the KGSL command object to requeue * * Failure to submit a command to the ringbuffer isn't the fault of the command * being submitted so if a failure happens, push it back on the head of the * context queue to be reconsidered again unless the context got detached. */ -static inline int adreno_dispatcher_requeue_cmdbatch( - struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch) +static inline int adreno_dispatcher_requeue_cmdobj( + struct adreno_context *drawctxt, + struct kgsl_drawobj_cmd *cmdobj) { unsigned int prev; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); spin_lock(&drawctxt->lock); if (kgsl_context_detached(&drawctxt->base) || kgsl_context_invalid(&drawctxt->base)) { spin_unlock(&drawctxt->lock); - /* get rid of this cmdbatch since the context is bad */ - kgsl_cmdbatch_destroy(cmdbatch); + /* get rid of this drawobj since the context is bad */ + kgsl_drawobj_destroy(drawobj); return -ENOENT; } - prev = drawctxt->cmdqueue_head == 0 ? - (ADRENO_CONTEXT_CMDQUEUE_SIZE - 1) : - (drawctxt->cmdqueue_head - 1); + prev = drawctxt->drawqueue_head == 0 ?
+ (ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1) : + (drawctxt->drawqueue_head - 1); /* * The maximum queue size always needs to be one less than the size of - * the ringbuffer queue so there is "room" to put the cmdbatch back in + * the ringbuffer queue so there is "room" to put the drawobj back in */ - BUG_ON(prev == drawctxt->cmdqueue_tail); + WARN_ON(prev == drawctxt->drawqueue_tail); - drawctxt->cmdqueue[prev] = cmdbatch; + drawctxt->drawqueue[prev] = drawobj; drawctxt->queued++; /* Reset the command queue head to reflect the newly requeued change */ - drawctxt->cmdqueue_head = prev; + drawctxt->drawqueue_head = prev; spin_unlock(&drawctxt->lock); return 0; } @@ -545,21 +522,22 @@ static void dispatcher_queue_context(struct adreno_device *adreno_dev, } /** - * sendcmd() - Send a command batch to the GPU hardware + * sendcmd() - Send a drawobj to the GPU hardware * @dispatcher: Pointer to the adreno dispatcher struct - * @cmdbatch: Pointer to the KGSL cmdbatch being sent + * @drawobj: Pointer to the KGSL drawobj being sent * - * Send a KGSL command batch to the GPU hardware + * Send a KGSL drawobj to the GPU hardware */ static int sendcmd(struct adreno_device *adreno_dev, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj_cmd *cmdobj) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; - struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context); - struct adreno_dispatcher_cmdqueue *dispatch_q = - ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch); + struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); + struct adreno_dispatcher_drawqueue *dispatch_q = + ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj); struct adreno_submit_time time; uint64_t secs = 0; unsigned long nsecs = 0; @@ -588,15 +566,15 @@ static int sendcmd(struct adreno_device *adreno_dev, set_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv); } - if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv)) { - set_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv); - cmdbatch->profile_index = adreno_dev->cmdbatch_profile_index; - adreno_dev->cmdbatch_profile_index = - (adreno_dev->cmdbatch_profile_index + 1) % - ADRENO_CMDBATCH_PROFILE_COUNT; + if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv)) { + set_bit(CMDOBJ_PROFILE, &cmdobj->priv); + cmdobj->profile_index = adreno_dev->profile_index; + adreno_dev->profile_index = + (adreno_dev->profile_index + 1) % + ADRENO_DRAWOBJ_PROFILE_COUNT; } - ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch, &time); + ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdobj, &time); /* * On the first command, if the submission was successful, then read the @@ -649,17 +627,17 @@ static int sendcmd(struct adreno_device *adreno_dev, secs = time.ktime; nsecs = do_div(secs, 1000000000); - trace_adreno_cmdbatch_submitted(cmdbatch, (int) dispatcher->inflight, + trace_adreno_cmdbatch_submitted(drawobj, (int) dispatcher->inflight, time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb, adreno_get_rptr(drawctxt->rb)); mutex_unlock(&device->mutex); - cmdbatch->submit_ticks = time.ticks; + cmdobj->submit_ticks = time.ticks; - dispatch_q->cmd_q[dispatch_q->tail] = cmdbatch; + dispatch_q->cmd_q[dispatch_q->tail] = cmdobj; dispatch_q->tail = (dispatch_q->tail + 1) % - ADRENO_DISPATCH_CMDQUEUE_SIZE; + ADRENO_DISPATCH_DRAWQUEUE_SIZE; /* * For the first submission in any given command queue update the */ @@ -670,7
+648,7 @@ static int sendcmd(struct adreno_device *adreno_dev, if (dispatch_q->inflight == 1) dispatch_q->expires = jiffies + - msecs_to_jiffies(adreno_cmdbatch_timeout); + msecs_to_jiffies(adreno_drawobj_timeout); /* * If we believe ourselves to be current and preemption isn't a thing, @@ -678,7 +656,7 @@ static int sendcmd(struct adreno_device *adreno_dev, * thing and the timer will be set up in due time */ if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) { - if (cmdqueue_is_current(dispatch_q)) + if (drawqueue_is_current(dispatch_q)) mod_timer(&dispatcher->timer, dispatch_q->expires); } @@ -704,75 +682,70 @@ static int sendcmd(struct adreno_device *adreno_dev, static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev, struct adreno_context *drawctxt) { - struct adreno_dispatcher_cmdqueue *dispatch_q = + struct adreno_dispatcher_drawqueue *dispatch_q = &(drawctxt->rb->dispatch_q); int count = 0; int ret = 0; - int inflight = _cmdqueue_inflight(dispatch_q); + int inflight = _drawqueue_inflight(dispatch_q); unsigned int timestamp; if (dispatch_q->inflight >= inflight) { - expire_markers(drawctxt); + spin_lock(&drawctxt->lock); + _process_drawqueue_get_next_drawobj(drawctxt); + spin_unlock(&drawctxt->lock); return -EBUSY; } /* - * Each context can send a specific number of command batches per cycle + * Each context can send a specific number of drawobjs per cycle */ - while ((count < _context_cmdbatch_burst) && + while ((count < _context_drawobj_burst) && (dispatch_q->inflight < inflight)) { - struct kgsl_cmdbatch *cmdbatch; + struct kgsl_drawobj *drawobj; + struct kgsl_drawobj_cmd *cmdobj; if (adreno_gpu_fault(adreno_dev) != 0) break; - cmdbatch = adreno_dispatcher_get_cmdbatch(drawctxt); + spin_lock(&drawctxt->lock); + drawobj = _process_drawqueue_get_next_drawobj(drawctxt); /* - * adreno_context_get_cmdbatch returns -EAGAIN if the current - * cmdbatch has pending sync points so no more to do here. + * _process_drawqueue_get_next_drawobj returns -EAGAIN if the + * current drawobj has pending sync points so no more to do here. * When the sync points are satisfied then the context will get * requeued */ - if (IS_ERR_OR_NULL(cmdbatch)) { - if (IS_ERR(cmdbatch)) - ret = PTR_ERR(cmdbatch); + if (IS_ERR_OR_NULL(drawobj)) { + if (IS_ERR(drawobj)) + ret = PTR_ERR(drawobj); + spin_unlock(&drawctxt->lock); break; } + _pop_drawobj(drawctxt); + spin_unlock(&drawctxt->lock); - /* - * If this is a synchronization submission then there are no - * commands to submit. Discard it and get the next item from - * the queue.
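dispatcher_context_sendcmds() drains one context under two caps: the per-turn burst (_context_drawobj_burst) and the ringbuffer inflight limit. A reduced standalone model of that loop with illustrative numbers:

#include <stdio.h>

#define BURST		5	/* mirrors _context_drawobj_burst */
#define INFLIGHT_LIMIT	15	/* mirrors _drawqueue_inflight() */

int main(void)
{
	int queued = 12;	/* objects waiting in the context queue */
	int inflight = 11;	/* already submitted to the ringbuffer */
	int count = 0;

	while (count < BURST && inflight < INFLIGHT_LIMIT && queued > 0) {
		queued--;
		inflight++;
		count++;
	}

	/* Only 4 of the 5 allowed submissions land here, because the
	 * inflight limit is reached first */
	printf("submitted %d, inflight now %d, still queued %d\n",
	       count, inflight, queued);
	return 0;
}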
Decrement count so this packet doesn't count - * against the burst for the context - */ - - if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) { - kgsl_cmdbatch_destroy(cmdbatch); - continue; - } - - timestamp = cmdbatch->timestamp; - - ret = sendcmd(adreno_dev, cmdbatch); + timestamp = drawobj->timestamp; + cmdobj = CMDOBJ(drawobj); + ret = sendcmd(adreno_dev, cmdobj); /* - * On error from sendcmd() try to requeue the command batch + * On error from sendcmd() try to requeue the cmdobj * unless we got back -ENOENT which means that the context has * been detached and there will be no more deliveries from here */ if (ret != 0) { - /* Destroy the cmdbatch on -ENOENT */ + /* Destroy the cmdobj on -ENOENT */ if (ret == -ENOENT) - kgsl_cmdbatch_destroy(cmdbatch); + kgsl_drawobj_destroy(drawobj); else { /* * If the requeue returns an error, return that * instead of whatever sendcmd() sent us */ - int r = adreno_dispatcher_requeue_cmdbatch( - drawctxt, cmdbatch); + int r = adreno_dispatcher_requeue_cmdobj( + drawctxt, cmdobj); if (r) ret = r; } @@ -934,99 +907,87 @@ static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev) /** * get_timestamp() - Return the next timestamp for the context * @drawctxt - Pointer to an adreno draw context struct - * @cmdbatch - Pointer to a command batch + * @drawobj - Pointer to a drawobj * @timestamp - Pointer to a timestamp value possibly passed from the user + * @user_ts - user generated timestamp * * Assign a timestamp based on the settings of the draw context and the command * batch. */ static int get_timestamp(struct adreno_context *drawctxt, - struct kgsl_cmdbatch *cmdbatch, unsigned int *timestamp) + struct kgsl_drawobj *drawobj, unsigned int *timestamp, + unsigned int user_ts) { - /* Synchronization commands don't get a timestamp */ - if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) { - *timestamp = 0; - return 0; - } if (drawctxt->base.flags & KGSL_CONTEXT_USER_GENERATED_TS) { /* * User specified timestamps need to be greater than the last * issued timestamp in the context */ - if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0) + if (timestamp_cmp(drawctxt->timestamp, user_ts) >= 0) return -ERANGE; - drawctxt->timestamp = *timestamp; + drawctxt->timestamp = user_ts; } else drawctxt->timestamp++; *timestamp = drawctxt->timestamp; + drawobj->timestamp = *timestamp; return 0; } -/** - * adreno_dispactcher_queue_cmd() - Queue a new command in the context - * @adreno_dev: Pointer to the adreno device struct - * @drawctxt: Pointer to the adreno draw context - * @cmdbatch: Pointer to the command batch being submitted - * @timestamp: Pointer to the requested timestamp - * - * Queue a command in the context - if there isn't any room in the queue, then - * block until there is - */ -int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, - struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch, - uint32_t *timestamp) +static void _set_ft_policy(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt, + struct kgsl_drawobj_cmd *cmdobj) { - struct adreno_dispatcher_cmdqueue *dispatch_q = - ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch); - int ret; - - spin_lock(&drawctxt->lock); - - if (kgsl_context_detached(&drawctxt->base)) { - spin_unlock(&drawctxt->lock); - return -ENOENT; - } + /* + * Set the fault tolerance policy for the command batch - assuming the + * context hasn't disabled FT use the current device policy + */ + if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE) + set_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy); + 
else + cmdobj->fault_policy = adreno_dev->ft_policy; +} +static void _cmdobj_set_flags(struct adreno_context *drawctxt, + struct kgsl_drawobj_cmd *cmdobj) +{ /* * Force the preamble for this submission only - this is usually * requested by the dispatcher as part of fault recovery */ - if (test_and_clear_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv)) - set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv); + if (test_and_clear_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv)) + set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv); /* - * Force the premable if set from userspace in the context or cmdbatch - * flags + * Force the preamble if set from userspace in the context or + * command obj flags */ - if ((drawctxt->base.flags & KGSL_CONTEXT_CTX_SWITCH) || - (cmdbatch->flags & KGSL_CMDBATCH_CTX_SWITCH)) - set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv); + if ((drawctxt->base.flags & KGSL_CONTEXT_CTX_SWITCH) || + (cmdobj->base.flags & KGSL_DRAWOBJ_CTX_SWITCH)) + set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv); - /* Skip this cmdbatch commands if IFH_NOP is enabled */ + /* Skip this ib if IFH_NOP is enabled */ if (drawctxt->base.flags & KGSL_CONTEXT_IFH_NOP) - set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv); + set_bit(CMDOBJ_SKIP, &cmdobj->priv); /* * If we are waiting for the end of frame and it hasn't appeared yet, - * then mark the command batch as skipped. It will still progress + * then mark the command obj as skipped. It will still progress * through the pipeline but it won't actually send any commands */ if (test_bit(ADRENO_CONTEXT_SKIP_EOF, &drawctxt->base.priv)) { - set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv); + set_bit(CMDOBJ_SKIP, &cmdobj->priv); /* - * If this command batch represents the EOF then clear the way + * If this command obj represents the EOF then clear the way * for the dispatcher to continue submitting */ - if (cmdbatch->flags & KGSL_CMDBATCH_END_OF_FRAME) { + if (cmdobj->base.flags & KGSL_DRAWOBJ_END_OF_FRAME) { clear_bit(ADRENO_CONTEXT_SKIP_EOF, &drawctxt->base.priv); @@ -1038,10 +999,84 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, &drawctxt->base.priv); } } +} - /* Wait for room in the context queue */ +static inline int _check_context_state(struct kgsl_context *context) +{ + if (kgsl_context_invalid(context)) + return -EDEADLK; + + if (kgsl_context_detached(context)) + return -ENOENT; + + return 0; +} + +static inline bool _verify_ib(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, struct kgsl_memobj_node *ib) +{ + struct kgsl_device *device = dev_priv->device; + struct kgsl_process_private *private = dev_priv->process_priv; + + /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */ + if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) { + pr_context(device, context, "ctxt %d invalid ib size %lld\n", + context->id, ib->size); + return false; + } + + /* Make sure that the address is mapped */ + if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) { + pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n", + context->id, ib->gpuaddr); + return false; + } + + return true; +} + +static inline int _verify_cmdobj(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, struct kgsl_drawobj *drawobj[], + uint32_t count) +{ + struct kgsl_device *device = dev_priv->device; + struct kgsl_memobj_node *ib; + unsigned int i; + + for (i = 0; i < count; i++) { + /* Verify the IBs before they get queued */ + if (drawobj[i]->type == CMDOBJ_TYPE) { + struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj[i]); + + list_for_each_entry(ib, &cmdobj->cmdlist, node) + if (_verify_ib(dev_priv, +
&ADRENO_CONTEXT(context)->base, ib) + == false) + return -EINVAL; + /* + * Clear the wake on touch bit to indicate an IB has + * been submitted since the last time we set it. + * But only clear it when we have rendering commands. + */ + device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH; + } + + /* A3XX does not have support for drawobj profiling */ + if (adreno_is_a3xx(ADRENO_DEVICE(device)) && + (drawobj[i]->flags & KGSL_DRAWOBJ_PROFILING)) + return -EOPNOTSUPP; + } - while (drawctxt->queued >= _context_cmdqueue_size) { + return 0; +} + +static inline int _wait_for_room_in_context_queue( + struct adreno_context *drawctxt) +{ + int ret = 0; + + /* Wait for room in the context queue */ + while (drawctxt->queued >= _context_drawqueue_size) { trace_adreno_drawctxt_sleep(drawctxt); spin_unlock(&drawctxt->lock); @@ -1052,98 +1087,210 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, spin_lock(&drawctxt->lock); trace_adreno_drawctxt_wake(drawctxt); - if (ret <= 0) { - spin_unlock(&drawctxt->lock); + if (ret <= 0) return (ret == 0) ? -ETIMEDOUT : (int) ret; - } } + + return 0; +} + +static unsigned int _check_context_state_to_queue_cmds( + struct adreno_context *drawctxt) +{ + int ret = _check_context_state(&drawctxt->base); + + if (ret) + return ret; + + ret = _wait_for_room_in_context_queue(drawctxt); + if (ret) + return ret; + /* * Account for the possiblity that the context got invalidated * while we were sleeping */ + return _check_context_state(&drawctxt->base); +} - if (kgsl_context_invalid(&drawctxt->base)) { - spin_unlock(&drawctxt->lock); - return -EDEADLK; - } - if (kgsl_context_detached(&drawctxt->base)) { - spin_unlock(&drawctxt->lock); - return -ENOENT; - } +static void _queue_drawobj(struct adreno_context *drawctxt, + struct kgsl_drawobj *drawobj) +{ + /* Put the command into the queue */ + drawctxt->drawqueue[drawctxt->drawqueue_tail] = drawobj; + drawctxt->drawqueue_tail = (drawctxt->drawqueue_tail + 1) % + ADRENO_CONTEXT_DRAWQUEUE_SIZE; + drawctxt->queued++; + trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued); +} - ret = get_timestamp(drawctxt, cmdbatch, timestamp); - if (ret) { - spin_unlock(&drawctxt->lock); +static int _queue_markerobj(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *markerobj, + uint32_t *timestamp, unsigned int user_ts) +{ + struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj); + int ret; + + ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts); + if (ret) return ret; + + /* + * See if we can fastpath this thing - if nothing is queued + * and nothing is inflight retire without bothering the GPU + */ + if (!drawctxt->queued && kgsl_check_timestamp(drawobj->device, + drawobj->context, drawctxt->queued_timestamp)) { + trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued); + _retire_timestamp(drawobj); + return 1; } - cmdbatch->timestamp = *timestamp; + /* + * Remember the last queued timestamp - the marker will block + * until that timestamp is expired (unless another command + * comes along and forces the marker to execute) + */ - if (cmdbatch->flags & KGSL_CMDBATCH_MARKER) { + markerobj->marker_timestamp = drawctxt->queued_timestamp; + drawctxt->queued_timestamp = *timestamp; + _set_ft_policy(adreno_dev, drawctxt, markerobj); + _cmdobj_set_flags(drawctxt, markerobj); - /* - * See if we can fastpath this thing - if nothing is queued - * and nothing is inflight retire without bothering the GPU - */ + _queue_drawobj(drawctxt, drawobj); - if (!drawctxt->queued && 
kgsl_check_timestamp(cmdbatch->device, - cmdbatch->context, drawctxt->queued_timestamp)) { - trace_adreno_cmdbatch_queued(cmdbatch, - drawctxt->queued); + return 0; +} - _retire_marker(cmdbatch); - spin_unlock(&drawctxt->lock); - return 0; - } +static int _queue_cmdobj(struct adreno_device *adreno_dev, + struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *cmdobj, + uint32_t *timestamp, unsigned int user_ts) +{ + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + unsigned int j; + int ret; - /* - * Remember the last queued timestamp - the marker will block - * until that timestamp is expired (unless another command - * comes along and forces the marker to execute) - */ + ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts); + if (ret) + return ret; + + /* + * If this is a real command then we need to force any markers + * queued before it to dispatch to keep time linear - set the + * skip bit so the commands get NOPed. + */ + j = drawctxt->drawqueue_head; + + while (j != drawctxt->drawqueue_tail) { + if (drawctxt->drawqueue[j]->type == MARKEROBJ_TYPE) { + struct kgsl_drawobj_cmd *markerobj = + CMDOBJ(drawctxt->drawqueue[j]); + set_bit(CMDOBJ_SKIP, &markerobj->priv); + } - cmdbatch->marker_timestamp = drawctxt->queued_timestamp; + j = DRAWQUEUE_NEXT(j, ADRENO_CONTEXT_DRAWQUEUE_SIZE); } - /* SYNC commands have timestamp 0 and will get optimized out anyway */ - if (!(cmdbatch->flags & KGSL_CONTEXT_SYNC)) - drawctxt->queued_timestamp = *timestamp; + drawctxt->queued_timestamp = *timestamp; + _set_ft_policy(adreno_dev, drawctxt, cmdobj); + _cmdobj_set_flags(drawctxt, cmdobj); - /* - * Set the fault tolerance policy for the command batch - assuming the - * context hasn't disabled FT use the current device policy - */ + _queue_drawobj(drawctxt, drawobj); - if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE) - set_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy); - else - cmdbatch->fault_policy = adreno_dev->ft_policy; + return 0; +} - /* Put the command into the queue */ - drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch; - drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) % - ADRENO_CONTEXT_CMDQUEUE_SIZE; +static void _queue_syncobj(struct adreno_context *drawctxt, + struct kgsl_drawobj_sync *syncobj, uint32_t *timestamp) +{ + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); - /* - * If this is a real command then we need to force any markers queued - * before it to dispatch to keep time linear - set the skip bit so - * the commands get NOPed. 
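The marker-NOP rule described above is easy to model in isolation: when a real command is queued, every marker already waiting in the ring gets its skip bit set so timestamps stay linear. A sketch with illustrative types, not the driver structs:

#include <stdio.h>

#define QSIZE	8
#define NEXT(i)	(((i) + 1) % QSIZE)

enum obj_type { MARKER, CMD };

struct obj {
	enum obj_type type;
	int skip;
};

int main(void)
{
	/* Two stale markers sit ahead of the newly queued command */
	struct obj q[QSIZE] = { { MARKER, 0 }, { MARKER, 0 }, { CMD, 0 } };
	unsigned int head = 0, tail = 3, i;

	/* The scan from _queue_cmdobj(): NOP every queued marker */
	for (i = head; i != tail; i = NEXT(i))
		if (q[i].type == MARKER)
			q[i].skip = 1;

	for (i = head; i != tail; i = NEXT(i))
		printf("slot %u: %s skip=%d\n", i,
		       q[i].type == MARKER ? "marker" : "cmd", q[i].skip);
	return 0;
}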
- */ + *timestamp = 0; + drawobj->timestamp = 0; - if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)) { - unsigned int i = drawctxt->cmdqueue_head; + _queue_drawobj(drawctxt, drawobj); +} - while (i != drawctxt->cmdqueue_tail) { - if (drawctxt->cmdqueue[i]->flags & KGSL_CMDBATCH_MARKER) - set_bit(CMDBATCH_FLAG_SKIP, - &drawctxt->cmdqueue[i]->priv); +/** + * adreno_dispatcher_queue_cmds() - Queue a new draw object in the context + * @dev_priv: Pointer to the device private struct + * @context: Pointer to the kgsl draw context + * @drawobj: Pointer to the array of drawobj's being submitted + * @count: Number of drawobj's being submitted + * @timestamp: Pointer to the requested timestamp + * + * Queue a command in the context - if there isn't any room in the queue, then + * block until there is + */ +int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, struct kgsl_drawobj *drawobj[], + uint32_t count, uint32_t *timestamp) - i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE); +{ + struct kgsl_device *device = dev_priv->device; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_context *drawctxt = ADRENO_CONTEXT(context); + struct adreno_dispatcher_drawqueue *dispatch_q; + int ret; + unsigned int i, user_ts; + + ret = _check_context_state(&drawctxt->base); + if (ret) + return ret; + + ret = _verify_cmdobj(dev_priv, context, drawobj, count); + if (ret) + return ret; + + /* wait for the suspend gate */ + wait_for_completion(&device->halt_gate); + + spin_lock(&drawctxt->lock); + + ret = _check_context_state_to_queue_cmds(drawctxt); + if (ret) { + spin_unlock(&drawctxt->lock); + return ret; + } + + user_ts = *timestamp; + + for (i = 0; i < count; i++) { + + switch (drawobj[i]->type) { + case MARKEROBJ_TYPE: + ret = _queue_markerobj(adreno_dev, drawctxt, + CMDOBJ(drawobj[i]), + timestamp, user_ts); + if (ret == 1) { + spin_unlock(&drawctxt->lock); + goto done; + } else if (ret) { + spin_unlock(&drawctxt->lock); + return ret; + } + break; + case CMDOBJ_TYPE: + ret = _queue_cmdobj(adreno_dev, drawctxt, + CMDOBJ(drawobj[i]), + timestamp, user_ts); + if (ret) { + spin_unlock(&drawctxt->lock); + return ret; + } + break; + case SYNCOBJ_TYPE: + _queue_syncobj(drawctxt, SYNCOBJ(drawobj[i]), + timestamp); + break; + default: + spin_unlock(&drawctxt->lock); + return -EINVAL; } + } - drawctxt->queued++; - trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued); + dispatch_q = ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj[0]); _track_context(adreno_dev, dispatch_q, drawctxt); @@ -1163,8 +1310,11 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, * queue will try to schedule new commands anyway. */ - if (dispatch_q->inflight < _context_cmdbatch_burst) + if (dispatch_q->inflight < _context_drawobj_burst) adreno_dispatcher_issuecmds(adreno_dev); +done: + if (test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv)) + return -EPROTO; return 0; } @@ -1208,15 +1358,15 @@ static void mark_guilty_context(struct kgsl_device *device, unsigned int id) } /* - * If an IB inside of the command batch has a gpuaddr that matches the base + * If an IB inside of the drawobj has a gpuaddr that matches the base * passed in then zero the size which effectively skips it when it is submitted * in the ringbuffer.
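A reduced model of the skip-by-address trick documented here; the real _skip_ib() walks a kgsl_memobj_node list and marks the entry with MEMOBJ_SKIP as well, but the visible effect is the same (illustrative types):

#include <stdint.h>
#include <stdio.h>

struct ib {
	uint64_t gpuaddr;
	uint64_t size;
};

/* Zero the size of the IB whose GPU address matches the faulting IB1
 * base so it is submitted as a no-op on replay */
static void skip_ib(struct ib *ibs, int n, uint64_t base)
{
	int i;

	for (i = 0; i < n; i++)
		if (ibs[i].gpuaddr == base)
			ibs[i].size = 0;
}

int main(void)
{
	struct ib ibs[] = { { 0x1000, 256 }, { 0x2000, 512 } };

	skip_ib(ibs, 2, 0x2000);
	printf("ib0 size %llu, ib1 size %llu\n",
	       (unsigned long long)ibs[0].size,
	       (unsigned long long)ibs[1].size);
	return 0;
}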
*/ -static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base) +static void _skip_ib(struct kgsl_drawobj_cmd *cmdobj, uint64_t base) { struct kgsl_memobj_node *ib; - list_for_each_entry(ib, &cmdbatch->cmdlist, node) { + list_for_each_entry(ib, &cmdobj->cmdlist, node) { if (ib->gpuaddr == base) { ib->priv |= MEMOBJ_SKIP; if (base) @@ -1225,10 +1375,11 @@ static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base) } } -static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch, - struct kgsl_cmdbatch **replay, int count) +static void _skip_cmd(struct kgsl_drawobj_cmd *cmdobj, + struct kgsl_drawobj_cmd **replay, int count) { - struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context); + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); int i; /* @@ -1243,9 +1394,9 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch, * b) force preamble for next commandbatch */ for (i = 1; i < count; i++) { - if (replay[i]->context->id == cmdbatch->context->id) { + if (DRAWOBJ(replay[i])->context->id == drawobj->context->id) { replay[i]->fault_policy = replay[0]->fault_policy; - set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv); + set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv); set_bit(KGSL_FT_SKIPCMD, &replay[i]->fault_recovery); break; } @@ -1262,41 +1413,44 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch, drawctxt->fault_policy = replay[0]->fault_policy; } - /* set the flags to skip this cmdbatch */ - set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv); - cmdbatch->fault_recovery = 0; + /* set the flags to skip this cmdobj */ + set_bit(CMDOBJ_SKIP, &cmdobj->priv); + cmdobj->fault_recovery = 0; } -static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch, - struct kgsl_cmdbatch **replay, int count) +static void _skip_frame(struct kgsl_drawobj_cmd *cmdobj, + struct kgsl_drawobj_cmd **replay, int count) { - struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context); + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); int skip = 1; int i; for (i = 0; i < count; i++) { + struct kgsl_drawobj *replay_obj = DRAWOBJ(replay[i]); + /* - * Only operate on command batches that belong to the + * Only operate on drawobj's that belong to the * faulting context */ - if (replay[i]->context->id != cmdbatch->context->id) + if (replay_obj->context->id != drawobj->context->id) continue; /* - * Skip all the command batches in this context until + * Skip all the drawobjs in this context until * the EOF flag is seen. If the EOF flag is seen then * force the preamble for the next command. 
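The skip-until-EOF walk in _skip_frame() is clearest in a reduced model: NOP every replayed object from the faulting context up to and including the one carrying the end-of-frame flag, then force a preamble on the first object after it (illustrative flags and types):

#include <stdio.h>

#define F_EOF 1

struct robj {
	int flags;
	int skip;
	int force_preamble;
};

static void skip_frame(struct robj *replay, int count)
{
	int skipping = 1;
	int i;

	for (i = 0; i < count; i++) {
		if (skipping) {
			replay[i].skip = 1;
			if (replay[i].flags & F_EOF)
				skipping = 0; /* frame boundary reached */
		} else {
			/* first object of the next frame rebuilds GPU state */
			replay[i].force_preamble = 1;
			return;
		}
	}
}

int main(void)
{
	struct robj r[4] = { { 0 }, { F_EOF }, { 0 }, { 0 } };
	int i;

	skip_frame(r, 4);
	for (i = 0; i < 4; i++)
		printf("obj %d: skip=%d force_preamble=%d\n",
		       i, r[i].skip, r[i].force_preamble);
	return 0;
}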
*/ if (skip) { - set_bit(CMDBATCH_FLAG_SKIP, &replay[i]->priv); + set_bit(CMDOBJ_SKIP, &replay[i]->priv); - if (replay[i]->flags & KGSL_CMDBATCH_END_OF_FRAME) + if (replay_obj->flags & KGSL_DRAWOBJ_END_OF_FRAME) skip = 0; } else { - set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv); + set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv); return; } } @@ -1318,26 +1472,28 @@ static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch, set_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv); } -static void remove_invalidated_cmdbatches(struct kgsl_device *device, - struct kgsl_cmdbatch **replay, int count) +static void remove_invalidated_cmdobjs(struct kgsl_device *device, + struct kgsl_drawobj_cmd **replay, int count) { int i; for (i = 0; i < count; i++) { - struct kgsl_cmdbatch *cmd = replay[i]; - if (cmd == NULL) + struct kgsl_drawobj_cmd *cmdobj = replay[i]; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + + if (cmdobj == NULL) continue; - if (kgsl_context_detached(cmd->context) || - kgsl_context_invalid(cmd->context)) { + if (kgsl_context_detached(drawobj->context) || + kgsl_context_invalid(drawobj->context)) { replay[i] = NULL; mutex_lock(&device->mutex); kgsl_cancel_events_timestamp(device, - &cmd->context->events, cmd->timestamp); + &drawobj->context->events, drawobj->timestamp); mutex_unlock(&device->mutex); - kgsl_cmdbatch_destroy(cmd); + kgsl_drawobj_destroy(drawobj); } } } @@ -1361,9 +1517,10 @@ static inline const char *_kgsl_context_comm(struct kgsl_context *context) static void adreno_fault_header(struct kgsl_device *device, - struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch) + struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); unsigned int status, rptr, wptr, ib1sz, ib2sz; uint64_t ib1base, ib2base; @@ -1377,22 +1534,22 @@ static void adreno_fault_header(struct kgsl_device *device, ADRENO_REG_CP_IB2_BASE_HI, &ib2base); adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &ib2sz); - if (cmdbatch != NULL) { + if (drawobj != NULL) { struct adreno_context *drawctxt = - ADRENO_CONTEXT(cmdbatch->context); + ADRENO_CONTEXT(drawobj->context); - trace_adreno_gpu_fault(cmdbatch->context->id, - cmdbatch->timestamp, + trace_adreno_gpu_fault(drawobj->context->id, + drawobj->timestamp, status, rptr, wptr, ib1base, ib1sz, ib2base, ib2sz, drawctxt->rb->id); - pr_fault(device, cmdbatch, + pr_fault(device, drawobj, "gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", - cmdbatch->context->id, cmdbatch->timestamp, status, + drawobj->context->id, drawobj->timestamp, status, rptr, wptr, ib1base, ib1sz, ib2base, ib2sz); if (rb != NULL) - pr_fault(device, cmdbatch, + pr_fault(device, drawobj, "gpu fault rb %d rb sw r/w %4.4x/%4.4x\n", rb->id, rptr, rb->wptr); } else { @@ -1411,33 +1568,34 @@ static void adreno_fault_header(struct kgsl_device *device, void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev, struct adreno_context *drawctxt, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj *drawobj) { if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) && kgsl_context_detached(&drawctxt->base)) { - pr_context(KGSL_DEVICE(adreno_dev), cmdbatch->context, - "gpu detached context %d\n", cmdbatch->context->id); + pr_context(KGSL_DEVICE(adreno_dev), drawobj->context, + "gpu detached context %d\n", drawobj->context->id); clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv); } } /** - * 
process_cmdbatch_fault() - Process a cmdbatch for fault policies - * @device: Device on which the cmdbatch caused a fault - * @replay: List of cmdbatches that are to be replayed on the device. The - * faulting cmdbatch is the first command in the replay list and the remaining - * cmdbatches in the list are commands that were submitted to the same queue + * process_cmdobj_fault() - Process a cmdobj for fault policies + * @device: Device on which the cmdobj caused a fault + * @replay: List of cmdobj's that are to be replayed on the device. The + * first command in the replay list is the faulting command and the remaining + * cmdobj's in the list are commands that were submitted to the same queue * as the faulting one. - * @count: Number of cmdbatches in replay + * @count: Number of cmdobj's in replay * @base: The IB1 base at the time of fault * @fault: The fault type */ -static void process_cmdbatch_fault(struct kgsl_device *device, - struct kgsl_cmdbatch **replay, int count, +static void process_cmdobj_fault(struct kgsl_device *device, + struct kgsl_drawobj_cmd **replay, int count, unsigned int base, int fault) { - struct kgsl_cmdbatch *cmdbatch = replay[0]; + struct kgsl_drawobj_cmd *cmdobj = replay[0]; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); int i; char *state = "failed"; @@ -1451,18 +1609,18 @@ static void process_cmdbatch_fault(struct kgsl_device *device, * where 1st and 4th gpu hang are more than 3 seconds apart we * won't disable GFT and invalidate the context. */ - if (test_bit(KGSL_FT_THROTTLE, &cmdbatch->fault_policy)) { - if (time_after(jiffies, (cmdbatch->context->fault_time + if (test_bit(KGSL_FT_THROTTLE, &cmdobj->fault_policy)) { + if (time_after(jiffies, (drawobj->context->fault_time + msecs_to_jiffies(_fault_throttle_time)))) { - cmdbatch->context->fault_time = jiffies; - cmdbatch->context->fault_count = 1; + drawobj->context->fault_time = jiffies; + drawobj->context->fault_count = 1; } else { - cmdbatch->context->fault_count++; - if (cmdbatch->context->fault_count > + drawobj->context->fault_count++; + if (drawobj->context->fault_count > _fault_throttle_burst) { set_bit(KGSL_FT_DISABLE, - &cmdbatch->fault_policy); - pr_context(device, cmdbatch->context, + &cmdobj->fault_policy); + pr_context(device, drawobj->context, "gpu fault threshold exceeded %d faults in %d msecs\n", _fault_throttle_burst, _fault_throttle_time); @@ -1471,45 +1629,45 @@ static void process_cmdbatch_fault(struct kgsl_device *device, } /* - * If FT is disabled for this cmdbatch invalidate immediately + * If FT is disabled for this cmdobj invalidate immediately */ - if (test_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy) || - test_bit(KGSL_FT_TEMP_DISABLE, &cmdbatch->fault_policy)) { + if (test_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy) || + test_bit(KGSL_FT_TEMP_DISABLE, &cmdobj->fault_policy)) { state = "skipped"; - bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG); + bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG); } /* If the context is detached do not run FT on context */ - if (kgsl_context_detached(cmdbatch->context)) { + if (kgsl_context_detached(drawobj->context)) { state = "detached"; - bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG); + bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG); } /* - * Set a flag so we don't print another PM dump if the cmdbatch fails + * Set a flag so we don't print another PM dump if the cmdobj fails * again on replay */ - set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy); + set_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy); /* * A hardware 
fault generally means something was deterministically - * wrong with the command batch - no point in trying to replay it + * wrong with the cmdobj - no point in trying to replay it * Clear the replay bit and move on to the next policy level */ if (fault & ADRENO_HARD_FAULT) - clear_bit(KGSL_FT_REPLAY, &(cmdbatch->fault_policy)); + clear_bit(KGSL_FT_REPLAY, &(cmdobj->fault_policy)); /* * A timeout fault means the IB timed out - clear the policy and * invalidate - this will clear the FT_SKIP_PMDUMP bit but that is okay - * because we won't see this cmdbatch again + * because we won't see this cmdobj again */ if (fault & ADRENO_TIMEOUT_FAULT) - bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG); + bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG); /* * If the context had a GPU page fault then it is likely it would fault @@ -1517,83 +1675,84 @@ */ if (test_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, - &cmdbatch->context->priv)) { + &drawobj->context->priv)) { /* we'll need to resume the mmu later... */ - clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy); + clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy); clear_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, - &cmdbatch->context->priv); + &drawobj->context->priv); } /* - * Execute the fault tolerance policy. Each command batch stores the + * Execute the fault tolerance policy. Each cmdobj stores the * current fault policy that was set when it was queued. * As the options are tried in descending priority * (REPLAY -> SKIPIBS -> SKIPFRAME -> NOTHING) the bits are cleared - * from the cmdbatch policy so the next thing can be tried if the + * from the cmdobj policy so the next thing can be tried if the * change comes around again */ - /* Replay the hanging command batch again */ - if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy)) { - trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_REPLAY)); - set_bit(KGSL_FT_REPLAY, &cmdbatch->fault_recovery); + /* Replay the hanging cmdobj again */ + if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_REPLAY)); + set_bit(KGSL_FT_REPLAY, &cmdobj->fault_recovery); return; } /* * Skip the last IB1 that was played but replay everything else. - * Note that the last IB1 might not be in the "hung" command batch + * Note that the last IB1 might not be in the "hung" cmdobj * because the CP may have caused a page-fault while it was prefetching * the next IB1/IB2. Walk all outstanding commands and zap the * supposedly bad IB1 wherever it lurks.
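The descending cascade works because test_and_clear_bit() consumes the policy bit it acted on, so a repeat fault on the same object falls through to the next, harsher option. A reduced userspace model, with plain bit ops standing in for the kernel helpers:

#include <stdio.h>

enum { FT_REPLAY, FT_SKIPIB };

/* Stand-in for the kernel's test_and_clear_bit() */
static int test_and_clear(unsigned long *bits, int bit)
{
	int was_set = (*bits >> bit) & 1;

	*bits &= ~(1UL << bit);
	return was_set;
}

int main(void)
{
	unsigned long policy = (1UL << FT_REPLAY) | (1UL << FT_SKIPIB);
	int fault;

	/* Each fault consumes one policy bit, descending in severity */
	for (fault = 1; fault <= 3; fault++) {
		if (test_and_clear(&policy, FT_REPLAY))
			printf("fault %d: replay the object\n", fault);
		else if (test_and_clear(&policy, FT_SKIPIB))
			printf("fault %d: skip the bad IB\n", fault);
		else
			printf("fault %d: out of options, invalidate context\n",
			       fault);
	}
	return 0;
}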
*/ - if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_policy)) { - trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPIB)); - set_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_recovery); + if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPIB)); + set_bit(KGSL_FT_SKIPIB, &cmdobj->fault_recovery); for (i = 0; i < count; i++) { if (replay[i] != NULL && - replay[i]->context->id == cmdbatch->context->id) - cmdbatch_skip_ib(replay[i], base); + DRAWOBJ(replay[i])->context->id == + drawobj->context->id) + _skip_ib(replay[i], base); } return; } - /* Skip the faulted command batch submission */ - if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_policy)) { - trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPCMD)); + /* Skip the faulted cmdobj submission */ + if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPCMD)); - /* Skip faulting command batch */ - cmdbatch_skip_cmd(cmdbatch, replay, count); + /* Skip faulting cmdobj */ + _skip_cmd(cmdobj, replay, count); return; } - if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_policy)) { - trace_adreno_cmdbatch_recovery(cmdbatch, + if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_policy)) { + trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPFRAME)); - set_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_recovery); + set_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_recovery); /* - * Skip all the pending command batches for this context until + * Skip all the pending cmdobj's for this context until * the EOF frame is seen */ - cmdbatch_skip_frame(cmdbatch, replay, count); + _skip_frame(cmdobj, replay, count); return; } /* If we get here then all the policies failed */ - pr_context(device, cmdbatch->context, "gpu %s ctx %d ts %d\n", - state, cmdbatch->context->id, cmdbatch->timestamp); + pr_context(device, drawobj->context, "gpu %s ctx %d ts %d\n", + state, drawobj->context->id, drawobj->timestamp); /* Mark the context as failed */ - mark_guilty_context(device, cmdbatch->context->id); + mark_guilty_context(device, drawobj->context->id); /* Invalidate the context */ - adreno_drawctxt_invalidate(device, cmdbatch->context); + adreno_drawctxt_invalidate(device, drawobj->context); } /** @@ -1605,12 +1764,12 @@ static void process_cmdbatch_fault(struct kgsl_device *device, * @base: The IB1 base during the fault */ static void recover_dispatch_q(struct kgsl_device *device, - struct adreno_dispatcher_cmdqueue *dispatch_q, + struct adreno_dispatcher_drawqueue *dispatch_q, int fault, unsigned int base) { struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - struct kgsl_cmdbatch **replay = NULL; + struct kgsl_drawobj_cmd **replay; unsigned int ptr; int first = 0; int count = 0; @@ -1624,14 +1783,16 @@ static void recover_dispatch_q(struct kgsl_device *device, /* Recovery failed - mark everybody on this q guilty */ while (ptr != dispatch_q->tail) { - struct kgsl_context *context = - dispatch_q->cmd_q[ptr]->context; + struct kgsl_drawobj_cmd *cmdobj = + dispatch_q->cmd_q[ptr]; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); - mark_guilty_context(device, context->id); - adreno_drawctxt_invalidate(device, context); - kgsl_cmdbatch_destroy(dispatch_q->cmd_q[ptr]); + mark_guilty_context(device, drawobj->context->id); + adreno_drawctxt_invalidate(device, drawobj->context); + kgsl_drawobj_destroy(drawobj); - ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE); + ptr = DRAWQUEUE_NEXT(ptr, + 
ADRENO_DISPATCH_DRAWQUEUE_SIZE); } /* @@ -1643,22 +1804,22 @@ static void recover_dispatch_q(struct kgsl_device *device, goto replay; } - /* Copy the inflight command batches into the temporary storage */ + /* Copy the inflight cmdobj's into the temporary storage */ ptr = dispatch_q->head; while (ptr != dispatch_q->tail) { replay[count++] = dispatch_q->cmd_q[ptr]; - ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE); + ptr = DRAWQUEUE_NEXT(ptr, ADRENO_DISPATCH_DRAWQUEUE_SIZE); } if (fault && count) - process_cmdbatch_fault(device, replay, + process_cmdobj_fault(device, replay, count, base, fault); replay: dispatch_q->inflight = 0; dispatch_q->head = dispatch_q->tail = 0; - /* Remove any pending command batches that have been invalidated */ - remove_invalidated_cmdbatches(device, replay, count); + /* Remove any pending cmdobj's that have been invalidated */ + remove_invalidated_cmdobjs(device, replay, count); /* Replay the pending command buffers */ for (i = 0; i < count; i++) { @@ -1674,16 +1835,16 @@ replay: */ if (first == 0) { - set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv); + set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv); first = 1; } /* - * Force each command batch to wait for idle - this avoids weird + * Force each cmdobj to wait for idle - this avoids weird * CP parse issues */ - set_bit(CMDBATCH_FLAG_WFI, &replay[i]->priv); + set_bit(CMDOBJ_WFI, &replay[i]->priv); ret = sendcmd(adreno_dev, replay[i]); @@ -1693,15 +1854,18 @@ replay: */ if (ret) { - pr_context(device, replay[i]->context, + pr_context(device, replay[i]->base.context, "gpu reset failed ctx %d ts %d\n", - replay[i]->context->id, replay[i]->timestamp); + replay[i]->base.context->id, + replay[i]->base.timestamp); /* Mark this context as guilty (failed recovery) */ - mark_guilty_context(device, replay[i]->context->id); + mark_guilty_context(device, + replay[i]->base.context->id); - adreno_drawctxt_invalidate(device, replay[i]->context); - remove_invalidated_cmdbatches(device, &replay[i], + adreno_drawctxt_invalidate(device, + replay[i]->base.context); + remove_invalidated_cmdobjs(device, &replay[i], count - i); } } @@ -1713,36 +1877,38 @@ replay: } static void do_header_and_snapshot(struct kgsl_device *device, - struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch) + struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj) { - /* Always dump the snapshot on a non-cmdbatch failure */ - if (cmdbatch == NULL) { + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + + /* Always dump the snapshot on a non-drawobj failure */ + if (cmdobj == NULL) { adreno_fault_header(device, rb, NULL); kgsl_device_snapshot(device, NULL); return; } /* Skip everything if the PMDUMP flag is set */ - if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy)) + if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy)) return; /* Print the fault header */ - adreno_fault_header(device, rb, cmdbatch); + adreno_fault_header(device, rb, cmdobj); - if (!(cmdbatch->context->flags & KGSL_CONTEXT_NO_SNAPSHOT)) - kgsl_device_snapshot(device, cmdbatch->context); + if (!(drawobj->context->flags & KGSL_CONTEXT_NO_SNAPSHOT)) + kgsl_device_snapshot(device, drawobj->context); } static int dispatcher_do_fault(struct adreno_device *adreno_dev) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; - struct adreno_dispatcher_cmdqueue *dispatch_q = NULL, *dispatch_q_temp; + struct adreno_dispatcher_drawqueue *dispatch_q = NULL, *dispatch_q_temp; struct adreno_ringbuffer *rb; 
struct adreno_ringbuffer *hung_rb = NULL; unsigned int reg; uint64_t base; - struct kgsl_cmdbatch *cmdbatch = NULL; + struct kgsl_drawobj_cmd *cmdobj = NULL; int ret, i; int fault; int halt; @@ -1792,10 +1958,10 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg); } /* - * retire cmdbatches from all the dispatch_q's before starting recovery + * retire cmdobj's from all the dispatch_q's before starting recovery */ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { - adreno_dispatch_retire_cmdqueue(adreno_dev, + adreno_dispatch_retire_drawqueue(adreno_dev, &(rb->dispatch_q)); /* Select the active dispatch_q */ if (base == rb->buffer_desc.gpuaddr) { @@ -1814,15 +1980,15 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) } } - if (dispatch_q && !adreno_cmdqueue_is_empty(dispatch_q)) { - cmdbatch = dispatch_q->cmd_q[dispatch_q->head]; - trace_adreno_cmdbatch_fault(cmdbatch, fault); + if (dispatch_q && !adreno_drawqueue_is_empty(dispatch_q)) { + cmdobj = dispatch_q->cmd_q[dispatch_q->head]; + trace_adreno_cmdbatch_fault(cmdobj, fault); } adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE, ADRENO_REG_CP_IB1_BASE_HI, &base); - do_header_and_snapshot(device, hung_rb, cmdbatch); + do_header_and_snapshot(device, hung_rb, cmdobj); /* Terminate the stalled transaction and resume the IOMMU */ if (fault & ADRENO_IOMMU_PAGE_FAULT) @@ -1876,23 +2042,24 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) return 1; } -static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch, +static inline int drawobj_consumed(struct kgsl_drawobj *drawobj, unsigned int consumed, unsigned int retired) { - return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) && - (timestamp_cmp(retired, cmdbatch->timestamp) < 0)); + return ((timestamp_cmp(drawobj->timestamp, consumed) >= 0) && + (timestamp_cmp(retired, drawobj->timestamp) < 0)); } static void _print_recovery(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj_cmd *cmdobj) { static struct { unsigned int mask; const char *str; } flags[] = { ADRENO_FT_TYPES }; - int i, nr = find_first_bit(&cmdbatch->fault_recovery, BITS_PER_LONG); + int i, nr = find_first_bit(&cmdobj->fault_recovery, BITS_PER_LONG); char *result = "unknown"; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); for (i = 0; i < ARRAY_SIZE(flags); i++) { if (flags[i].mask == BIT(nr)) { @@ -1901,40 +2068,41 @@ static void _print_recovery(struct kgsl_device *device, } } - pr_context(device, cmdbatch->context, + pr_context(device, drawobj->context, "gpu %s ctx %d ts %d policy %lX\n", - result, cmdbatch->context->id, cmdbatch->timestamp, - cmdbatch->fault_recovery); + result, drawobj->context->id, drawobj->timestamp, + cmdobj->fault_recovery); } -static void cmdbatch_profile_ticks(struct adreno_device *adreno_dev, - struct kgsl_cmdbatch *cmdbatch, uint64_t *start, uint64_t *retire) +static void cmdobj_profile_ticks(struct adreno_device *adreno_dev, + struct kgsl_drawobj_cmd *cmdobj, uint64_t *start, uint64_t *retire) { - void *ptr = adreno_dev->cmdbatch_profile_buffer.hostptr; - struct adreno_cmdbatch_profile_entry *entry; + void *ptr = adreno_dev->profile_buffer.hostptr; + struct adreno_drawobj_profile_entry *entry; - entry = (struct adreno_cmdbatch_profile_entry *) - (ptr + (cmdbatch->profile_index * sizeof(*entry))); + entry = (struct adreno_drawobj_profile_entry *) + (ptr + (cmdobj->profile_index * sizeof(*entry))); rmb(); *start = entry->started; *retire = entry->retired; } 
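Aside on the helper above: drawobj_consumed() depends on timestamp_cmp() staying correct across 32-bit timestamp wraparound. The standalone sketch below is illustrative only and not part of the patch; its timestamp_cmp() is a simplified stand-in based on signed subtraction (the driver's actual helper uses a windowed comparison), while the consumed/retired condition mirrors the function shown above.

#include <stdio.h>

/* Simplified stand-in for timestamp_cmp(): signed subtraction keeps
 * the ordering correct across 32-bit wraparound, in the spirit of the
 * kernel's time_after() idiom. */
static int timestamp_cmp(unsigned int a, unsigned int b)
{
	return (int)(a - b);
}

/* Mirrors drawobj_consumed(): true when the object's timestamp has
 * been consumed by the CP but the retired counter has not passed it. */
static int drawobj_consumed(unsigned int ts, unsigned int consumed,
		unsigned int retired)
{
	return (timestamp_cmp(ts, consumed) >= 0) &&
		(timestamp_cmp(retired, ts) < 0);
}

int main(void)
{
	/* Near the 32-bit boundary: retired still lags ts -> prints 1 */
	printf("%d\n", drawobj_consumed(0xffffffffu, 0xffffffffu, 0xfffffff0u));
	/* retired has wrapped past ts -> no longer pending, prints 0 */
	printf("%d\n", drawobj_consumed(0xffffffffu, 0xffffffffu, 0x00000002u));
	return 0;
}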
-static void retire_cmdbatch(struct adreno_device *adreno_dev, - struct kgsl_cmdbatch *cmdbatch) +static void retire_cmdobj(struct adreno_device *adreno_dev, + struct kgsl_drawobj_cmd *cmdobj) { struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; - struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context); + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); + struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context); uint64_t start = 0, end = 0; - if (cmdbatch->fault_recovery != 0) { - set_bit(ADRENO_CONTEXT_FAULT, &cmdbatch->context->priv); - _print_recovery(KGSL_DEVICE(adreno_dev), cmdbatch); + if (cmdobj->fault_recovery != 0) { + set_bit(ADRENO_CONTEXT_FAULT, &drawobj->context->priv); + _print_recovery(KGSL_DEVICE(adreno_dev), cmdobj); } - if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv)) - cmdbatch_profile_ticks(adreno_dev, cmdbatch, &start, &end); + if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) + cmdobj_profile_ticks(adreno_dev, cmdobj, &start, &end); /* * For A3xx we still get the rptr from the CP_RB_RPTR instead of @@ -1942,48 +2110,49 @@ static void retire_cmdbatch(struct adreno_device *adreno_dev, * So avoid reading GPU register directly for A3xx. */ if (adreno_is_a3xx(adreno_dev)) - trace_adreno_cmdbatch_retired(cmdbatch, - (int) dispatcher->inflight, start, end, - ADRENO_CMDBATCH_RB(cmdbatch), 0); + trace_adreno_cmdbatch_retired(drawobj, + (int) dispatcher->inflight, start, end, + ADRENO_DRAWOBJ_RB(drawobj), 0, cmdobj->fault_recovery); else - trace_adreno_cmdbatch_retired(cmdbatch, - (int) dispatcher->inflight, start, end, - ADRENO_CMDBATCH_RB(cmdbatch), - adreno_get_rptr(drawctxt->rb)); + trace_adreno_cmdbatch_retired(drawobj, + (int) dispatcher->inflight, start, end, + ADRENO_DRAWOBJ_RB(drawobj), + adreno_get_rptr(drawctxt->rb), cmdobj->fault_recovery); drawctxt->submit_retire_ticks[drawctxt->ticks_index] = - end - cmdbatch->submit_ticks; + end - cmdobj->submit_ticks; drawctxt->ticks_index = (drawctxt->ticks_index + 1) % SUBMIT_RETIRE_TICKS_SIZE; - kgsl_cmdbatch_destroy(cmdbatch); + kgsl_drawobj_destroy(drawobj); } -static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev, - struct adreno_dispatcher_cmdqueue *cmdqueue) +static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev, + struct adreno_dispatcher_drawqueue *drawqueue) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher; int count = 0; - while (!adreno_cmdqueue_is_empty(cmdqueue)) { - struct kgsl_cmdbatch *cmdbatch = - cmdqueue->cmd_q[cmdqueue->head]; + while (!adreno_drawqueue_is_empty(drawqueue)) { + struct kgsl_drawobj_cmd *cmdobj = + drawqueue->cmd_q[drawqueue->head]; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); - if (!kgsl_check_timestamp(device, cmdbatch->context, - cmdbatch->timestamp)) + if (!kgsl_check_timestamp(device, drawobj->context, + drawobj->timestamp)) break; - retire_cmdbatch(adreno_dev, cmdbatch); + retire_cmdobj(adreno_dev, cmdobj); dispatcher->inflight--; - cmdqueue->inflight--; + drawqueue->inflight--; - cmdqueue->cmd_q[cmdqueue->head] = NULL; + drawqueue->cmd_q[drawqueue->head] = NULL; - cmdqueue->head = CMDQUEUE_NEXT(cmdqueue->head, - ADRENO_DISPATCH_CMDQUEUE_SIZE); + drawqueue->head = DRAWQUEUE_NEXT(drawqueue->head, + ADRENO_DISPATCH_DRAWQUEUE_SIZE); count++; } @@ -1992,13 +2161,14 @@ static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev, } static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev, - struct 
adreno_dispatcher_cmdqueue *cmdqueue) + struct adreno_dispatcher_drawqueue *drawqueue) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); - struct kgsl_cmdbatch *cmdbatch = cmdqueue->cmd_q[cmdqueue->head]; + struct kgsl_drawobj *drawobj = + DRAWOBJ(drawqueue->cmd_q[drawqueue->head]); /* Don't timeout if the timer hasn't expired yet (duh) */ - if (time_is_after_jiffies(cmdqueue->expires)) + if (time_is_after_jiffies(drawqueue->expires)) return; /* Don't timeout if the IB timeout is disabled globally */ @@ -2006,30 +2176,30 @@ static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev, return; /* Don't time out if the context has disabled it */ - if (cmdbatch->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE) + if (drawobj->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE) return; - pr_context(device, cmdbatch->context, "gpu timeout ctx %d ts %d\n", - cmdbatch->context->id, cmdbatch->timestamp); + pr_context(device, drawobj->context, "gpu timeout ctx %d ts %d\n", + drawobj->context->id, drawobj->timestamp); adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT); } -static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev, - struct adreno_dispatcher_cmdqueue *cmdqueue) +static int adreno_dispatch_process_drawqueue(struct adreno_device *adreno_dev, + struct adreno_dispatcher_drawqueue *drawqueue) { - int count = adreno_dispatch_retire_cmdqueue(adreno_dev, cmdqueue); + int count = adreno_dispatch_retire_drawqueue(adreno_dev, drawqueue); /* Nothing to do if there are no pending commands */ - if (adreno_cmdqueue_is_empty(cmdqueue)) + if (adreno_drawqueue_is_empty(drawqueue)) return count; - /* Don't update the cmdqueue timeout if we are about to preempt out */ + /* Don't update the drawqueue timeout if we are about to preempt out */ if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) return count; - /* Don't update the cmdqueue timeout if it isn't active */ - if (!cmdqueue_is_current(cmdqueue)) + /* Don't update the drawqueue timeout if it isn't active */ + if (!drawqueue_is_current(drawqueue)) return count; /* @@ -2038,17 +2208,17 @@ static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev, */ if (count) { - cmdqueue->expires = jiffies + - msecs_to_jiffies(adreno_cmdbatch_timeout); + drawqueue->expires = jiffies + + msecs_to_jiffies(adreno_drawobj_timeout); return count; } /* * If we get here then 1) the ringbuffer is current and 2) we haven't * retired anything. 
Check to see if the timeout is valid for the - * current cmdbatch and fault if it has expired + * current drawobj and fault if it has expired */ - _adreno_dispatch_check_timeout(adreno_dev, cmdqueue); + _adreno_dispatch_check_timeout(adreno_dev, drawqueue); return 0; } @@ -2067,11 +2237,11 @@ static void _dispatcher_update_timers(struct adreno_device *adreno_dev) /* Check to see if we need to update the command timer */ if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) { - struct adreno_dispatcher_cmdqueue *cmdqueue = - CMDQUEUE(adreno_dev->cur_rb); + struct adreno_dispatcher_drawqueue *drawqueue = + DRAWQUEUE(adreno_dev->cur_rb); - if (!adreno_cmdqueue_is_empty(cmdqueue)) - mod_timer(&dispatcher->timer, cmdqueue->expires); + if (!adreno_drawqueue_is_empty(drawqueue)) + mod_timer(&dispatcher->timer, drawqueue->expires); } } @@ -2111,14 +2281,14 @@ static void adreno_dispatcher_work(struct work_struct *work) /* * As long as there are inflight commands, process retired commands from - * all cmdqueues + * all drawqueues */ for (i = 0; i < adreno_dev->num_ringbuffers; i++) { - struct adreno_dispatcher_cmdqueue *cmdqueue = - CMDQUEUE(&adreno_dev->ringbuffers[i]); - count += adreno_dispatch_process_cmdqueue(adreno_dev, - cmdqueue); + struct adreno_dispatcher_drawqueue *drawqueue = + DRAWQUEUE(&adreno_dev->ringbuffers[i]); + count += adreno_dispatch_process_drawqueue(adreno_dev, + drawqueue); if (dispatcher->inflight == 0) break; } @@ -2178,7 +2348,7 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device, } /* - * This is called on a regular basis while command batches are inflight. Fault + * This is called on a regular basis while cmdobj's are inflight. Fault * detection registers are read and compared to the existing values - if they * changed then the GPU is still running.
If they are the same between * subsequent calls then the GPU may have faulted @@ -2230,7 +2400,7 @@ static void adreno_dispatcher_timer(unsigned long data) */ void adreno_dispatcher_start(struct kgsl_device *device) { - complete_all(&device->cmdbatch_gate); + complete_all(&device->halt_gate); /* Schedule the work loop to get things going */ adreno_dispatcher_schedule(device); @@ -2267,13 +2437,13 @@ void adreno_dispatcher_close(struct adreno_device *adreno_dev) del_timer_sync(&dispatcher->fault_timer); FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { - struct adreno_dispatcher_cmdqueue *dispatch_q = + struct adreno_dispatcher_drawqueue *dispatch_q = &(rb->dispatch_q); - while (!adreno_cmdqueue_is_empty(dispatch_q)) { - kgsl_cmdbatch_destroy( - dispatch_q->cmd_q[dispatch_q->head]); + while (!adreno_drawqueue_is_empty(dispatch_q)) { + kgsl_drawobj_destroy( + DRAWOBJ(dispatch_q->cmd_q[dispatch_q->head])); dispatch_q->head = (dispatch_q->head + 1) - % ADRENO_DISPATCH_CMDQUEUE_SIZE; + % ADRENO_DISPATCH_DRAWQUEUE_SIZE; } } @@ -2332,23 +2502,23 @@ static ssize_t _show_uint(struct adreno_dispatcher *dispatcher, *((unsigned int *) attr->value)); } -static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE, +static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_DRAWQUEUE_SIZE, _dispatcher_q_inflight_hi); static DISPATCHER_UINT_ATTR(inflight_low_latency, 0644, - ADRENO_DISPATCH_CMDQUEUE_SIZE, _dispatcher_q_inflight_lo); + ADRENO_DISPATCH_DRAWQUEUE_SIZE, _dispatcher_q_inflight_lo); /* * Our code that "puts back" a command from the context is much cleaner * if we are sure that there will always be enough room in the * ringbuffer so restrict the maximum size of the context queue to - * ADRENO_CONTEXT_CMDQUEUE_SIZE - 1 + * ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1 */ -static DISPATCHER_UINT_ATTR(context_cmdqueue_size, 0644, - ADRENO_CONTEXT_CMDQUEUE_SIZE - 1, _context_cmdqueue_size); +static DISPATCHER_UINT_ATTR(context_drawqueue_size, 0644, + ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1, _context_drawqueue_size); static DISPATCHER_UINT_ATTR(context_burst_count, 0644, 0, - _context_cmdbatch_burst); -static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0, - adreno_cmdbatch_timeout); + _context_drawobj_burst); +static DISPATCHER_UINT_ATTR(drawobj_timeout, 0644, 0, + adreno_drawobj_timeout); static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait); static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0, _fault_timer_interval); @@ -2366,9 +2536,9 @@ static DISPATCHER_UINT_ATTR(dispatch_starvation_time, 0644, 0, static struct attribute *dispatcher_attrs[] = { &dispatcher_attr_inflight.attr, &dispatcher_attr_inflight_low_latency.attr, - &dispatcher_attr_context_cmdqueue_size.attr, + &dispatcher_attr_context_drawqueue_size.attr, &dispatcher_attr_context_burst_count.attr, - &dispatcher_attr_cmdbatch_timeout.attr, + &dispatcher_attr_drawobj_timeout.attr, &dispatcher_attr_context_queue_wait.attr, &dispatcher_attr_fault_detect_interval.attr, &dispatcher_attr_fault_throttle_time.attr, diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h index 699c3e4adb27..cb9106fedc82 100644 --- a/drivers/gpu/msm/adreno_dispatch.h +++ b/drivers/gpu/msm/adreno_dispatch.h @@ -15,7 +15,7 @@ #define ____ADRENO_DISPATCHER_H extern unsigned int adreno_disp_preempt_fair_sched; -extern unsigned int adreno_cmdbatch_timeout; +extern unsigned int adreno_drawobj_timeout; extern unsigned int adreno_dispatch_starvation_time; extern unsigned int adreno_dispatch_time_slice; @@ -44,21 +44,21 @@ enum 
adreno_dispatcher_starve_timer_states { * sizes that can be chosen at runtime */ -#define ADRENO_DISPATCH_CMDQUEUE_SIZE 128 +#define ADRENO_DISPATCH_DRAWQUEUE_SIZE 128 -#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s)) +#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s)) /** - * struct adreno_dispatcher_cmdqueue - List of commands for a RB level - * @cmd_q: List of command batches submitted to dispatcher + * struct adreno_dispatcher_drawqueue - List of commands for a RB level + * @cmd_q: List of command obj's submitted to dispatcher * @inflight: Number of commands inflight in this q * @head: Head pointer to the q * @tail: Queues tail pointer - * @active_context_count: Number of active contexts seen in this rb cmdqueue - * @expires: The jiffies value at which this cmdqueue has run too long + * @active_context_count: Number of active contexts seen in this rb drawqueue + * @expires: The jiffies value at which this drawqueue has run too long */ -struct adreno_dispatcher_cmdqueue { - struct kgsl_cmdbatch *cmd_q[ADRENO_DISPATCH_CMDQUEUE_SIZE]; +struct adreno_dispatcher_drawqueue { + struct kgsl_drawobj_cmd *cmd_q[ADRENO_DISPATCH_DRAWQUEUE_SIZE]; unsigned int inflight; unsigned int head; unsigned int tail; @@ -70,10 +70,10 @@ struct adreno_dispatcher_cmdqueue { * struct adreno_dispatcher - container for the adreno GPU dispatcher * @mutex: Mutex to protect the structure * @state: Current state of the dispatcher (active or paused) - * @timer: Timer to monitor the progress of the command batches - * @inflight: Number of command batch operations pending in the ringbuffer + * @timer: Timer to monitor the progress of the drawobjs + * @inflight: Number of drawobj operations pending in the ringbuffer * @fault: Non-zero if a fault was detected. - * @pending: Priority list of contexts waiting to submit command batches + * @pending: Priority list of contexts waiting to submit drawobjs * @plist_lock: Spin lock to protect the pending queue * @work: work_struct to put the dispatcher in a work queue * @kobj: kobject for the dispatcher directory in the device sysfs node @@ -109,9 +109,9 @@ int adreno_dispatcher_idle(struct adreno_device *adreno_dev); void adreno_dispatcher_irq_fault(struct adreno_device *adreno_dev); void adreno_dispatcher_stop(struct adreno_device *adreno_dev); -int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev, - struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch, - uint32_t *timestamp); +int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, struct kgsl_drawobj *drawobj[], + uint32_t count, uint32_t *timestamp); void adreno_dispatcher_schedule(struct kgsl_device *device); void adreno_dispatcher_pause(struct adreno_device *adreno_dev); @@ -120,11 +120,11 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device, void adreno_dispatcher_preempt_callback(struct adreno_device *adreno_dev, int bit); void adreno_preempt_process_dispatch_queue(struct adreno_device *adreno_dev, - struct adreno_dispatcher_cmdqueue *dispatch_q); + struct adreno_dispatcher_drawqueue *dispatch_q); -static inline bool adreno_cmdqueue_is_empty( - struct adreno_dispatcher_cmdqueue *cmdqueue) +static inline bool adreno_drawqueue_is_empty( + struct adreno_dispatcher_drawqueue *drawqueue) { - return (cmdqueue != NULL && cmdqueue->head == cmdqueue->tail); + return (drawqueue != NULL && drawqueue->head == drawqueue->tail); } #endif /* __ADRENO_DISPATCHER_H */ diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c index 
fb95f6108fb8..3a110ed221a8 100644 --- a/drivers/gpu/msm/adreno_drawctxt.c +++ b/drivers/gpu/msm/adreno_drawctxt.c @@ -59,14 +59,14 @@ void adreno_drawctxt_dump(struct kgsl_device *device, kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &retire); /* - * We may have cmdbatch timer running, which also uses same + * We may have kgsl sync obj timer running, which also uses same * lock, take a lock with software interrupt disabled (bh) * to avoid spin lock recursion. * * Use Spin trylock because dispatcher can acquire drawctxt->lock * if context is pending and the fence it is waiting on just got * signalled. Dispatcher acquires drawctxt->lock and tries to - * delete the cmdbatch timer using del_timer_sync(). + * delete the sync obj timer using del_timer_sync(). * del_timer_sync() waits till timer and its pending handlers * are deleted. But if the timer expires at the same time, * timer handler could be waiting on drawctxt->lock leading to a @@ -83,23 +83,27 @@ void adreno_drawctxt_dump(struct kgsl_device *device, context->id, queue, drawctxt->submitted_timestamp, start, retire); - if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) { - struct kgsl_cmdbatch *cmdbatch = - drawctxt->cmdqueue[drawctxt->cmdqueue_head]; + if (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) { + struct kgsl_drawobj *drawobj = + drawctxt->drawqueue[drawctxt->drawqueue_head]; - if (test_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv)) { + if (test_bit(ADRENO_CONTEXT_FENCE_LOG, &context->priv)) { dev_err(device->dev, " possible deadlock. Context %d might be blocked for itself\n", context->id); goto stats; } - if (kgsl_cmdbatch_events_pending(cmdbatch)) { - dev_err(device->dev, - " context[%d] (ts=%d) Active sync points:\n", - context->id, cmdbatch->timestamp); + if (drawobj->type == SYNCOBJ_TYPE) { + struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj); + + if (kgsl_drawobj_events_pending(syncobj)) { + dev_err(device->dev, + " context[%d] (ts=%d) Active sync points:\n", + context->id, drawobj->timestamp); - kgsl_dump_syncpoints(device, cmdbatch); + kgsl_dump_syncpoints(device, syncobj); + } } } @@ -229,19 +233,19 @@ done: return ret; } -static int drawctxt_detach_cmdbatches(struct adreno_context *drawctxt, - struct kgsl_cmdbatch **list) +static int drawctxt_detach_drawobjs(struct adreno_context *drawctxt, + struct kgsl_drawobj **list) { int count = 0; - while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) { - struct kgsl_cmdbatch *cmdbatch = - drawctxt->cmdqueue[drawctxt->cmdqueue_head]; + while (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) { + struct kgsl_drawobj *drawobj = + drawctxt->drawqueue[drawctxt->drawqueue_head]; - drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) % - ADRENO_CONTEXT_CMDQUEUE_SIZE; + drawctxt->drawqueue_head = (drawctxt->drawqueue_head + 1) % + ADRENO_CONTEXT_DRAWQUEUE_SIZE; - list[count++] = cmdbatch; + list[count++] = drawobj; } return count; @@ -259,7 +263,7 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device, struct kgsl_context *context) { struct adreno_context *drawctxt = ADRENO_CONTEXT(context); - struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE]; + struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE]; int i, count; trace_adreno_drawctxt_invalidate(drawctxt); @@ -280,13 +284,13 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device, drawctxt->timestamp); /* Get rid of commands still waiting in the queue */ - count = drawctxt_detach_cmdbatches(drawctxt, list); + count = drawctxt_detach_drawobjs(drawctxt, list); 
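/*
 * Illustrative note, not part of the patch: everything queued on the
 * context has just been drained into the local list[] while
 * drawctxt->lock was held; the kgsl_drawobj_destroy() calls below run
 * only after the unlock, presumably because destruction is too
 * heavyweight to perform under a spinlock.
 */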
spin_unlock(&drawctxt->lock); for (i = 0; i < count; i++) { kgsl_cancel_events_timestamp(device, &context->events, list[i]->timestamp); - kgsl_cmdbatch_destroy(list[i]); + kgsl_drawobj_destroy(list[i]); } /* Make sure all pending events are processed or cancelled */ @@ -453,7 +457,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context) struct adreno_context *drawctxt; struct adreno_ringbuffer *rb; int ret, count, i; - struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE]; + struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE]; if (context == NULL) return; @@ -468,7 +472,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context) spin_unlock(&adreno_dev->active_list_lock); spin_lock(&drawctxt->lock); - count = drawctxt_detach_cmdbatches(drawctxt, list); + count = drawctxt_detach_drawobjs(drawctxt, list); spin_unlock(&drawctxt->lock); for (i = 0; i < count; i++) { @@ -478,7 +482,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context) * detached status here. */ adreno_fault_skipcmd_detached(adreno_dev, drawctxt, list[i]); - kgsl_cmdbatch_destroy(list[i]); + kgsl_drawobj_destroy(list[i]); } /* @@ -499,13 +503,20 @@ void adreno_drawctxt_detach(struct kgsl_context *context) /* * If the wait for global fails due to timeout then nothing after this - * point is likely to work very well - BUG_ON() so we can take advantage - * of the debug tools to figure out what the h - e - double hockey - * sticks happened. If EAGAIN error is returned then recovery will kick - * in and there will be no more commands in the RB pipe from this - * context which is waht we are waiting for, so ignore -EAGAIN error + * point is likely to work very well - Get GPU snapshot and BUG_ON() + * so we can take advantage of the debug tools to figure out what the + * h - e - double hockey sticks happened. If EAGAIN error is returned + * then recovery will kick in and there will be no more commands in the + * RB pipe from this context which is what we are waiting for, so ignore + * -EAGAIN error */ - BUG_ON(ret && ret != -EAGAIN); + if (ret && ret != -EAGAIN) { + KGSL_DRV_ERR(device, "Wait for global ts=%d type=%d error=%d\n", + drawctxt->internal_timestamp, + drawctxt->type, ret); + device->force_panic = 1; + kgsl_device_snapshot(device, context); + } kgsl_sharedmem_writel(device, &device->memstore, KGSL_MEMSTORE_OFFSET(context->id, soptimestamp), diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h index 5ea911954991..0578f16ae9e1 100644 --- a/drivers/gpu/msm/adreno_drawctxt.h +++ b/drivers/gpu/msm/adreno_drawctxt.h @@ -18,7 +18,7 @@ struct adreno_context_type { const char *str; }; -#define ADRENO_CONTEXT_CMDQUEUE_SIZE 128 +#define ADRENO_CONTEXT_DRAWQUEUE_SIZE 128 #define SUBMIT_RETIRE_TICKS_SIZE 7 struct kgsl_device; @@ -32,20 +32,21 @@ struct kgsl_context; * @internal_timestamp: Global timestamp of the last issued command * NOTE: guarded by device->mutex, not drawctxt->mutex!
* @type: Context type (GL, CL, RS) - * @mutex: Mutex to protect the cmdqueue - * @cmdqueue: Queue of command batches waiting to be dispatched for this context - * @cmdqueue_head: Head of the cmdqueue queue - * @cmdqueue_tail: Tail of the cmdqueue queue + * @mutex: Mutex to protect the drawqueue + * @drawqueue: Queue of drawobjs waiting to be dispatched for this + * context + * @drawqueue_head: Head of the drawqueue queue + * @drawqueue_tail: Tail of the drawqueue queue * @pending: Priority list node for the dispatcher list of pending contexts * @wq: Workqueue structure for contexts to sleep pending room in the queue * @waiting: Workqueue structure for contexts waiting for a timestamp or event - * @queued: Number of commands queued in the cmdqueue - * @fault_policy: GFT fault policy set in cmdbatch_skip_cmd(); + * @queued: Number of commands queued in the drawqueue + * @fault_policy: GFT fault policy set in _skip_cmd(); * @debug_root: debugfs entry for this context. * @queued_timestamp: The last timestamp that was queued on this context * @rb: The ringbuffer in which this context submits commands. * @submitted_timestamp: The last timestamp that was submitted for this context - * @submit_retire_ticks: Array to hold cmdbatch execution times from submit + * @submit_retire_ticks: Array to hold command obj execution times from submit * to retire * @ticks_index: The index into submit_retire_ticks[] where the new delta will * be written. @@ -60,9 +61,9 @@ struct adreno_context { spinlock_t lock; /* Dispatcher */ - struct kgsl_cmdbatch *cmdqueue[ADRENO_CONTEXT_CMDQUEUE_SIZE]; - unsigned int cmdqueue_head; - unsigned int cmdqueue_tail; + struct kgsl_drawobj *drawqueue[ADRENO_CONTEXT_DRAWQUEUE_SIZE]; + unsigned int drawqueue_head; + unsigned int drawqueue_tail; struct plist_node pending; wait_queue_head_t wq; @@ -92,8 +93,9 @@ struct adreno_context { * @ADRENO_CONTEXT_SKIP_EOF - Context skip IBs until the next end of frame * marker. * @ADRENO_CONTEXT_FORCE_PREAMBLE - Force the preamble for the next submission. - * @ADRENO_CONTEXT_SKIP_CMD - Context's command batch is skipped during + * @ADRENO_CONTEXT_SKIP_CMD - Context's drawobj's skipped during fault tolerance. + * @ADRENO_CONTEXT_FENCE_LOG - Dump fences on this context. 
*/ enum adreno_context_priv { ADRENO_CONTEXT_FAULT = KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC, @@ -102,6 +104,7 @@ enum adreno_context_priv { ADRENO_CONTEXT_SKIP_EOF, ADRENO_CONTEXT_FORCE_PREAMBLE, ADRENO_CONTEXT_SKIP_CMD, + ADRENO_CONTEXT_FENCE_LOG, }; /* Flags for adreno_drawctxt_switch() */ diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index 07ef09034d7c..fc0602a60ac1 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -671,96 +671,17 @@ adreno_ringbuffer_issuecmds(struct adreno_ringbuffer *rb, sizedwords, 0, NULL); } -/** - * _ringbuffer_verify_ib() - Check if an IB's size is within a permitted limit - * @device: The kgsl device pointer - * @ibdesc: Pointer to the IB descriptor - */ -static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv, - struct kgsl_context *context, struct kgsl_memobj_node *ib) -{ - struct kgsl_device *device = dev_priv->device; - struct kgsl_process_private *private = dev_priv->process_priv; - - /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */ - if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) { - pr_context(device, context, "ctxt %d invalid ib size %lld\n", - context->id, ib->size); - return false; - } - - /* Make sure that the address is mapped */ - if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) { - pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n", - context->id, ib->gpuaddr); - return false; - } - - return true; -} - -int -adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, - struct kgsl_context *context, - struct kgsl_cmdbatch *cmdbatch, - uint32_t *timestamp) -{ - struct kgsl_device *device = dev_priv->device; - struct adreno_device *adreno_dev = ADRENO_DEVICE(device); - struct adreno_context *drawctxt = ADRENO_CONTEXT(context); - struct kgsl_memobj_node *ib; - int ret; - - if (kgsl_context_invalid(context)) - return -EDEADLK; - - /* Verify the IBs before they get queued */ - list_for_each_entry(ib, &cmdbatch->cmdlist, node) - if (_ringbuffer_verify_ib(dev_priv, context, ib) == false) - return -EINVAL; - - /* wait for the suspend gate */ - wait_for_completion(&device->cmdbatch_gate); - - /* - * Clear the wake on touch bit to indicate an IB has been - * submitted since the last time we set it. But only clear - * it when we have rendering commands. - */ - if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER) - && !(cmdbatch->flags & KGSL_CMDBATCH_SYNC)) - device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH; - - /* A3XX does not have support for command batch profiling */ - if (adreno_is_a3xx(adreno_dev) && - (cmdbatch->flags & KGSL_CMDBATCH_PROFILING)) - return -EOPNOTSUPP; - - /* Queue the command in the ringbuffer */ - ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch, - timestamp); - - /* - * Return -EPROTO if the device has faulted since the last time we - * checked - userspace uses this to perform post-fault activities - */ - if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv)) - ret = -EPROTO; - - return ret; -} - static void adreno_ringbuffer_set_constraint(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj *drawobj) { - struct kgsl_context *context = cmdbatch->context; + struct kgsl_context *context = drawobj->context; /* * Check if the context has a constraint and constraint flags are * set. 
*/ if (context->pwr_constraint.type && ((context->flags & KGSL_CONTEXT_PWR_CONSTRAINT) || - (cmdbatch->flags & KGSL_CONTEXT_PWR_CONSTRAINT))) + (drawobj->flags & KGSL_CONTEXT_PWR_CONSTRAINT))) kgsl_pwrctrl_set_constraint(device, &context->pwr_constraint, context->id); } @@ -792,10 +713,12 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev, /* adreno_ringbuffer_submitcmd - submit userspace IBs to the GPU */ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, - struct kgsl_cmdbatch *cmdbatch, struct adreno_submit_time *time) + struct kgsl_drawobj_cmd *cmdobj, + struct adreno_submit_time *time) { struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); struct kgsl_memobj_node *ib; unsigned int numibs = 0; unsigned int *link; @@ -803,25 +726,25 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, struct kgsl_context *context; struct adreno_context *drawctxt; bool use_preamble = true; - bool cmdbatch_user_profiling = false; - bool cmdbatch_kernel_profiling = false; + bool user_profiling = false; + bool kernel_profiling = false; int flags = KGSL_CMD_FLAGS_NONE; int ret; struct adreno_ringbuffer *rb; - struct kgsl_cmdbatch_profiling_buffer *profile_buffer = NULL; + struct kgsl_drawobj_profiling_buffer *profile_buffer = NULL; unsigned int dwords = 0; struct adreno_submit_time local; - struct kgsl_mem_entry *entry = cmdbatch->profiling_buf_entry; + struct kgsl_mem_entry *entry = cmdobj->profiling_buf_entry; if (entry) profile_buffer = kgsl_gpuaddr_to_vaddr(&entry->memdesc, - cmdbatch->profiling_buffer_gpuaddr); + cmdobj->profiling_buffer_gpuaddr); - context = cmdbatch->context; + context = drawobj->context; drawctxt = ADRENO_CONTEXT(context); /* Get the total IBs in the list */ - list_for_each_entry(ib, &cmdbatch->cmdlist, node) + list_for_each_entry(ib, &cmdobj->cmdlist, node) numibs++; rb = drawctxt->rb; @@ -838,14 +761,14 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, * c) force preamble for commandbatch */ if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) && - (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv))) { + (!test_bit(CMDOBJ_SKIP, &cmdobj->priv))) { - set_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_recovery); - cmdbatch->fault_policy = drawctxt->fault_policy; - set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv); + set_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_recovery); + cmdobj->fault_policy = drawctxt->fault_policy; + set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv); /* if context is detached print fault recovery */ - adreno_fault_skipcmd_detached(adreno_dev, drawctxt, cmdbatch); + adreno_fault_skipcmd_detached(adreno_dev, drawctxt, drawobj); /* clear the drawctxt flags */ clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv); @@ -857,7 +780,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, if a context switch hasn't occurred */ if ((drawctxt->base.flags & KGSL_CONTEXT_PREAMBLE) && - !test_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv) && + !test_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv) && (rb->drawctxt_active == drawctxt)) use_preamble = false; @@ -867,7 +790,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, * the accounting sane.
Set start_index and numibs to 0 to just * generate the start and end markers and skip everything else */ - if (test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)) { + if (test_bit(CMDOBJ_SKIP, &cmdobj->priv)) { use_preamble = false; numibs = 0; } @@ -884,9 +807,9 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, /* Each IB takes up 30 dwords in worst case */ dwords += (numibs * 30); - if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING && + if (drawobj->flags & KGSL_DRAWOBJ_PROFILING && !adreno_is_a3xx(adreno_dev) && profile_buffer) { - cmdbatch_user_profiling = true; + user_profiling = true; dwords += 6; /* @@ -907,8 +830,8 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, time = &local; } - if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv)) { - cmdbatch_kernel_profiling = true; + if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) { + kernel_profiling = true; dwords += 6; if (adreno_is_a5xx(adreno_dev)) dwords += 2; @@ -929,26 +852,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, *cmds++ = cp_packet(adreno_dev, CP_NOP, 1); *cmds++ = KGSL_START_OF_IB_IDENTIFIER; - if (cmdbatch_kernel_profiling) { + if (kernel_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, - adreno_dev->cmdbatch_profile_buffer.gpuaddr + - ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index, + adreno_dev->profile_buffer.gpuaddr + + ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index, started)); } /* - * Add cmds to read the GPU ticks at the start of the cmdbatch and - * write it into the appropriate cmdbatch profiling buffer offset + * Add cmds to read the GPU ticks at the start of command obj and + * write it into the appropriate command obj profiling buffer offset */ - if (cmdbatch_user_profiling) { + if (user_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, - cmdbatch->profiling_buffer_gpuaddr + - offsetof(struct kgsl_cmdbatch_profiling_buffer, + cmdobj->profiling_buffer_gpuaddr + + offsetof(struct kgsl_drawobj_profiling_buffer, gpu_ticks_submitted)); } if (numibs) { - list_for_each_entry(ib, &cmdbatch->cmdlist, node) { + list_for_each_entry(ib, &cmdobj->cmdlist, node) { /* * Skip 0 sized IBs - these are presumed to have been * removed from consideration by the FT policy @@ -972,21 +895,21 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, adreno_is_preemption_enabled(adreno_dev)) cmds += gpudev->preemption_yield_enable(cmds); - if (cmdbatch_kernel_profiling) { + if (kernel_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, - adreno_dev->cmdbatch_profile_buffer.gpuaddr + - ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index, + adreno_dev->profile_buffer.gpuaddr + + ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index, retired)); } /* - * Add cmds to read the GPU ticks at the end of the cmdbatch and - * write it into the appropriate cmdbatch profiling buffer offset + * Add cmds to read the GPU ticks at the end of command obj and + * write it into the appropriate command obj profiling buffer offset */ - if (cmdbatch_user_profiling) { + if (user_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, - cmdbatch->profiling_buffer_gpuaddr + - offsetof(struct kgsl_cmdbatch_profiling_buffer, + cmdobj->profiling_buffer_gpuaddr + + offsetof(struct kgsl_drawobj_profiling_buffer, gpu_ticks_retired)); } @@ -1012,7 +935,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, goto done; } - if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv)) + if (test_bit(CMDOBJ_WFI, &cmdobj->priv)) flags = KGSL_CMD_FLAGS_WFI; /* 
@@ -1025,26 +948,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, flags |= KGSL_CMD_FLAGS_PWRON_FIXUP; /* Set the constraints before adding to ringbuffer */ - adreno_ringbuffer_set_constraint(device, cmdbatch); + adreno_ringbuffer_set_constraint(device, drawobj); /* CFF stuff executed only if CFF is enabled */ - kgsl_cffdump_capture_ib_desc(device, context, cmdbatch); + kgsl_cffdump_capture_ib_desc(device, context, cmdobj); ret = adreno_ringbuffer_addcmds(rb, flags, &link[0], (cmds - link), - cmdbatch->timestamp, time); + drawobj->timestamp, time); if (!ret) { - cmdbatch->global_ts = drawctxt->internal_timestamp; + cmdobj->global_ts = drawctxt->internal_timestamp; /* Put the timevalues in the profiling buffer */ - if (cmdbatch_user_profiling) { + if (user_profiling) { /* * Return kernel clock time to the client * if requested */ - if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING_KTIME) { + if (drawobj->flags & KGSL_DRAWOBJ_PROFILING_KTIME) { uint64_t secs = time->ktime; profile_buffer->wall_clock_ns = @@ -1069,9 +992,8 @@ done: kgsl_memdesc_unmap(&entry->memdesc); - trace_kgsl_issueibcmds(device, context->id, cmdbatch, - numibs, cmdbatch->timestamp, - cmdbatch->flags, ret, drawctxt->type); + trace_kgsl_issueibcmds(device, context->id, numibs, drawobj->timestamp, + drawobj->flags, ret, drawctxt->type); kfree(link); return ret; diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h index b126f710b5e6..63374af1e3f7 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.h +++ b/drivers/gpu/msm/adreno_ringbuffer.h @@ -119,7 +119,7 @@ struct adreno_ringbuffer { struct adreno_context *drawctxt_active; struct kgsl_memdesc preemption_desc; struct kgsl_memdesc pagetable_desc; - struct adreno_dispatcher_cmdqueue dispatch_q; + struct adreno_dispatcher_drawqueue dispatch_q; wait_queue_head_t ts_expire_waitq; unsigned int wptr_preempt_end; unsigned int gpr11; @@ -136,11 +136,11 @@ int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds, int set); int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, struct kgsl_context *context, - struct kgsl_cmdbatch *cmdbatch, + struct kgsl_drawobj *drawobj, uint32_t *timestamp); int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, - struct kgsl_cmdbatch *cmdbatch, + struct kgsl_drawobj_cmd *cmdobj, struct adreno_submit_time *time); int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt); diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h index f52ddfa894d5..16ca0980cfbe 100644 --- a/drivers/gpu/msm/adreno_trace.h +++ b/drivers/gpu/msm/adreno_trace.h @@ -27,8 +27,8 @@ #include "adreno_a5xx.h" TRACE_EVENT(adreno_cmdbatch_queued, - TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int queued), - TP_ARGS(cmdbatch, queued), + TP_PROTO(struct kgsl_drawobj *drawobj, unsigned int queued), + TP_ARGS(drawobj, queued), TP_STRUCT__entry( __field(unsigned int, id) __field(unsigned int, timestamp) @@ -37,26 +37,26 @@ TRACE_EVENT(adreno_cmdbatch_queued, __field(unsigned int, prio) ), TP_fast_assign( - __entry->id = cmdbatch->context->id; - __entry->timestamp = cmdbatch->timestamp; + __entry->id = drawobj->context->id; + __entry->timestamp = drawobj->timestamp; __entry->queued = queued; - __entry->flags = cmdbatch->flags; - __entry->prio = cmdbatch->context->priority; + __entry->flags = drawobj->flags; + __entry->prio = drawobj->context->priority; ), TP_printk( "ctx=%u ctx_prio=%u ts=%u queued=%u flags=%s", __entry->id, __entry->prio,
__entry->timestamp, __entry->queued, __entry->flags ? __print_flags(__entry->flags, "|", - KGSL_CMDBATCH_FLAGS) : "none" + KGSL_DRAWOBJ_FLAGS) : "none" ) ); TRACE_EVENT(adreno_cmdbatch_submitted, - TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight, uint64_t ticks, + TP_PROTO(struct kgsl_drawobj *drawobj, int inflight, uint64_t ticks, unsigned long secs, unsigned long usecs, struct adreno_ringbuffer *rb, unsigned int rptr), - TP_ARGS(cmdbatch, inflight, ticks, secs, usecs, rb, rptr), + TP_ARGS(drawobj, inflight, ticks, secs, usecs, rb, rptr), TP_STRUCT__entry( __field(unsigned int, id) __field(unsigned int, timestamp) @@ -72,14 +72,14 @@ TRACE_EVENT(adreno_cmdbatch_submitted, __field(int, q_inflight) ), TP_fast_assign( - __entry->id = cmdbatch->context->id; - __entry->timestamp = cmdbatch->timestamp; + __entry->id = drawobj->context->id; + __entry->timestamp = drawobj->timestamp; __entry->inflight = inflight; - __entry->flags = cmdbatch->flags; + __entry->flags = drawobj->flags; __entry->ticks = ticks; __entry->secs = secs; __entry->usecs = usecs; - __entry->prio = cmdbatch->context->priority; + __entry->prio = drawobj->context->priority; __entry->rb_id = rb->id; __entry->rptr = rptr; __entry->wptr = rb->wptr; @@ -90,7 +90,7 @@ TRACE_EVENT(adreno_cmdbatch_submitted, __entry->id, __entry->prio, __entry->timestamp, __entry->inflight, __entry->flags ? __print_flags(__entry->flags, "|", - KGSL_CMDBATCH_FLAGS) : "none", + KGSL_DRAWOBJ_FLAGS) : "none", __entry->ticks, __entry->secs, __entry->usecs, __entry->rb_id, __entry->rptr, __entry->wptr, __entry->q_inflight @@ -98,10 +98,11 @@ TRACE_EVENT(adreno_cmdbatch_submitted, ); TRACE_EVENT(adreno_cmdbatch_retired, - TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight, + TP_PROTO(struct kgsl_drawobj *drawobj, int inflight, uint64_t start, uint64_t retire, - struct adreno_ringbuffer *rb, unsigned int rptr), - TP_ARGS(cmdbatch, inflight, start, retire, rb, rptr), + struct adreno_ringbuffer *rb, unsigned int rptr, + unsigned long fault_recovery), + TP_ARGS(drawobj, inflight, start, retire, rb, rptr, fault_recovery), TP_STRUCT__entry( __field(unsigned int, id) __field(unsigned int, timestamp) @@ -115,16 +116,17 @@ TRACE_EVENT(adreno_cmdbatch_retired, __field(unsigned int, rptr) __field(unsigned int, wptr) __field(int, q_inflight) + __field(unsigned long, fault_recovery) ), TP_fast_assign( - __entry->id = cmdbatch->context->id; - __entry->timestamp = cmdbatch->timestamp; + __entry->id = drawobj->context->id; + __entry->timestamp = drawobj->timestamp; __entry->inflight = inflight; - __entry->recovery = cmdbatch->fault_recovery; - __entry->flags = cmdbatch->flags; + __entry->recovery = fault_recovery; + __entry->flags = drawobj->flags; __entry->start = start; __entry->retire = retire; - __entry->prio = cmdbatch->context->priority; + __entry->prio = drawobj->context->priority; __entry->rb_id = rb->id; __entry->rptr = rptr; __entry->wptr = rb->wptr; @@ -138,7 +140,7 @@ TRACE_EVENT(adreno_cmdbatch_retired, __print_flags(__entry->recovery, "|", ADRENO_FT_TYPES) : "none", __entry->flags ? 
__print_flags(__entry->flags, "|", - KGSL_CMDBATCH_FLAGS) : "none", + KGSL_DRAWOBJ_FLAGS) : "none", __entry->start, __entry->retire, __entry->rb_id, __entry->rptr, __entry->wptr, @@ -147,16 +149,16 @@ TRACE_EVENT(adreno_cmdbatch_retired, ); TRACE_EVENT(adreno_cmdbatch_fault, - TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int fault), - TP_ARGS(cmdbatch, fault), + TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int fault), + TP_ARGS(cmdobj, fault), TP_STRUCT__entry( __field(unsigned int, id) __field(unsigned int, timestamp) __field(unsigned int, fault) ), TP_fast_assign( - __entry->id = cmdbatch->context->id; - __entry->timestamp = cmdbatch->timestamp; + __entry->id = cmdobj->base.context->id; + __entry->timestamp = cmdobj->base.timestamp; __entry->fault = fault; ), TP_printk( @@ -171,16 +173,16 @@ TRACE_EVENT(adreno_cmdbatch_fault, ); TRACE_EVENT(adreno_cmdbatch_recovery, - TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int action), - TP_ARGS(cmdbatch, action), + TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int action), + TP_ARGS(cmdobj, action), TP_STRUCT__entry( __field(unsigned int, id) __field(unsigned int, timestamp) __field(unsigned int, action) ), TP_fast_assign( - __entry->id = cmdbatch->context->id; - __entry->timestamp = cmdbatch->timestamp; + __entry->id = cmdobj->base.context->id; + __entry->timestamp = cmdobj->base.timestamp; __entry->action = action; ), TP_printk( diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 88581b079246..add4590bbb90 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -36,7 +36,7 @@ #include "kgsl_cffdump.h" #include "kgsl_log.h" #include "kgsl_sharedmem.h" -#include "kgsl_cmdbatch.h" +#include "kgsl_drawobj.h" #include "kgsl_device.h" #include "kgsl_trace.h" #include "kgsl_sync.h" @@ -1497,11 +1497,17 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, struct kgsl_ringbuffer_issueibcmds *param = data; struct kgsl_device *device = dev_priv->device; struct kgsl_context *context; - struct kgsl_cmdbatch *cmdbatch = NULL; + struct kgsl_drawobj *drawobj; + struct kgsl_drawobj_cmd *cmdobj; long result = -EINVAL; /* The legacy functions don't support synchronization commands */ - if ((param->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))) + if ((param->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER))) + return -EINVAL; + + /* Sanity check the number of IBs */ + if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST && + (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS)) return -EINVAL; /* Get the context */ @@ -1509,23 +1515,20 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, if (context == NULL) return -EINVAL; - /* Create a command batch */ - cmdbatch = kgsl_cmdbatch_create(device, context, param->flags); - if (IS_ERR(cmdbatch)) { - result = PTR_ERR(cmdbatch); - goto done; + cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags, + CMDOBJ_TYPE); + if (IS_ERR(cmdobj)) { + kgsl_context_put(context); + return PTR_ERR(cmdobj); } - if (param->flags & KGSL_CMDBATCH_SUBMIT_IB_LIST) { - /* Sanity check the number of IBs */ - if (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS) { - result = -EINVAL; - goto done; - } - result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch, + drawobj = DRAWOBJ(cmdobj); + + if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST) + result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj, (void __user *) param->ibdesc_addr, param->numibs); - } else { + else { struct kgsl_ibdesc ibdesc; /* Ultra legacy path */ @@ -1533,83 +1536,119 @@ 
long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, ibdesc.sizedwords = param->numibs; ibdesc.ctrl = 0; - result = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc); + result = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc); } - if (result) - goto done; - - result = dev_priv->device->ftbl->issueibcmds(dev_priv, context, - cmdbatch, ¶m->timestamp); + if (result == 0) + result = dev_priv->device->ftbl->queue_cmds(dev_priv, context, + &drawobj, 1, ¶m->timestamp); -done: /* * -EPROTO is a "success" error - it just tells the user that the * context had previously faulted */ if (result && result != -EPROTO) - kgsl_cmdbatch_destroy(cmdbatch); + kgsl_drawobj_destroy(drawobj); kgsl_context_put(context); return result; } +/* Returns 0 on failure. Returns command type(s) on success */ +static unsigned int _process_command_input(struct kgsl_device *device, + unsigned int flags, unsigned int numcmds, + unsigned int numobjs, unsigned int numsyncs) +{ + if (numcmds > KGSL_MAX_NUMIBS || + numobjs > KGSL_MAX_NUMIBS || + numsyncs > KGSL_MAX_SYNCPOINTS) + return 0; + + /* + * The SYNC bit is supposed to identify a dummy sync object + * so warn the user if they specified any IBs with it. + * A MARKER command can either have IBs or not but if the + * command has 0 IBs it is automatically assumed to be a marker. + */ + + /* If they specify the flag, go with what they say */ + if (flags & KGSL_DRAWOBJ_MARKER) + return MARKEROBJ_TYPE; + else if (flags & KGSL_DRAWOBJ_SYNC) + return SYNCOBJ_TYPE; + + /* If not, deduce what they meant */ + if (numsyncs && numcmds) + return SYNCOBJ_TYPE | CMDOBJ_TYPE; + else if (numsyncs) + return SYNCOBJ_TYPE; + else if (numcmds) + return CMDOBJ_TYPE; + else if (numcmds == 0) + return MARKEROBJ_TYPE; + + return 0; +} + long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_submit_commands *param = data; struct kgsl_device *device = dev_priv->device; struct kgsl_context *context; - struct kgsl_cmdbatch *cmdbatch = NULL; - long result = -EINVAL; - - /* - * The SYNC bit is supposed to identify a dummy sync object so warn the - * user if they specified any IBs with it. A MARKER command can either - * have IBs or not but if the command has 0 IBs it is automatically - * assumed to be a marker. If none of the above make sure that the user - * specified a sane number of IBs - */ - - if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds) - KGSL_DEV_ERR_ONCE(device, - "Commands specified with the SYNC flag. 
They will be ignored\n"); - else if (param->numcmds > KGSL_MAX_NUMIBS) - return -EINVAL; - else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0) - param->flags |= KGSL_CMDBATCH_MARKER; + struct kgsl_drawobj *drawobj[2]; + unsigned int type; + long result; + unsigned int i = 0; - /* Make sure that we don't have too many syncpoints */ - if (param->numsyncs > KGSL_MAX_SYNCPOINTS) + type = _process_command_input(device, param->flags, param->numcmds, 0, + param->numsyncs); + if (!type) return -EINVAL; context = kgsl_context_get_owner(dev_priv, param->context_id); if (context == NULL) return -EINVAL; - /* Create a command batch */ - cmdbatch = kgsl_cmdbatch_create(device, context, param->flags); - if (IS_ERR(cmdbatch)) { - result = PTR_ERR(cmdbatch); - goto done; + if (type & SYNCOBJ_TYPE) { + struct kgsl_drawobj_sync *syncobj = + kgsl_drawobj_sync_create(device, context); + if (IS_ERR(syncobj)) { + result = PTR_ERR(syncobj); + goto done; + } + + drawobj[i++] = DRAWOBJ(syncobj); + + result = kgsl_drawobj_sync_add_syncpoints(device, syncobj, + param->synclist, param->numsyncs); + if (result) + goto done; } - result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch, - param->cmdlist, param->numcmds); - if (result) - goto done; + if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) { + struct kgsl_drawobj_cmd *cmdobj = + kgsl_drawobj_cmd_create(device, + context, param->flags, type); + if (IS_ERR(cmdobj)) { + result = PTR_ERR(cmdobj); + goto done; + } - result = kgsl_cmdbatch_add_syncpoints(device, cmdbatch, - param->synclist, param->numsyncs); - if (result) - goto done; + drawobj[i++] = DRAWOBJ(cmdobj); - /* If no profiling buffer was specified, clear the flag */ - if (cmdbatch->profiling_buf_entry == NULL) - cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING; + result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj, + param->cmdlist, param->numcmds); + if (result) + goto done; - result = dev_priv->device->ftbl->issueibcmds(dev_priv, context, - cmdbatch, ¶m->timestamp); + /* If no profiling buffer was specified, clear the flag */ + if (cmdobj->profiling_buf_entry == NULL) + DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING; + } + + result = device->ftbl->queue_cmds(dev_priv, context, drawobj, + i, ¶m->timestamp); done: /* @@ -1617,7 +1656,9 @@ done: * context had previously faulted */ if (result && result != -EPROTO) - kgsl_cmdbatch_destroy(cmdbatch); + while (i--) + kgsl_drawobj_destroy(drawobj[i]); + kgsl_context_put(context); return result; @@ -1629,63 +1670,69 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv, struct kgsl_gpu_command *param = data; struct kgsl_device *device = dev_priv->device; struct kgsl_context *context; - struct kgsl_cmdbatch *cmdbatch = NULL; - - long result = -EINVAL; + struct kgsl_drawobj *drawobj[2]; + unsigned int type; + long result; + unsigned int i = 0; - /* - * The SYNC bit is supposed to identify a dummy sync object so warn the - * user if they specified any IBs with it. A MARKER command can either - * have IBs or not but if the command has 0 IBs it is automatically - * assumed to be a marker. If none of the above make sure that the user - * specified a sane number of IBs - */ - if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds) - KGSL_DEV_ERR_ONCE(device, - "Commands specified with the SYNC flag. 
@@ -1629,63 +1670,69 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv, struct kgsl_gpu_command *param = data; struct kgsl_device *device = dev_priv->device; struct kgsl_context *context; - struct kgsl_cmdbatch *cmdbatch = NULL; - - long result = -EINVAL; + struct kgsl_drawobj *drawobj[2]; + unsigned int type; + long result; + unsigned int i = 0; - /* - * The SYNC bit is supposed to identify a dummy sync object so warn the - * user if they specified any IBs with it. A MARKER command can either - * have IBs or not but if the command has 0 IBs it is automatically - * assumed to be a marker. If none of the above make sure that the user - * specified a sane number of IBs - */ - if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds) - KGSL_DEV_ERR_ONCE(device, - "Commands specified with the SYNC flag. They will be ignored\n"); - else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0) - param->flags |= KGSL_CMDBATCH_MARKER; - - /* Make sure that the memobj and syncpoint count isn't too big */ - if (param->numcmds > KGSL_MAX_NUMIBS || - param->numobjs > KGSL_MAX_NUMIBS || - param->numsyncs > KGSL_MAX_SYNCPOINTS) + type = _process_command_input(device, param->flags, param->numcmds, + param->numobjs, param->numsyncs); + if (!type) return -EINVAL; context = kgsl_context_get_owner(dev_priv, param->context_id); if (context == NULL) return -EINVAL; - cmdbatch = kgsl_cmdbatch_create(device, context, param->flags); - if (IS_ERR(cmdbatch)) { - result = PTR_ERR(cmdbatch); - goto done; + if (type & SYNCOBJ_TYPE) { + struct kgsl_drawobj_sync *syncobj = + kgsl_drawobj_sync_create(device, context); + + if (IS_ERR(syncobj)) { + result = PTR_ERR(syncobj); + goto done; + } + + drawobj[i++] = DRAWOBJ(syncobj); + + result = kgsl_drawobj_sync_add_synclist(device, syncobj, + to_user_ptr(param->synclist), + param->syncsize, param->numsyncs); + if (result) + goto done; } - result = kgsl_cmdbatch_add_cmdlist(device, cmdbatch, - to_user_ptr(param->cmdlist), - param->cmdsize, param->numcmds); - if (result) - goto done; + if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) { + struct kgsl_drawobj_cmd *cmdobj = + kgsl_drawobj_cmd_create(device, + context, param->flags, type); - result = kgsl_cmdbatch_add_memlist(device, cmdbatch, - to_user_ptr(param->objlist), - param->objsize, param->numobjs); - if (result) - goto done; + if (IS_ERR(cmdobj)) { + result = PTR_ERR(cmdobj); + goto done; + } - result = kgsl_cmdbatch_add_synclist(device, cmdbatch, - to_user_ptr(param->synclist), - param->syncsize, param->numsyncs); - if (result) - goto done; + drawobj[i++] = DRAWOBJ(cmdobj); + + result = kgsl_drawobj_cmd_add_cmdlist(device, cmdobj, + to_user_ptr(param->cmdlist), + param->cmdsize, param->numcmds); + if (result) + goto done; - /* If no profiling buffer was specified, clear the flag */ - if (cmdbatch->profiling_buf_entry == NULL) - cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING; + result = kgsl_drawobj_cmd_add_memlist(device, cmdobj, + to_user_ptr(param->objlist), + param->objsize, param->numobjs); + if (result) + goto done; + + /* If no profiling buffer was specified, clear the flag */ + if (cmdobj->profiling_buf_entry == NULL) + DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING; + } - result = dev_priv->device->ftbl->issueibcmds(dev_priv, context, - cmdbatch, &param->timestamp); + result = device->ftbl->queue_cmds(dev_priv, context, drawobj, + i, &param->timestamp); done: /* @@ -1693,7 +1740,8 @@ done: * context had previously faulted */ if (result && result != -EPROTO) - kgsl_cmdbatch_destroy(cmdbatch); + while (i--) + kgsl_drawobj_destroy(drawobj[i]); kgsl_context_put(context); return result; @@ -4600,7 +4648,7 @@ static void kgsl_core_exit(void) kgsl_driver.class = NULL; } - kgsl_cmdbatch_exit(); + kgsl_drawobj_exit(); kgsl_memfree_exit(); unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX); @@ -4676,7 +4724,7 @@ static int __init kgsl_core_init(void) kgsl_events_init(); - result = kgsl_cmdbatch_init(); + result = kgsl_drawobj_init(); if (result) goto err; diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h index 25f5de6ce645..826c4edb3582 100644 --- a/drivers/gpu/msm/kgsl.h +++ b/drivers/gpu/msm/kgsl.h @@ -28,6 +28,25 @@ #include <linux/uaccess.h> #include <asm/cacheflush.h> +/* + * --- kgsl drawobj flags --- + * These flags are the same as the --- cmdbatch flags --- + * but renamed to reflect that cmdbatch is 
renamed to drawobj. + */ +#define KGSL_DRAWOBJ_MEMLIST KGSL_CMDBATCH_MEMLIST +#define KGSL_DRAWOBJ_MARKER KGSL_CMDBATCH_MARKER +#define KGSL_DRAWOBJ_SUBMIT_IB_LIST KGSL_CMDBATCH_SUBMIT_IB_LIST +#define KGSL_DRAWOBJ_CTX_SWITCH KGSL_CMDBATCH_CTX_SWITCH +#define KGSL_DRAWOBJ_PROFILING KGSL_CMDBATCH_PROFILING +#define KGSL_DRAWOBJ_PROFILING_KTIME KGSL_CMDBATCH_PROFILING_KTIME +#define KGSL_DRAWOBJ_END_OF_FRAME KGSL_CMDBATCH_END_OF_FRAME +#define KGSL_DRAWOBJ_SYNC KGSL_CMDBATCH_SYNC +#define KGSL_DRAWOBJ_PWR_CONSTRAINT KGSL_CMDBATCH_PWR_CONSTRAINT +#define KGSL_DRAWOBJ_SPARSE KGSL_CMDBATCH_SPARSE + +#define kgsl_drawobj_profiling_buffer kgsl_cmdbatch_profiling_buffer + + /* The number of memstore arrays limits the number of contexts allowed. * If more contexts are needed, update multiple for MEMSTORE_SIZE */ @@ -579,4 +598,19 @@ static inline void __user *to_user_ptr(uint64_t address) return (void __user *)(uintptr_t)address; } +static inline void kgsl_gpu_sysfs_add_link(struct kobject *dst, + struct kobject *src, const char *src_name, + const char *dst_name) +{ + struct kernfs_node *old; + + if (dst == NULL || src == NULL) + return; + + old = sysfs_get_dirent(src->sd, src_name); + if (IS_ERR_OR_NULL(old)) + return; + + kernfs_create_link(dst->sd, dst_name, old); +} #endif /* __KGSL_H */ diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c index 8e783f8ce017..3337570477f9 100644 --- a/drivers/gpu/msm/kgsl_cffdump.c +++ b/drivers/gpu/msm/kgsl_cffdump.c @@ -705,7 +705,7 @@ static int kgsl_cffdump_capture_adreno_ib_cff(struct kgsl_device *device, */ int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device, struct kgsl_context *context, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj_cmd *cmdobj) { int ret = 0; struct kgsl_memobj_node *ib; @@ -713,7 +713,7 @@ int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device, if (!device->cff_dump_enable) return 0; /* Dump CFF for IB and all objects in it */ - list_for_each_entry(ib, &cmdbatch->cmdlist, node) { + list_for_each_entry(ib, &cmdobj->cmdlist, node) { ret = kgsl_cffdump_capture_adreno_ib_cff( device, context->proc_priv, ib->gpuaddr, ib->size >> 2); diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h index 315a097ba817..14bc397cb570 100644 --- a/drivers/gpu/msm/kgsl_cffdump.h +++ b/drivers/gpu/msm/kgsl_cffdump.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2011,2013-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2011,2013-2016, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -58,7 +58,7 @@ int kgsl_cff_dump_enable_set(void *data, u64 val); int kgsl_cff_dump_enable_get(void *data, u64 *val); int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device, struct kgsl_context *context, - struct kgsl_cmdbatch *cmdbatch); + struct kgsl_drawobj_cmd *cmdobj); void kgsl_cffdump_printline(int id, uint opcode, uint op1, uint op2, uint op3, uint op4, uint op5); @@ -164,7 +164,7 @@ static inline void kgsl_cffdump_user_event(struct kgsl_device *device, static inline int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device, struct kgsl_context *context, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj_cmd *cmdobj) { return 0; } diff --git a/drivers/gpu/msm/kgsl_cmdbatch.h b/drivers/gpu/msm/kgsl_cmdbatch.h deleted file mode 100644 index d5cbf375b5d3..000000000000 --- a/drivers/gpu/msm/kgsl_cmdbatch.h +++ /dev/null @@ -1,168 +0,0 @@ -/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __KGSL_CMDBATCH_H -#define __KGSL_CMDBATCH_H - -#define KGSL_CMDBATCH_FLAGS \ - { KGSL_CMDBATCH_MARKER, "MARKER" }, \ - { KGSL_CMDBATCH_CTX_SWITCH, "CTX_SWITCH" }, \ - { KGSL_CMDBATCH_SYNC, "SYNC" }, \ - { KGSL_CMDBATCH_END_OF_FRAME, "EOF" }, \ - { KGSL_CMDBATCH_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \ - { KGSL_CMDBATCH_SUBMIT_IB_LIST, "IB_LIST" } - -/** - * struct kgsl_cmdbatch - KGSl command descriptor - * @device: KGSL GPU device that the command was created for - * @context: KGSL context that created the command - * @timestamp: Timestamp assigned to the command - * @flags: flags - * @priv: Internal flags - * @fault_policy: Internal policy describing how to handle this command in case - * of a fault - * @fault_recovery: recovery actions actually tried for this batch - * @refcount: kref structure to maintain the reference count - * @cmdlist: List of IBs to issue - * @memlist: List of all memory used in this command batch - * @synclist: Array of context/timestamp tuples to wait for before issuing - * @numsyncs: Number of sync entries in the array - * @pending: Bitmask of sync events that are active - * @timer: a timer used to track possible sync timeouts for this cmdbatch - * @marker_timestamp: For markers, the timestamp of the last "real" command that - * was queued - * @profiling_buf_entry: Mem entry containing the profiling buffer - * @profiling_buffer_gpuaddr: GPU virt address of the profile buffer added here - * for easy access - * @profile_index: Index to store the start/stop ticks in the kernel profiling - * buffer - * @submit_ticks: Variable to hold ticks at the time of cmdbatch submit. - * @global_ts: The ringbuffer timestamp corresponding to this cmdbatch - * @timeout_jiffies: For a syncpoint cmdbatch the jiffies at which the - * timer will expire - * This structure defines an atomic batch of command buffers issued from - * userspace. 
- */ -struct kgsl_cmdbatch { - struct kgsl_device *device; - struct kgsl_context *context; - uint32_t timestamp; - uint32_t flags; - unsigned long priv; - unsigned long fault_policy; - unsigned long fault_recovery; - struct kref refcount; - struct list_head cmdlist; - struct list_head memlist; - struct kgsl_cmdbatch_sync_event *synclist; - unsigned int numsyncs; - unsigned long pending; - struct timer_list timer; - unsigned int marker_timestamp; - struct kgsl_mem_entry *profiling_buf_entry; - uint64_t profiling_buffer_gpuaddr; - unsigned int profile_index; - uint64_t submit_ticks; - unsigned int global_ts; - unsigned long timeout_jiffies; -}; - -/** - * struct kgsl_cmdbatch_sync_event - * @id: identifer (positiion within the pending bitmap) - * @type: Syncpoint type - * @cmdbatch: Pointer to the cmdbatch that owns the sync event - * @context: Pointer to the KGSL context that owns the cmdbatch - * @timestamp: Pending timestamp for the event - * @handle: Pointer to a sync fence handle - * @device: Pointer to the KGSL device - */ -struct kgsl_cmdbatch_sync_event { - unsigned int id; - int type; - struct kgsl_cmdbatch *cmdbatch; - struct kgsl_context *context; - unsigned int timestamp; - struct kgsl_sync_fence_waiter *handle; - struct kgsl_device *device; -}; - -/** - * enum kgsl_cmdbatch_priv - Internal cmdbatch flags - * @CMDBATCH_FLAG_SKIP - skip the entire command batch - * @CMDBATCH_FLAG_FORCE_PREAMBLE - Force the preamble on for the cmdbatch - * @CMDBATCH_FLAG_WFI - Force wait-for-idle for the submission - * @CMDBATCH_FLAG_PROFILE - store the start / retire ticks for the command batch - * in the profiling buffer - * @CMDBATCH_FLAG_FENCE_LOG - Set if the cmdbatch is dumping fence logs via the - * cmdbatch timer - this is used to avoid recursion - */ - -enum kgsl_cmdbatch_priv { - CMDBATCH_FLAG_SKIP = 0, - CMDBATCH_FLAG_FORCE_PREAMBLE, - CMDBATCH_FLAG_WFI, - CMDBATCH_FLAG_PROFILE, - CMDBATCH_FLAG_FENCE_LOG, -}; - - -int kgsl_cmdbatch_add_memobj(struct kgsl_cmdbatch *cmdbatch, - struct kgsl_ibdesc *ibdesc); - -int kgsl_cmdbatch_add_sync(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, - struct kgsl_cmd_syncpoint *sync); - -struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device, - struct kgsl_context *context, unsigned int flags); -int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc); -int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count); -int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count); -int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, - unsigned int size, unsigned int count); -int kgsl_cmdbatch_add_memlist(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, - unsigned int size, unsigned int count); -int kgsl_cmdbatch_add_synclist(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, - unsigned int size, unsigned int count); - -int kgsl_cmdbatch_init(void); -void kgsl_cmdbatch_exit(void); - -void kgsl_dump_syncpoints(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch); - -void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch); - -void kgsl_cmdbatch_destroy_object(struct kref *kref); - -static inline bool kgsl_cmdbatch_events_pending(struct kgsl_cmdbatch *cmdbatch) -{ - return !bitmap_empty(&cmdbatch->pending, 
KGSL_MAX_SYNCPOINTS); -} - -static inline bool kgsl_cmdbatch_event_pending(struct kgsl_cmdbatch *cmdbatch, - unsigned int bit) -{ - if (bit >= KGSL_MAX_SYNCPOINTS) - return false; - - return test_bit(bit, &cmdbatch->pending); -} - -#endif /* __KGSL_CMDBATCH_H */ diff --git a/drivers/gpu/msm/kgsl_compat.h b/drivers/gpu/msm/kgsl_compat.h index ca1685e5fcf5..7681d74fb108 100644 --- a/drivers/gpu/msm/kgsl_compat.h +++ b/drivers/gpu/msm/kgsl_compat.h @@ -236,8 +236,8 @@ static inline compat_size_t sizet_to_compat(size_t size) return (compat_size_t)size; } -int kgsl_cmdbatch_create_compat(struct kgsl_device *device, unsigned int flags, - struct kgsl_cmdbatch *cmdbatch, void __user *cmdlist, +int kgsl_drawobj_create_compat(struct kgsl_device *device, unsigned int flags, + struct kgsl_drawobj *drawobj, void __user *cmdlist, unsigned int numcmds, void __user *synclist, unsigned int numsyncs); @@ -245,8 +245,8 @@ long kgsl_compat_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); #else -static inline int kgsl_cmdbatch_create_compat(struct kgsl_device *device, - unsigned int flags, struct kgsl_cmdbatch *cmdbatch, +static inline int kgsl_drawobj_create_compat(struct kgsl_device *device, + unsigned int flags, struct kgsl_drawobj *drawobj, void __user *cmdlist, unsigned int numcmds, void __user *synclist, unsigned int numsyncs) { diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index 0df6dd8628a5..04935e8d0019 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -25,7 +25,7 @@ #include "kgsl_pwrscale.h" #include "kgsl_snapshot.h" #include "kgsl_sharedmem.h" -#include "kgsl_cmdbatch.h" +#include "kgsl_drawobj.h" #define KGSL_IOCTL_FUNC(_cmd, _func) \ [_IOC_NR((_cmd))] = \ @@ -127,9 +127,9 @@ struct kgsl_functable { unsigned int msecs); int (*readtimestamp) (struct kgsl_device *device, void *priv, enum kgsl_timestamp_type type, unsigned int *timestamp); - int (*issueibcmds) (struct kgsl_device_private *dev_priv, - struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch, - uint32_t *timestamps); + int (*queue_cmds)(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, struct kgsl_drawobj *drawobj[], + uint32_t count, uint32_t *timestamp); void (*power_stats)(struct kgsl_device *device, struct kgsl_power_stats *stats); unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid); @@ -167,6 +167,8 @@ struct kgsl_functable { void (*regulator_disable_poll)(struct kgsl_device *device); void (*clk_set_options)(struct kgsl_device *device, const char *name, struct clk *clk); + void (*gpu_model)(struct kgsl_device *device, char *str, + size_t bufsz); }; struct kgsl_ioctl { @@ -184,7 +186,7 @@ long kgsl_ioctl_helper(struct file *filep, unsigned int cmd, unsigned long arg, /** * struct kgsl_memobj_node - Memory object descriptor - * @node: Local list node for the cmdbatch + * @node: Local list node for the object * @id: GPU memory ID for the object * offset: Offset within the object * @gpuaddr: GPU address for the object @@ -233,7 +235,7 @@ struct kgsl_device { struct kgsl_mmu mmu; struct completion hwaccess_gate; - struct completion cmdbatch_gate; + struct completion halt_gate; const struct kgsl_functable *ftbl; struct work_struct idle_check_ws; struct timer_list idle_timer; @@ -261,6 +263,7 @@ struct kgsl_device { struct kgsl_snapshot *snapshot; u32 snapshot_faultcount; /* Total number of faults since boot */ + bool force_panic; /* Force panic after snapshot dump */ struct kobject snapshot_kobj; struct kobject ppd_kobj; @@ -281,6 +284,7 @@ struct kgsl_device { /* Number of active contexts seen globally for this device */ int active_context_count; + struct kobject *gpu_sysfs_kobj; }; #define KGSL_MMU_DEVICE(_mmu) \ @@ -288,7 +292,7 @@ struct kgsl_device { #define KGSL_DEVICE_COMMON_INIT(_dev) \ .hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\ - .cmdbatch_gate = COMPLETION_INITIALIZER((_dev).cmdbatch_gate),\ + .halt_gate = COMPLETION_INITIALIZER((_dev).halt_gate),\ .idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\ kgsl_idle_check),\ .context_idr = IDR_INIT((_dev).context_idr),\
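/*
 * (Editorial sketch, not part of the patch: the shape of an implementation
 * of the new ->queue_cmds() hook declared above. The example_* names are
 * hypothetical; the real adreno implementation is outside this excerpt.)
 */
static int example_queue_cmds(struct kgsl_device_private *dev_priv,
		struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
		uint32_t count, uint32_t *timestamp)
{
	uint32_t i;
	int ret;

	/* one timestamp covers the whole array; objects are taken in order */
	for (i = 0; i < count; i++) {
		ret = example_queue_one(dev_priv, context, drawobj[i],
				timestamp);
		if (ret)
			return ret;
	}

	return 0;
}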
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.c b/drivers/gpu/msm/kgsl_drawobj.c index 6272410ce544..7840daa6a3e2 100644 --- a/drivers/gpu/msm/kgsl_cmdbatch.c +++ b/drivers/gpu/msm/kgsl_drawobj.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -11,17 +11,17 @@ */ /* - * KGSL command batch management - * A command batch is a single submission from userland. The cmdbatch + * KGSL drawobj management + * A drawobj is a single submission from userland. The drawobj * encapsulates everything about the submission : command buffers, flags and * sync points. * * Sync points are events that need to expire before the - * cmdbatch can be queued to the hardware. All synpoints are contained in an - * array of kgsl_cmdbatch_sync_event structs in the command batch. There can be + * drawobj can be queued to the hardware. All syncpoints are contained in an + * array of kgsl_drawobj_sync_event structs in the drawobj. There can be * multiple types of events both internal ones (GPU events) and external * triggers. As the events expire bits are cleared in a pending bitmap stored - * in the command batch. The GPU will submit the command as soon as the bitmap + * in the drawobj. The GPU will submit the command as soon as the bitmap * goes to zero indicating no more pending events. 
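 *
 * (Editorial sketch of that lifecycle, using names from this file: adding
 * a syncpoint does id = syncobj->numsyncs++ and set_bit(id,
 * &syncobj->pending); each expiring event does test_and_clear_bit(); once
 * bitmap_empty(&syncobj->pending, KGSL_MAX_SYNCPOINTS) is true the
 * watchdog timer is deleted and the context is rescheduled through
 * ftbl->drawctxt_sched().)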
*/ @@ -31,7 +31,7 @@ #include "kgsl.h" #include "kgsl_device.h" -#include "kgsl_cmdbatch.h" +#include "kgsl_drawobj.h" #include "kgsl_sync.h" #include "kgsl_trace.h" #include "kgsl_compat.h" @@ -42,26 +42,43 @@ */ static struct kmem_cache *memobjs_cache; -/** - * kgsl_cmdbatch_put() - Decrement the refcount for a command batch object - * @cmdbatch: Pointer to the command batch object - */ -static inline void kgsl_cmdbatch_put(struct kgsl_cmdbatch *cmdbatch) +static void drawobj_destroy_object(struct kref *kref) { - if (cmdbatch) - kref_put(&cmdbatch->refcount, kgsl_cmdbatch_destroy_object); + struct kgsl_drawobj *drawobj = container_of(kref, + struct kgsl_drawobj, refcount); + struct kgsl_drawobj_sync *syncobj; + + kgsl_context_put(drawobj->context); + + switch (drawobj->type) { + case SYNCOBJ_TYPE: + syncobj = SYNCOBJ(drawobj); + kfree(syncobj->synclist); + kfree(syncobj); + break; + case CMDOBJ_TYPE: + case MARKEROBJ_TYPE: + kfree(CMDOBJ(drawobj)); + break; + } +} + +static inline void drawobj_put(struct kgsl_drawobj *drawobj) +{ + if (drawobj) + kref_put(&drawobj->refcount, drawobj_destroy_object); } void kgsl_dump_syncpoints(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch) + struct kgsl_drawobj_sync *syncobj) { - struct kgsl_cmdbatch_sync_event *event; + struct kgsl_drawobj_sync_event *event; unsigned int i; - for (i = 0; i < cmdbatch->numsyncs; i++) { - event = &cmdbatch->synclist[i]; + for (i = 0; i < syncobj->numsyncs; i++) { + event = &syncobj->synclist[i]; - if (!kgsl_cmdbatch_event_pending(cmdbatch, i)) + if (!kgsl_drawobj_event_pending(syncobj, i)) continue; switch (event->type) { @@ -90,32 +107,33 @@ void kgsl_dump_syncpoints(struct kgsl_device *device, } } -static void _kgsl_cmdbatch_timer(unsigned long data) +static void syncobj_timer(unsigned long data) { struct kgsl_device *device; - struct kgsl_cmdbatch *cmdbatch = (struct kgsl_cmdbatch *) data; - struct kgsl_cmdbatch_sync_event *event; + struct kgsl_drawobj_sync *syncobj = (struct kgsl_drawobj_sync *) data; + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); + struct kgsl_drawobj_sync_event *event; unsigned int i; - if (cmdbatch == NULL || cmdbatch->context == NULL) + if (syncobj == NULL || drawobj->context == NULL) return; - device = cmdbatch->context->device; + device = drawobj->context->device; dev_err(device->dev, "kgsl: possible gpu syncpoint deadlock for context %d timestamp %d\n", - cmdbatch->context->id, cmdbatch->timestamp); + drawobj->context->id, drawobj->timestamp); - set_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv); - kgsl_context_dump(cmdbatch->context); - clear_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv); + set_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv); + kgsl_context_dump(drawobj->context); + clear_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv); dev_err(device->dev, " pending events:\n"); - for (i = 0; i < cmdbatch->numsyncs; i++) { - event = &cmdbatch->synclist[i]; + for (i = 0; i < syncobj->numsyncs; i++) { + event = &syncobj->synclist[i]; - if (!kgsl_cmdbatch_event_pending(cmdbatch, i)) + if (!kgsl_drawobj_event_pending(syncobj, i)) continue; switch (event->type) { @@ -137,48 +155,31 @@ static void _kgsl_cmdbatch_timer(unsigned long data) dev_err(device->dev, "--gpu syncpoint deadlock print end--\n"); } -/** - * kgsl_cmdbatch_destroy_object() - Destroy a cmdbatch object - * @kref: Pointer to the kref structure for this object - * - * Actually destroy a command batch object. 
Called from kgsl_cmdbatch_put - */ -void kgsl_cmdbatch_destroy_object(struct kref *kref) -{ - struct kgsl_cmdbatch *cmdbatch = container_of(kref, - struct kgsl_cmdbatch, refcount); - - kgsl_context_put(cmdbatch->context); - - kfree(cmdbatch->synclist); - kfree(cmdbatch); -} -EXPORT_SYMBOL(kgsl_cmdbatch_destroy_object); - /* * a generic function to retire a pending sync event and (possibly) * kick the dispatcher */ -static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device, - struct kgsl_cmdbatch_sync_event *event) +static void drawobj_sync_expire(struct kgsl_device *device, + struct kgsl_drawobj_sync_event *event) { + struct kgsl_drawobj_sync *syncobj = event->syncobj; /* * Clear the event from the pending mask - if it is already clear, then * leave without doing anything useful */ - if (!test_and_clear_bit(event->id, &event->cmdbatch->pending)) + if (!test_and_clear_bit(event->id, &syncobj->pending)) return; /* * If no more pending events, delete the timer and schedule the command * for dispatch */ - if (!kgsl_cmdbatch_events_pending(event->cmdbatch)) { - del_timer_sync(&event->cmdbatch->timer); + if (!kgsl_drawobj_events_pending(event->syncobj)) { + del_timer_sync(&syncobj->timer); if (device->ftbl->drawctxt_sched) device->ftbl->drawctxt_sched(device, - event->cmdbatch->context); + event->syncobj->base.context); } } @@ -186,20 +187,20 @@ static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device, * This function is called by the GPU event when the sync event timestamp * expires */ -static void kgsl_cmdbatch_sync_func(struct kgsl_device *device, +static void drawobj_sync_func(struct kgsl_device *device, struct kgsl_event_group *group, void *priv, int result) { - struct kgsl_cmdbatch_sync_event *event = priv; + struct kgsl_drawobj_sync_event *event = priv; - trace_syncpoint_timestamp_expire(event->cmdbatch, + trace_syncpoint_timestamp_expire(event->syncobj, event->context, event->timestamp); - kgsl_cmdbatch_sync_expire(device, event); + drawobj_sync_expire(device, event); kgsl_context_put(event->context); - kgsl_cmdbatch_put(event->cmdbatch); + drawobj_put(&event->syncobj->base); } -static inline void _free_memobj_list(struct list_head *list) +static inline void memobj_list_free(struct list_head *list) { struct kgsl_memobj_node *mem, *tmpmem; @@ -210,39 +211,28 @@ static inline void _free_memobj_list(struct list_head *list) } } -/** - * kgsl_cmdbatch_destroy() - Destroy a cmdbatch structure - * @cmdbatch: Pointer to the command batch object to destroy - * - * Start the process of destroying a command batch. Cancel any pending events - * and decrement the refcount. Asynchronous events can still signal after - * kgsl_cmdbatch_destroy has returned. 
- */ -void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch) +static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj) { - unsigned int i; + struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj); unsigned long pending; - - if (IS_ERR_OR_NULL(cmdbatch)) - return; + unsigned int i; /* Zap the canary timer */ - del_timer_sync(&cmdbatch->timer); + del_timer_sync(&syncobj->timer); /* * Copy off the pending list and clear all pending events - this will * render any subsequent asynchronous callback harmless */ - bitmap_copy(&pending, &cmdbatch->pending, KGSL_MAX_SYNCPOINTS); - bitmap_zero(&cmdbatch->pending, KGSL_MAX_SYNCPOINTS); + bitmap_copy(&pending, &syncobj->pending, KGSL_MAX_SYNCPOINTS); + bitmap_zero(&syncobj->pending, KGSL_MAX_SYNCPOINTS); /* * Clear all pending events - this will render any subsequent async * callbacks harmless */ - - for (i = 0; i < cmdbatch->numsyncs; i++) { - struct kgsl_cmdbatch_sync_event *event = &cmdbatch->synclist[i]; + for (i = 0; i < syncobj->numsyncs; i++) { + struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i]; /* Don't do anything if the event has already expired */ if (!test_bit(i, &pending)) @@ -250,127 +240,152 @@ void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch) switch (event->type) { case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: - kgsl_cancel_event(cmdbatch->device, + kgsl_cancel_event(drawobj->device, &event->context->events, event->timestamp, - kgsl_cmdbatch_sync_func, event); + drawobj_sync_func, event); break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: if (kgsl_sync_fence_async_cancel(event->handle)) - kgsl_cmdbatch_put(cmdbatch); + drawobj_put(drawobj); break; } } /* - * Release the the refcount on the mem entry associated with the - * cmdbatch profiling buffer + * If we cancelled an event, there's a good chance that the context is + * on a dispatcher queue, so schedule to get it removed. + */ + if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) && + drawobj->device->ftbl->drawctxt_sched) + drawobj->device->ftbl->drawctxt_sched(drawobj->device, + drawobj->context); + +} + +static void drawobj_destroy_cmd(struct kgsl_drawobj *drawobj) +{ + struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj); + + /* + * Release the refcount on the mem entry associated with the + * ib profiling buffer */ - if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING) - kgsl_mem_entry_put(cmdbatch->profiling_buf_entry); + if (cmdobj->base.flags & KGSL_DRAWOBJ_PROFILING) + kgsl_mem_entry_put(cmdobj->profiling_buf_entry); /* Destroy the cmdlist we created */ - _free_memobj_list(&cmdbatch->cmdlist); + memobj_list_free(&cmdobj->cmdlist); /* Destroy the memlist we created */ - _free_memobj_list(&cmdbatch->memlist); + memobj_list_free(&cmdobj->memlist); +} - /* - * If we cancelled an event, there's a good chance that the context is - * on a dispatcher queue, so schedule to get it removed. +/** + * kgsl_drawobj_destroy() - Destroy a kgsl object structure + * @obj: Pointer to the kgsl object to destroy + * + * Start the process of destroying a command batch. Cancel any pending events + * and decrement the refcount. Asynchronous events can still signal after + * kgsl_drawobj_destroy has returned. 
*/ - if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) && - cmdbatch->device->ftbl->drawctxt_sched) - cmdbatch->device->ftbl->drawctxt_sched(cmdbatch->device, - cmdbatch->context); +void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj) +{ + if (!drawobj) + return; + + if (drawobj->type & SYNCOBJ_TYPE) + drawobj_destroy_sync(drawobj); + else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) + drawobj_destroy_cmd(drawobj); + else + return; - kgsl_cmdbatch_put(cmdbatch); + drawobj_put(drawobj); } -EXPORT_SYMBOL(kgsl_cmdbatch_destroy); +EXPORT_SYMBOL(kgsl_drawobj_destroy); -/* - * A callback that gets registered with kgsl_sync_fence_async_wait and is fired - * when a fence is expired - */ -static void kgsl_cmdbatch_sync_fence_func(void *priv) +static void drawobj_sync_fence_func(void *priv) { - struct kgsl_cmdbatch_sync_event *event = priv; + struct kgsl_drawobj_sync_event *event = priv; - trace_syncpoint_fence_expire(event->cmdbatch, + trace_syncpoint_fence_expire(event->syncobj, event->handle ? event->handle->name : "unknown"); - kgsl_cmdbatch_sync_expire(event->device, event); + drawobj_sync_expire(event->device, event); - kgsl_cmdbatch_put(event->cmdbatch); + drawobj_put(&event->syncobj->base); } -/* kgsl_cmdbatch_add_sync_fence() - Add a new sync fence syncpoint +/* drawobj_add_sync_fence() - Add a new sync fence syncpoint * @device: KGSL device - * @cmdbatch: KGSL cmdbatch to add the sync point to - * @priv: Private sructure passed by the user + * @syncobj: KGSL sync obj to add the sync point to + * @priv: Private structure passed by the user * - * Add a new fence sync syncpoint to the cmdbatch. + * Add a new fence sync syncpoint to the sync obj. */ -static int kgsl_cmdbatch_add_sync_fence(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void *priv) +static int drawobj_add_sync_fence(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void *priv) { struct kgsl_cmd_syncpoint_fence *sync = priv; - struct kgsl_cmdbatch_sync_event *event; + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); + struct kgsl_drawobj_sync_event *event; unsigned int id; - kref_get(&cmdbatch->refcount); + kref_get(&drawobj->refcount); - id = cmdbatch->numsyncs++; + id = syncobj->numsyncs++; - event = &cmdbatch->synclist[id]; + event = &syncobj->synclist[id]; event->id = id; event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE; - event->cmdbatch = cmdbatch; + event->syncobj = syncobj; event->device = device; event->context = NULL; - set_bit(event->id, &cmdbatch->pending); + set_bit(event->id, &syncobj->pending); event->handle = kgsl_sync_fence_async_wait(sync->fd, - kgsl_cmdbatch_sync_fence_func, event); + drawobj_sync_fence_func, event); if (IS_ERR_OR_NULL(event->handle)) { int ret = PTR_ERR(event->handle); - clear_bit(event->id, &cmdbatch->pending); + clear_bit(event->id, &syncobj->pending); event->handle = NULL; - kgsl_cmdbatch_put(cmdbatch); + drawobj_put(drawobj); /* * If ret == 0 the fence was already signaled - print a trace * message so we can track that */ if (ret == 0) - trace_syncpoint_fence_expire(cmdbatch, "signaled"); + trace_syncpoint_fence_expire(syncobj, "signaled"); return ret; } - trace_syncpoint_fence(cmdbatch, event->handle->name); + trace_syncpoint_fence(syncobj, event->handle->name); return 0; } -/* kgsl_cmdbatch_add_sync_timestamp() - Add a new sync point for a cmdbatch +/* drawobj_add_sync_timestamp() - Add a new sync point for a sync obj * @device: KGSL device - * @cmdbatch: KGSL cmdbatch to add the sync point to - * @priv: Private sructure passed by the user + * @syncobj: 
KGSL sync obj to add the sync point to + * @priv: Private structure passed by the user * - * Add a new sync point timestamp event to the cmdbatch. + * Add a new sync point timestamp event to the sync obj. */ -static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void *priv) +static int drawobj_add_sync_timestamp(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void *priv) { struct kgsl_cmd_syncpoint_timestamp *sync = priv; - struct kgsl_context *context = kgsl_context_get(cmdbatch->device, + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); + struct kgsl_context *context = kgsl_context_get(device, sync->context_id); - struct kgsl_cmdbatch_sync_event *event; + struct kgsl_drawobj_sync_event *event; int ret = -EINVAL; unsigned int id; @@ -384,8 +399,9 @@ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device, * create a sync point on a future timestamp. */ - if (context == cmdbatch->context) { + if (context == drawobj->context) { unsigned int queued; + kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED, &queued); @@ -397,29 +413,29 @@ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device, } } - kref_get(&cmdbatch->refcount); + kref_get(&drawobj->refcount); - id = cmdbatch->numsyncs++; + id = syncobj->numsyncs++; - event = &cmdbatch->synclist[id]; + event = &syncobj->synclist[id]; event->id = id; event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP; - event->cmdbatch = cmdbatch; + event->syncobj = syncobj; event->context = context; event->timestamp = sync->timestamp; event->device = device; - set_bit(event->id, &cmdbatch->pending); + set_bit(event->id, &syncobj->pending); ret = kgsl_add_event(device, &context->events, sync->timestamp, - kgsl_cmdbatch_sync_func, event); + drawobj_sync_func, event); if (ret) { - clear_bit(event->id, &cmdbatch->pending); - kgsl_cmdbatch_put(cmdbatch); + clear_bit(event->id, &syncobj->pending); + drawobj_put(drawobj); } else { - trace_syncpoint_timestamp(cmdbatch, context, sync->timestamp); + trace_syncpoint_timestamp(syncobj, context, sync->timestamp); } done: @@ -430,43 +446,46 @@ done: } /** - * kgsl_cmdbatch_add_sync() - Add a sync point to a command batch + * kgsl_drawobj_sync_add_sync() - Add a sync point to a command + * batch * @device: Pointer to the KGSL device struct for the GPU - * @cmdbatch: Pointer to the cmdbatch + * @syncobj: Pointer to the sync obj * @sync: Pointer to the user-specified struct defining the syncpoint * - * Create a new sync point in the cmdbatch based on the user specified - * parameters + * Create a new sync point in the sync obj based on the + * user specified parameters */ -int kgsl_cmdbatch_add_sync(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, +int kgsl_drawobj_sync_add_sync(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, struct kgsl_cmd_syncpoint *sync) { void *priv; int ret, psize; - int (*func)(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch, + struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj); + int (*func)(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void *priv); switch (sync->type) { case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: psize = sizeof(struct kgsl_cmd_syncpoint_timestamp); - func = kgsl_cmdbatch_add_sync_timestamp; + func = drawobj_add_sync_timestamp; break; case KGSL_CMD_SYNCPOINT_TYPE_FENCE: psize = sizeof(struct kgsl_cmd_syncpoint_fence); - func = kgsl_cmdbatch_add_sync_fence; + func = drawobj_add_sync_fence; break; default: KGSL_DRV_ERR(device, "bad 
syncpoint type ctxt %d type 0x%x size %zu\n", - cmdbatch->context->id, sync->type, sync->size); + drawobj->context->id, sync->type, sync->size); return -EINVAL; } if (sync->size != psize) { KGSL_DRV_ERR(device, "bad syncpoint size ctxt %d type 0x%x size %zu\n", - cmdbatch->context->id, sync->type, sync->size); + drawobj->context->id, sync->type, sync->size); return -EINVAL; } @@ -479,30 +498,32 @@ int kgsl_cmdbatch_add_sync(struct kgsl_device *device, return -EFAULT; } - ret = func(device, cmdbatch, priv); + ret = func(device, syncobj, priv); kfree(priv); return ret; } static void add_profiling_buffer(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, uint64_t gpuaddr, uint64_t size, + struct kgsl_drawobj_cmd *cmdobj, + uint64_t gpuaddr, uint64_t size, unsigned int id, uint64_t offset) { struct kgsl_mem_entry *entry; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); - if (!(cmdbatch->flags & KGSL_CMDBATCH_PROFILING)) + if (!(drawobj->flags & KGSL_DRAWOBJ_PROFILING)) return; /* Only the first buffer entry counts - ignore the rest */ - if (cmdbatch->profiling_buf_entry != NULL) + if (cmdobj->profiling_buf_entry != NULL) return; if (id != 0) - entry = kgsl_sharedmem_find_id(cmdbatch->context->proc_priv, + entry = kgsl_sharedmem_find_id(drawobj->context->proc_priv, id); else - entry = kgsl_sharedmem_find(cmdbatch->context->proc_priv, + entry = kgsl_sharedmem_find(drawobj->context->proc_priv, gpuaddr); if (entry != NULL) { @@ -515,47 +536,50 @@ static void add_profiling_buffer(struct kgsl_device *device, if (entry == NULL) { KGSL_DRV_ERR(device, "ignore bad profile buffer ctxt %d id %d offset %lld gpuaddr %llx size %lld\n", - cmdbatch->context->id, id, offset, gpuaddr, size); + drawobj->context->id, id, offset, gpuaddr, size); return; } - cmdbatch->profiling_buf_entry = entry; + cmdobj->profiling_buf_entry = entry; if (id != 0) - cmdbatch->profiling_buffer_gpuaddr = + cmdobj->profiling_buffer_gpuaddr = entry->memdesc.gpuaddr + offset; else - cmdbatch->profiling_buffer_gpuaddr = gpuaddr; + cmdobj->profiling_buffer_gpuaddr = gpuaddr; } /** - * kgsl_cmdbatch_add_ibdesc() - Add a legacy ibdesc to a command batch - * @cmdbatch: Pointer to the cmdbatch + * kgsl_drawobj_cmd_add_ibdesc() - Add a legacy ibdesc to a command + * batch + * @cmdobj: Pointer to the ib * @ibdesc: Pointer to the user-specified struct defining the memory or IB * - * Create a new memory entry in the cmdbatch based on the user specified - * parameters + * Create a new memory entry in the ib based on the + * user specified parameters */ -int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc) +int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc) { uint64_t gpuaddr = (uint64_t) ibdesc->gpuaddr; uint64_t size = (uint64_t) ibdesc->sizedwords << 2; struct kgsl_memobj_node *mem; + struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj); /* sanitize the ibdesc ctrl flags */ ibdesc->ctrl &= KGSL_IBDESC_MEMLIST | KGSL_IBDESC_PROFILING_BUFFER; - if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST && + if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST && ibdesc->ctrl & KGSL_IBDESC_MEMLIST) { if (ibdesc->ctrl & KGSL_IBDESC_PROFILING_BUFFER) { - add_profiling_buffer(device, cmdbatch, + add_profiling_buffer(device, cmdobj, gpuaddr, size, 0, 0); return 0; } } - if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER)) + /* Ignore if SYNC or MARKER is specified */ + if (drawobj->type & (SYNCOBJ_TYPE | MARKEROBJ_TYPE)) 
return 0; mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL); @@ -569,74 +593,121 @@ int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device, mem->offset = 0; mem->flags = 0; - if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST && - ibdesc->ctrl & KGSL_IBDESC_MEMLIST) { + if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST && + ibdesc->ctrl & KGSL_IBDESC_MEMLIST) /* add to the memlist */ - list_add_tail(&mem->node, &cmdbatch->memlist); - } else { + list_add_tail(&mem->node, &cmdobj->memlist); + else { /* set the preamble flag if directed to */ - if (cmdbatch->context->flags & KGSL_CONTEXT_PREAMBLE && - list_empty(&cmdbatch->cmdlist)) + if (drawobj->context->flags & KGSL_CONTEXT_PREAMBLE && + list_empty(&cmdobj->cmdlist)) mem->flags = KGSL_CMDLIST_CTXTSWITCH_PREAMBLE; /* add to the cmd list */ - list_add_tail(&mem->node, &cmdbatch->cmdlist); + list_add_tail(&mem->node, &cmdobj->cmdlist); } return 0; } +static inline int drawobj_init(struct kgsl_device *device, + struct kgsl_context *context, struct kgsl_drawobj *drawobj, + unsigned int type) +{ + /* + * Increase the reference count on the context so it doesn't disappear + * during the lifetime of this object + */ + if (!_kgsl_context_get(context)) + return -ENOENT; + + kref_init(&drawobj->refcount); + + drawobj->device = device; + drawobj->context = context; + drawobj->type = type; + + return 0; +} + /** - * kgsl_cmdbatch_create() - Create a new cmdbatch structure + * kgsl_drawobj_sync_create() - Create a new sync obj + * structure * @device: Pointer to a KGSL device struct * @context: Pointer to a KGSL context struct - * @flags: Flags for the cmdbatch * - * Allocate an new cmdbatch structure + * Allocate a new kgsl_drawobj_sync structure */ -struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device, - struct kgsl_context *context, unsigned int flags) +struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device, + struct kgsl_context *context) { - struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL); - if (cmdbatch == NULL) + struct kgsl_drawobj_sync *syncobj = kzalloc(sizeof(*syncobj), + GFP_KERNEL); + if (syncobj == NULL) return ERR_PTR(-ENOMEM); - /* - * Increase the reference count on the context so it doesn't disappear - * during the lifetime of this command batch - */ + if (drawobj_init(device, context, DRAWOBJ(syncobj), SYNCOBJ_TYPE)) { + kfree(syncobj); + return ERR_PTR(-ENOENT); + } + + /* Add a timer to help debug sync deadlocks */ + setup_timer(&syncobj->timer, syncobj_timer, (unsigned long) syncobj); + + return syncobj; +} + +/** + * kgsl_drawobj_cmd_create() - Create a new command obj + * structure + * @device: Pointer to a KGSL device struct + * @context: Pointer to a KGSL context struct + * @flags: Flags for the command obj + * @type: type of cmdobj MARKER/CMD + * + * Allocate a new kgsl_drawobj_cmd structure +*/ +struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device, + struct kgsl_context *context, unsigned int flags, + unsigned int type) +{ + struct kgsl_drawobj_cmd *cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL); + struct kgsl_drawobj *drawobj; + + if (cmdobj == NULL) + return ERR_PTR(-ENOMEM); - if (!_kgsl_context_get(context)) { - kfree(cmdbatch); + type &= CMDOBJ_TYPE | MARKEROBJ_TYPE; + if (type == 0) { + kfree(cmdobj); + return ERR_PTR(-EINVAL); + } + + drawobj = DRAWOBJ(cmdobj); + + if (drawobj_init(device, context, drawobj, type)) { + kfree(cmdobj); return ERR_PTR(-ENOENT); } - kref_init(&cmdbatch->refcount); - INIT_LIST_HEAD(&cmdbatch->cmdlist); - 
INIT_LIST_HEAD(&cmdbatch->memlist); - - cmdbatch->device = device; - cmdbatch->context = context; - /* sanitize our flags for cmdbatches */ - cmdbatch->flags = flags & (KGSL_CMDBATCH_CTX_SWITCH - | KGSL_CMDBATCH_MARKER - | KGSL_CMDBATCH_END_OF_FRAME - | KGSL_CMDBATCH_SYNC - | KGSL_CMDBATCH_PWR_CONSTRAINT - | KGSL_CMDBATCH_MEMLIST - | KGSL_CMDBATCH_PROFILING - | KGSL_CMDBATCH_PROFILING_KTIME); + /* sanitize our flags for drawobj's */ + drawobj->flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH + | KGSL_DRAWOBJ_MARKER + | KGSL_DRAWOBJ_END_OF_FRAME + | KGSL_DRAWOBJ_PWR_CONSTRAINT + | KGSL_DRAWOBJ_MEMLIST + | KGSL_DRAWOBJ_PROFILING + | KGSL_DRAWOBJ_PROFILING_KTIME); - /* Add a timer to help debug sync deadlocks */ - setup_timer(&cmdbatch->timer, _kgsl_cmdbatch_timer, - (unsigned long) cmdbatch); + INIT_LIST_HEAD(&cmdobj->cmdlist); + INIT_LIST_HEAD(&cmdobj->memlist); - return cmdbatch; + return cmdobj; } #ifdef CONFIG_COMPAT static int add_ibdesc_list_compat(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count) + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count) { int i, ret = 0; struct kgsl_ibdesc_compat ibdesc32; @@ -654,7 +725,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device, ibdesc.sizedwords = (size_t) ibdesc32.sizedwords; ibdesc.ctrl = (unsigned int) ibdesc32.ctrl; - ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc); + ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc); if (ret) break; @@ -665,7 +736,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device, } static int add_syncpoints_compat(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count) + struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count) { struct kgsl_cmd_syncpoint_compat sync32; struct kgsl_cmd_syncpoint sync; @@ -683,7 +754,7 @@ static int add_syncpoints_compat(struct kgsl_device *device, sync.priv = compat_ptr(sync32.priv); sync.size = (size_t) sync32.size; - ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync); + ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync); if (ret) break; @@ -694,26 +765,54 @@ static int add_syncpoints_compat(struct kgsl_device *device, } #else static int add_ibdesc_list_compat(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count) + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count) { return -EINVAL; } static int add_syncpoints_compat(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count) + struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count) { return -EINVAL; } #endif -int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count) +/* Returns: + * -EINVAL: Bad data + * 0: All data fields are empty (nothing to do) + * 1: All list information is valid + */ +static int _verify_input_list(unsigned int count, void __user *ptr, + unsigned int size) +{ + /* Return early if nothing going on */ + if (count == 0 && ptr == NULL && size == 0) + return 0; + + /* Sanity check inputs */ + if (count == 0 || ptr == NULL || size == 0) + return -EINVAL; + + return 1; +} + +int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count) { struct kgsl_ibdesc ibdesc; + struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj); int i, ret; + /* Ignore everything if this is a MARKER */ + if (baseobj->type & MARKEROBJ_TYPE) + return 0; + + ret = _verify_input_list(count, ptr, 
sizeof(ibdesc)); + if (ret <= 0) + return -EINVAL; + if (is_compat_task()) - return add_ibdesc_list_compat(device, cmdbatch, ptr, count); + return add_ibdesc_list_compat(device, cmdobj, ptr, count); for (i = 0; i < count; i++) { memset(&ibdesc, 0, sizeof(ibdesc)); @@ -721,7 +820,7 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device, if (copy_from_user(&ibdesc, ptr, sizeof(ibdesc))) return -EFAULT; - ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc); + ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc); if (ret) return ret; @@ -731,8 +830,8 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device, return 0; } -int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count) +int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count) { struct kgsl_cmd_syncpoint sync; int i, ret; @@ -740,17 +839,14 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device, if (count == 0) return 0; - if (count > KGSL_MAX_SYNCPOINTS) - return -EINVAL; - - cmdbatch->synclist = kcalloc(count, - sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL); + syncobj->synclist = kcalloc(count, + sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL); - if (cmdbatch->synclist == NULL) + if (syncobj->synclist == NULL) return -ENOMEM; if (is_compat_task()) - return add_syncpoints_compat(device, cmdbatch, ptr, count); + return add_syncpoints_compat(device, syncobj, ptr, count); for (i = 0; i < count; i++) { memset(&sync, 0, sizeof(sync)); @@ -758,7 +854,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device, if (copy_from_user(&sync, ptr, sizeof(sync))) return -EFAULT; - ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync); + ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync); if (ret) return ret; @@ -768,7 +864,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device, return 0; } -static int kgsl_cmdbatch_add_object(struct list_head *head, +static int drawobj_add_object(struct list_head *head, struct kgsl_command_object *obj) { struct kgsl_memobj_node *mem; @@ -793,24 +889,22 @@ static int kgsl_cmdbatch_add_object(struct list_head *head, KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \ KGSL_CMDLIST_IB_PREAMBLE) -int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, +/* This can only accept MARKEROBJ_TYPE and CMDOBJ_TYPE */ +int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, unsigned int size, unsigned int count) { struct kgsl_command_object obj; - int i, ret = 0; + struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj); + int i, ret; - /* Return early if nothing going on */ - if (count == 0 && ptr == NULL && size == 0) + /* Ignore everything if this is a MARKER */ + if (baseobj->type & MARKEROBJ_TYPE) return 0; - /* Sanity check inputs */ - if (count == 0 || ptr == NULL || size == 0) - return -EINVAL; - - /* Ignore all if SYNC or MARKER is specified */ - if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER)) - return 0; + ret = _verify_input_list(count, ptr, size); + if (ret <= 0) + return ret; for (i = 0; i < count; i++) { memset(&obj, 0, sizeof(obj)); @@ -823,12 +917,12 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device, if (!(obj.flags & CMDLIST_FLAGS)) { KGSL_DRV_ERR(device, "invalid cmdobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n", - cmdbatch->context->id, obj.flags, obj.id, + 
baseobj->context->id, obj.flags, obj.id, obj.offset, obj.gpuaddr, obj.size); return -EINVAL; } - ret = kgsl_cmdbatch_add_object(&cmdbatch->cmdlist, &obj); + ret = drawobj_add_object(&cmdobj->cmdlist, &obj); if (ret) return ret; @@ -838,20 +932,21 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device, return 0; } -int kgsl_cmdbatch_add_memlist(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, +int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, unsigned int size, unsigned int count) { struct kgsl_command_object obj; - int i, ret = 0; + struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj); + int i, ret; - /* Return early if nothing going on */ - if (count == 0 && ptr == NULL && size == 0) + /* Ignore everything if this is a MARKER */ + if (baseobj->type & MARKEROBJ_TYPE) return 0; - /* Sanity check inputs */ - if (count == 0 || ptr == NULL || size == 0) - return -EINVAL; + ret = _verify_input_list(count, ptr, size); + if (ret <= 0) + return ret; for (i = 0; i < count; i++) { memset(&obj, 0, sizeof(obj)); @@ -863,17 +958,16 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device, if (!(obj.flags & KGSL_OBJLIST_MEMOBJ)) { KGSL_DRV_ERR(device, "invalid memobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n", - cmdbatch->context->id, obj.flags, obj.id, - obj.offset, obj.gpuaddr, obj.size); + DRAWOBJ(cmdobj)->context->id, obj.flags, + obj.id, obj.offset, obj.gpuaddr, obj.size); return -EINVAL; } if (obj.flags & KGSL_OBJLIST_PROFILE) - add_profiling_buffer(device, cmdbatch, obj.gpuaddr, + add_profiling_buffer(device, cmdobj, obj.gpuaddr, obj.size, obj.id, obj.offset); else { - ret = kgsl_cmdbatch_add_object(&cmdbatch->memlist, - &obj); + ret = drawobj_add_object(&cmdobj->memlist, &obj); if (ret) return ret; } @@ -884,29 +978,23 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device, return 0; } -int kgsl_cmdbatch_add_synclist(struct kgsl_device *device, - struct kgsl_cmdbatch *cmdbatch, void __user *ptr, +int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void __user *ptr, unsigned int size, unsigned int count) { struct kgsl_command_syncpoint syncpoint; struct kgsl_cmd_syncpoint sync; - int i, ret = 0; - - /* Return early if nothing going on */ - if (count == 0 && ptr == NULL && size == 0) - return 0; - - /* Sanity check inputs */ - if (count == 0 || ptr == NULL || size == 0) - return -EINVAL; + int i, ret; - if (count > KGSL_MAX_SYNCPOINTS) + /* If creating a sync and the data is not there or wrong then error */ + ret = _verify_input_list(count, ptr, size); + if (ret <= 0) return -EINVAL; - cmdbatch->synclist = kcalloc(count, - sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL); + syncobj->synclist = kcalloc(count, + sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL); - if (cmdbatch->synclist == NULL) + if (syncobj->synclist == NULL) return -ENOMEM; for (i = 0; i < count; i++) { @@ -920,7 +1008,7 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device, sync.priv = to_user_ptr(syncpoint.priv); sync.size = syncpoint.size; - ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync); + ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync); if (ret) return ret; @@ -930,13 +1018,13 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device, return 0; } -void kgsl_cmdbatch_exit(void) +void kgsl_drawobj_exit(void) { if (memobjs_cache != NULL) kmem_cache_destroy(memobjs_cache); } -int kgsl_cmdbatch_init(void) +int kgsl_drawobj_init(void) { 
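	/* one-time setup: slab cache that backs all kgsl_memobj_node allocations */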
memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0); if (memobjs_cache == NULL) { diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h new file mode 100644 index 000000000000..89ed944c539a --- /dev/null +++ b/drivers/gpu/msm/kgsl_drawobj.h @@ -0,0 +1,198 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __KGSL_DRAWOBJ_H +#define __KGSL_DRAWOBJ_H + +#define DRAWOBJ(obj) (&obj->base) +#define SYNCOBJ(obj) \ + container_of(obj, struct kgsl_drawobj_sync, base) +#define CMDOBJ(obj) \ + container_of(obj, struct kgsl_drawobj_cmd, base) + +#define CMDOBJ_TYPE BIT(0) +#define MARKEROBJ_TYPE BIT(1) +#define SYNCOBJ_TYPE BIT(2) + +/** + * struct kgsl_drawobj - KGSL drawobj descriptor + * @device: KGSL GPU device that the command was created for + * @context: KGSL context that created the command + * @type: Object type + * @timestamp: Timestamp assigned to the command + * @flags: KGSL_DRAWOBJ_* flags for this object + * @refcount: kref structure to maintain the reference count + */ +struct kgsl_drawobj { + struct kgsl_device *device; + struct kgsl_context *context; + uint32_t type; + uint32_t timestamp; + unsigned long flags; + struct kref refcount; +}; + +/** + * struct kgsl_drawobj_cmd - KGSL command obj. This covers marker + * cmds also, since markers are a special form of cmd that does not + * need its cmds to be executed. + * @base: Base kgsl_drawobj + * @priv: Internal flags + * @global_ts: The ringbuffer timestamp corresponding to this + * command obj + * @fault_policy: Internal policy describing how to handle this command in case + * of a fault + * @fault_recovery: recovery actions actually tried for this batch + * @cmdlist: List of IBs to issue + * @memlist: List of all memory used in this command batch + * @marker_timestamp: For markers, the timestamp of the last "real" command that + * was queued + * @profiling_buf_entry: Mem entry containing the profiling buffer + * @profiling_buffer_gpuaddr: GPU virt address of the profile buffer added here + * for easy access + * @profile_index: Index to store the start/stop ticks in the kernel profiling + * buffer + * @submit_ticks: Variable to hold ticks at the time of + * command obj submit. + + */ +struct kgsl_drawobj_cmd { + struct kgsl_drawobj base; + unsigned long priv; + unsigned int global_ts; + unsigned long fault_policy; + unsigned long fault_recovery; + struct list_head cmdlist; + struct list_head memlist; + unsigned int marker_timestamp; + struct kgsl_mem_entry *profiling_buf_entry; + uint64_t profiling_buffer_gpuaddr; + unsigned int profile_index; + uint64_t submit_ticks; +};
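/*
 * (Editorial sketch, not part of the patch: the base object is embedded
 * first in each wrapper, so the CMDOBJ()/SYNCOBJ() macros above are
 * container_of() downcasts. example_global_ts is hypothetical.)
 */
static unsigned int example_global_ts(struct kgsl_drawobj *drawobj)
{
	/* only cmd/marker objects carry a ringbuffer timestamp */
	if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))
		return CMDOBJ(drawobj)->global_ts;

	return 0;
}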
+ +/** + * struct kgsl_drawobj_sync - KGSL sync object + * @base: Base kgsl_drawobj, this needs to be the first entry + * @synclist: Array of context/timestamp tuples to wait for before issuing + * @numsyncs: Number of sync entries in the array + * @pending: Bitmask of sync events that are active + * @timer: a timer used to track possible sync timeouts for this + * sync obj + * @timeout_jiffies: For a sync obj the jiffies at + * which the timer will expire + */ +struct kgsl_drawobj_sync { + struct kgsl_drawobj base; + struct kgsl_drawobj_sync_event *synclist; + unsigned int numsyncs; + unsigned long pending; + struct timer_list timer; + unsigned long timeout_jiffies; +}; + +/** + * struct kgsl_drawobj_sync_event + * @id: identifier (position within the pending bitmap) + * @type: Syncpoint type + * @syncobj: Pointer to the syncobj that owns the sync event + * @context: KGSL context for whose timestamp we want to + * register this event + * @timestamp: Pending timestamp for the event + * @handle: Pointer to a sync fence handle + * @device: Pointer to the KGSL device + */ +struct kgsl_drawobj_sync_event { + unsigned int id; + int type; + struct kgsl_drawobj_sync *syncobj; + struct kgsl_context *context; + unsigned int timestamp; + struct kgsl_sync_fence_waiter *handle; + struct kgsl_device *device; +}; + +#define KGSL_DRAWOBJ_FLAGS \ + { KGSL_DRAWOBJ_MARKER, "MARKER" }, \ + { KGSL_DRAWOBJ_CTX_SWITCH, "CTX_SWITCH" }, \ + { KGSL_DRAWOBJ_SYNC, "SYNC" }, \ + { KGSL_DRAWOBJ_END_OF_FRAME, "EOF" }, \ + { KGSL_DRAWOBJ_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \ + { KGSL_DRAWOBJ_SUBMIT_IB_LIST, "IB_LIST" } + +/** + * enum kgsl_drawobj_cmd_priv - Internal command obj flags + * @CMDOBJ_SKIP - skip the entire command obj + * @CMDOBJ_FORCE_PREAMBLE - Force the preamble on for + * command obj + * @CMDOBJ_WFI - Force wait-for-idle for the submission + * @CMDOBJ_PROFILE - store the start / retire ticks for + * the command obj in the profiling buffer + */ +enum kgsl_drawobj_cmd_priv { + CMDOBJ_SKIP = 0, + CMDOBJ_FORCE_PREAMBLE, + CMDOBJ_WFI, + CMDOBJ_PROFILE, +}; + +struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device, + struct kgsl_context *context, unsigned int flags, + unsigned int type); +int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc); +int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count); +int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, + unsigned int size, unsigned int count); +int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device, + struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, + unsigned int size, unsigned int count); + +struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device, + struct kgsl_context *context); +int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, void __user *ptr, + int count); +int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, 
void __user *ptr, + unsigned int size, unsigned int count); +int kgsl_drawobj_sync_add_sync(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj, + struct kgsl_cmd_syncpoint *sync); + +int kgsl_drawobj_init(void); +void kgsl_drawobj_exit(void); + +void kgsl_dump_syncpoints(struct kgsl_device *device, + struct kgsl_drawobj_sync *syncobj); + +void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj); + +static inline bool kgsl_drawobj_events_pending( + struct kgsl_drawobj_sync *syncobj) +{ + return !bitmap_empty(&syncobj->pending, KGSL_MAX_SYNCPOINTS); +} + +static inline bool kgsl_drawobj_event_pending( + struct kgsl_drawobj_sync *syncobj, unsigned int bit) +{ + if (bit >= KGSL_MAX_SYNCPOINTS) + return false; + + return test_bit(bit, &syncobj->pending); +} +#endif /* __KGSL_DRAWOBJ_H */ diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index 71b6086423d6..9f35a3197a4c 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -1118,7 +1118,6 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) { int ret = 0; struct kgsl_iommu_pt *iommu_pt = NULL; - int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW); unsigned int cb_num; struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu); struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER]; @@ -1128,9 +1127,6 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) if (IS_ERR(iommu_pt)) return PTR_ERR(iommu_pt); - iommu_domain_set_attr(iommu_pt->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw); - if (kgsl_mmu_is_perprocess(mmu)) { ret = iommu_domain_set_attr(iommu_pt->domain, DOMAIN_ATTR_PROCID, &pt->name); @@ -1189,7 +1185,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) int ret = 0; struct kgsl_iommu_pt *iommu_pt = NULL; struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu); - int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW); struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE]; int secure_vmid = VMID_CP_PIXEL; unsigned int cb_num; @@ -1207,9 +1202,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) if (IS_ERR(iommu_pt)) return PTR_ERR(iommu_pt); - iommu_domain_set_attr(iommu_pt->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw); - ret = iommu_domain_set_attr(iommu_pt->domain, DOMAIN_ATTR_SECURE_VMID, &secure_vmid); if (ret) { @@ -1251,7 +1243,6 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER]; int dynamic = 1; unsigned int cb_num = ctx->cb_num; - int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW); iommu_pt = _alloc_pt(ctx->dev, mmu, pt); @@ -1278,9 +1269,6 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) goto done; } - iommu_domain_set_attr(iommu_pt->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw); - ret = _attach_pt(iommu_pt, ctx); if (ret) goto done; @@ -2492,7 +2480,6 @@ static const struct { { "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE }, { "qcom,hyp_secure_alloc", KGSL_MMU_HYP_SECURE_ALLOC }, { "qcom,force-32bit", KGSL_MMU_FORCE_32BIT }, - { "qcom,coherent-htw", KGSL_MMU_COHERENT_HTW }, }; static int _kgsl_iommu_probe(struct kgsl_device *device, diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h index acbc0e784cf2..3e32c25b3dbe 100644 --- a/drivers/gpu/msm/kgsl_mmu.h +++ b/drivers/gpu/msm/kgsl_mmu.h @@ -130,8 +130,6 @@ struct kgsl_mmu_pt_ops { #define 
KGSL_MMU_FORCE_32BIT BIT(5) /* 64 bit address is live */ #define KGSL_MMU_64BIT BIT(6) -/* MMU can do coherent hardware table walks */ -#define KGSL_MMU_COHERENT_HTW BIT(7) /* The MMU supports non-contigious pages */ #define KGSL_MMU_PAGED BIT(8) /* The device requires a guard page */ diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c index 0fcc0c3b0d49..d71c6a63f2d3 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.c +++ b/drivers/gpu/msm/kgsl_pwrctrl.c @@ -21,6 +21,7 @@ #include <linux/delay.h> #include <linux/msm_adreno_devfreq.h> #include <linux/of_device.h> +#include <linux/thermal.h> #include "kgsl.h" #include "kgsl_pwrscale.h" @@ -590,22 +591,10 @@ static ssize_t kgsl_pwrctrl_max_pwrlevel_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%u\n", pwr->max_pwrlevel); } -static ssize_t kgsl_pwrctrl_min_pwrlevel_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ struct kgsl_device *device = kgsl_device_from_dev(dev); - struct kgsl_pwrctrl *pwr; - int ret; - unsigned int level = 0; - - if (device == NULL) - return 0; - - pwr = &device->pwrctrl; - - ret = kgsl_sysfs_store(buf, &level); - if (ret) - return ret; +static void kgsl_pwrctrl_min_pwrlevel_set(struct kgsl_device *device, + int level) +{ + struct kgsl_pwrctrl *pwr = &device->pwrctrl; mutex_lock(&device->mutex); if (level > pwr->num_pwrlevels - 2) @@ -621,6 +610,24 @@ static ssize_t kgsl_pwrctrl_min_pwrlevel_store(struct device *dev, kgsl_pwrctrl_pwrlevel_change(device, pwr->active_pwrlevel); mutex_unlock(&device->mutex); +} + +static ssize_t kgsl_pwrctrl_min_pwrlevel_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + int ret; + unsigned int level = 0; + + if (device == NULL) + return 0; + + ret = kgsl_sysfs_store(buf, &level); + if (ret) + return ret; + + kgsl_pwrctrl_min_pwrlevel_set(device, level); return count; } @@ -664,24 +671,13 @@ static int _get_nearest_pwrlevel(struct kgsl_pwrctrl *pwr, unsigned int clock) return -ERANGE; } -static ssize_t kgsl_pwrctrl_max_gpuclk_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static void kgsl_pwrctrl_max_clock_set(struct kgsl_device *device, int val) { - struct kgsl_device *device = kgsl_device_from_dev(dev); struct kgsl_pwrctrl *pwr; - unsigned int val = 0; - int level, ret; - - if (device == NULL) - return 0; + int level; pwr = &device->pwrctrl; - ret = kgsl_sysfs_store(buf, &val); - if (ret) - return ret; - mutex_lock(&device->mutex); level = _get_nearest_pwrlevel(pwr, val); /* If the requested power level is not supported by hw, try cycling */ @@ -715,21 +711,37 @@ static ssize_t kgsl_pwrctrl_max_gpuclk_store(struct device *dev, if (pwr->sysfs_pwr_limit) kgsl_pwr_limits_set_freq(pwr->sysfs_pwr_limit, pwr->pwrlevels[level].gpu_freq); - return count; + return; err: mutex_unlock(&device->mutex); - return count; } -static ssize_t kgsl_pwrctrl_max_gpuclk_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t kgsl_pwrctrl_max_gpuclk_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { - struct kgsl_device *device = kgsl_device_from_dev(dev); + unsigned int val = 0; + int ret; + + if (device == NULL) + return 0; + + ret = kgsl_sysfs_store(buf, &val); + if (ret) + return ret; + + kgsl_pwrctrl_max_clock_set(device, val); + + return count; +} + +static unsigned int kgsl_pwrctrl_max_clock_get(struct 
kgsl_device *device) +{ struct kgsl_pwrctrl *pwr; unsigned int freq; + if (device == NULL) return 0; pwr = &device->pwrctrl; @@ -743,7 +755,17 @@ static ssize_t kgsl_pwrctrl_max_gpuclk_show(struct device *dev, (TH_HZ - pwr->thermal_timeout) * (hfreq / TH_HZ); } - return snprintf(buf, PAGE_SIZE, "%d\n", freq); + return freq; +} + +static ssize_t kgsl_pwrctrl_max_gpuclk_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + + return snprintf(buf, PAGE_SIZE, "%d\n", + kgsl_pwrctrl_max_clock_get(device)); } static ssize_t kgsl_pwrctrl_gpuclk_store(struct device *dev, @@ -903,9 +925,14 @@ static ssize_t kgsl_pwrctrl_gpu_available_frequencies_show( if (device == NULL) return 0; pwr = &device->pwrctrl; - for (index = 0; index < pwr->num_pwrlevels - 1; index++) - num_chars += snprintf(buf + num_chars, PAGE_SIZE, "%d ", - pwr->pwrlevels[index].gpu_freq); + for (index = 0; index < pwr->num_pwrlevels - 1; index++) { + num_chars += scnprintf(buf + num_chars, + PAGE_SIZE - num_chars - 1, + "%d ", pwr->pwrlevels[index].gpu_freq); + /* One space for trailing null and another for the newline */ + if (num_chars >= PAGE_SIZE - 2) + break; + } buf[num_chars++] = '\n'; return num_chars; } @@ -1171,6 +1198,195 @@ static ssize_t kgsl_popp_show(struct device *dev, test_bit(POPP_ON, &device->pwrscale.popp_state)); } +static ssize_t kgsl_pwrctrl_gpu_model_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + char model_str[32] = {0}; + + if (device == NULL) + return 0; + + device->ftbl->gpu_model(device, model_str, sizeof(model_str)); + + return snprintf(buf, PAGE_SIZE, "%s\n", model_str); +} + +static ssize_t kgsl_pwrctrl_gpu_busy_percentage_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int ret; + struct kgsl_device *device = kgsl_device_from_dev(dev); + struct kgsl_clk_stats *stats; + unsigned int busy_percent = 0; + + if (device == NULL) + return 0; + stats = &device->pwrctrl.clk_stats; + + if (stats->total_old != 0) + busy_percent = (stats->busy_old * 100) / stats->total_old; + + ret = snprintf(buf, PAGE_SIZE, "%d %%\n", busy_percent); + + /* Reset the stats if GPU is OFF */ + if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) { + stats->busy_old = 0; + stats->total_old = 0; + } + return ret; +} + +static ssize_t kgsl_pwrctrl_min_clock_mhz_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + struct kgsl_pwrctrl *pwr; + + if (device == NULL) + return 0; + pwr = &device->pwrctrl; + + return snprintf(buf, PAGE_SIZE, "%d\n", + pwr->pwrlevels[pwr->min_pwrlevel].gpu_freq / 1000000); +} + +static ssize_t kgsl_pwrctrl_min_clock_mhz_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + int level, ret; + unsigned int freq; + struct kgsl_pwrctrl *pwr; + + if (device == NULL) + return 0; + + pwr = &device->pwrctrl; + + ret = kgsl_sysfs_store(buf, &freq); + if (ret) + return ret; + + freq *= 1000000; + level = _get_nearest_pwrlevel(pwr, freq); + + if (level >= 0) + kgsl_pwrctrl_min_pwrlevel_set(device, level); + + return count; +} + +static ssize_t kgsl_pwrctrl_max_clock_mhz_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + unsigned int freq; + + if (device == 
NULL) + return 0; + + freq = kgsl_pwrctrl_max_clock_get(device); + + return snprintf(buf, PAGE_SIZE, "%d\n", freq / 1000000); +} + +static ssize_t kgsl_pwrctrl_max_clock_mhz_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + unsigned int val = 0; + int ret; + + if (device == NULL) + return 0; + + ret = kgsl_sysfs_store(buf, &val); + if (ret) + return ret; + + val *= 1000000; + kgsl_pwrctrl_max_clock_set(device, val); + + return count; +} + +static ssize_t kgsl_pwrctrl_clock_mhz_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + + if (device == NULL) + return 0; + + return snprintf(buf, PAGE_SIZE, "%ld\n", + kgsl_pwrctrl_active_freq(&device->pwrctrl) / 1000000); +} + +static ssize_t kgsl_pwrctrl_freq_table_mhz_show( + struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + struct kgsl_pwrctrl *pwr; + int index, num_chars = 0; + + if (device == NULL) + return 0; + + pwr = &device->pwrctrl; + for (index = 0; index < pwr->num_pwrlevels - 1; index++) { + num_chars += scnprintf(buf + num_chars, + PAGE_SIZE - num_chars - 1, + "%d ", pwr->pwrlevels[index].gpu_freq / 1000000); + /* One space for trailing null and another for the newline */ + if (num_chars >= PAGE_SIZE - 2) + break; + } + + buf[num_chars++] = '\n'; + + return num_chars; +} + +static ssize_t kgsl_pwrctrl_temp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct kgsl_device *device = kgsl_device_from_dev(dev); + struct kgsl_pwrctrl *pwr; + int ret, id = 0, temperature = 0; + + if (device == NULL) + goto done; + + pwr = &device->pwrctrl; + + if (!pwr->tsens_name) + goto done; + + id = sensor_get_id((char *)pwr->tsens_name); + if (id < 0) + goto done; + + ret = sensor_get_temp(id, &temperature); + if (ret) + goto done; + + return snprintf(buf, PAGE_SIZE, "%d\n", + temperature); +done: + return 0; +} + static DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store); static DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show, @@ -1222,6 +1438,17 @@ static DEVICE_ATTR(popp, 0644, kgsl_popp_show, kgsl_popp_store); static DEVICE_ATTR(force_no_nap, 0644, kgsl_pwrctrl_force_no_nap_show, kgsl_pwrctrl_force_no_nap_store); +static DEVICE_ATTR(gpu_model, 0444, kgsl_pwrctrl_gpu_model_show, NULL); +static DEVICE_ATTR(gpu_busy_percentage, 0444, + kgsl_pwrctrl_gpu_busy_percentage_show, NULL); +static DEVICE_ATTR(min_clock_mhz, 0644, kgsl_pwrctrl_min_clock_mhz_show, + kgsl_pwrctrl_min_clock_mhz_store); +static DEVICE_ATTR(max_clock_mhz, 0644, kgsl_pwrctrl_max_clock_mhz_show, + kgsl_pwrctrl_max_clock_mhz_store); +static DEVICE_ATTR(clock_mhz, 0444, kgsl_pwrctrl_clock_mhz_show, NULL); +static DEVICE_ATTR(freq_table_mhz, 0444, + kgsl_pwrctrl_freq_table_mhz_show, NULL); +static DEVICE_ATTR(temp, 0444, kgsl_pwrctrl_temp_show, NULL); static const struct device_attribute *pwrctrl_attr_list[] = { &dev_attr_gpuclk, @@ -1243,12 +1470,50 @@ static const struct device_attribute *pwrctrl_attr_list[] = { &dev_attr_bus_split, &dev_attr_default_pwrlevel, &dev_attr_popp, + &dev_attr_gpu_model, + &dev_attr_gpu_busy_percentage, + &dev_attr_min_clock_mhz, + &dev_attr_max_clock_mhz, + &dev_attr_clock_mhz, + &dev_attr_freq_table_mhz, + &dev_attr_temp, NULL }; +struct sysfs_link { + const char *src; + const char *dst; +}; + +static struct sysfs_link link_names[] 
= { + { "gpu_model", "gpu_model",}, + { "gpu_busy_percentage", "gpu_busy",}, + { "min_clock_mhz", "gpu_min_clock",}, + { "max_clock_mhz", "gpu_max_clock",}, + { "clock_mhz", "gpu_clock",}, + { "freq_table_mhz", "gpu_freq_table",}, + { "temp", "gpu_tmu",}, +}; + int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device) { - return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list); + int i, ret; + + ret = kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list); + if (ret) + return ret; + + device->gpu_sysfs_kobj = kobject_create_and_add("gpu", kernel_kobj); + if (IS_ERR_OR_NULL(device->gpu_sysfs_kobj)) + return (device->gpu_sysfs_kobj == NULL) ? + -ENOMEM : PTR_ERR(device->gpu_sysfs_kobj); + + for (i = 0; i < ARRAY_SIZE(link_names); i++) + kgsl_gpu_sysfs_add_link(device->gpu_sysfs_kobj, + &device->dev->kobj, link_names[i].src, + link_names[i].dst); + + return 0; } void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device) @@ -1860,6 +2125,10 @@ int kgsl_pwrctrl_init(struct kgsl_device *device) kgsl_pwrctrl_vbif_init(); + /* temperature sensor name */ + of_property_read_string(pdev->dev.of_node, "qcom,tsens-name", + &pwr->tsens_name); + return result; } diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h index ae21a274fada..2de42d87bcbe 100644 --- a/drivers/gpu/msm/kgsl_pwrctrl.h +++ b/drivers/gpu/msm/kgsl_pwrctrl.h @@ -152,6 +152,7 @@ struct kgsl_regulator { * @sysfs_pwr_limit - pointer to the sysfs limits node * isense_clk_indx - index of isense clock, 0 if no isense * isense_clk_on_level - isense clock rate is XO rate below this level. + * tsens_name - pointer to temperature sensor name of GPU temperature sensor */ struct kgsl_pwrctrl { @@ -204,6 +205,7 @@ struct kgsl_pwrctrl { struct kgsl_pwr_limit *sysfs_pwr_limit; unsigned int gpu_bimc_int_clk_freq; bool gpu_bimc_interface_enabled; + const char *tsens_name; }; int kgsl_pwrctrl_init(struct kgsl_device *device); diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c index d90aec42f30a..01d3b74c16fd 100644 --- a/drivers/gpu/msm/kgsl_pwrscale.c +++ b/drivers/gpu/msm/kgsl_pwrscale.c @@ -910,6 +910,14 @@ int kgsl_pwrscale_init(struct device *dev, const char *governor) pwrscale->history[i].type = i; } + /* Add links to the devfreq sysfs nodes */ + kgsl_gpu_sysfs_add_link(device->gpu_sysfs_kobj, + &pwrscale->devfreqptr->dev.kobj, "governor", + "gpu_governor"); + kgsl_gpu_sysfs_add_link(device->gpu_sysfs_kobj, + &pwrscale->devfreqptr->dev.kobj, + "available_governors", "gpu_available_governor"); + return 0; } EXPORT_SYMBOL(kgsl_pwrscale_init); diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c index 2c9f17f9e7a4..a2e4a909062f 100644 --- a/drivers/gpu/msm/kgsl_snapshot.c +++ b/drivers/gpu/msm/kgsl_snapshot.c @@ -810,6 +810,29 @@ static ssize_t faultcount_store(struct kgsl_device *device, const char *buf, return count; } +/* Show the force_panic request status */ +static ssize_t force_panic_show(struct kgsl_device *device, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", device->force_panic); +} + +/* Store the panic request value to force_panic */ +static ssize_t force_panic_store(struct kgsl_device *device, const char *buf, + size_t count) +{ + unsigned int val = 0; + int ret; + + if (device && count > 0) + device->force_panic = 0; + + ret = kgsl_sysfs_store(buf, &val); + + if (!ret && device) + device->force_panic = (bool)val; + + return (ssize_t) ret < 0 ? 
ret : count;
+}
 
 /* Show the timestamp of the last collected snapshot */
 static ssize_t timestamp_show(struct kgsl_device *device, char *buf)
 {
@@ -835,6 +858,7 @@ struct kgsl_snapshot_attribute attr_##_name = { \
 
 static SNAPSHOT_ATTR(timestamp, 0444, timestamp_show, NULL);
 static SNAPSHOT_ATTR(faultcount, 0644, faultcount_show, faultcount_store);
+static SNAPSHOT_ATTR(force_panic, 0644, force_panic_show, force_panic_store);
 
 static ssize_t snapshot_sysfs_show(struct kobject *kobj, struct attribute *attr,
	char *buf)
@@ -914,6 +938,7 @@ int kgsl_device_snapshot_init(struct kgsl_device *device)
 
 	device->snapshot = NULL;
 	device->snapshot_faultcount = 0;
+	device->force_panic = 0;
 
 	ret = kobject_init_and_add(&device->snapshot_kobj, &ktype_snapshot,
		&device->dev->kobj, "snapshot");
@@ -929,7 +954,11 @@ int kgsl_device_snapshot_init(struct kgsl_device *device)
 		goto done;
 
 	ret = sysfs_create_file(&device->snapshot_kobj, &attr_faultcount.attr);
+	if (ret)
+		goto done;
+
+	ret = sysfs_create_file(&device->snapshot_kobj,
+			&attr_force_panic.attr);
 
 done:
 	return ret;
 }
@@ -954,6 +983,7 @@ void kgsl_device_snapshot_close(struct kgsl_device *device)
 	device->snapshot_memory.ptr = NULL;
 	device->snapshot_memory.size = 0;
 	device->snapshot_faultcount = 0;
+	device->force_panic = 0;
 }
 EXPORT_SYMBOL(kgsl_device_snapshot_close);
 
@@ -1032,6 +1062,7 @@ void kgsl_snapshot_save_frozen_objs(struct work_struct *work)
 {
 	struct kgsl_snapshot *snapshot = container_of(work,
				struct kgsl_snapshot, work);
+	struct kgsl_device *device = kgsl_get_device(KGSL_DEVICE_3D0);
 	struct kgsl_snapshot_object *obj, *tmp;
 	size_t size = 0;
 	void *ptr;
@@ -1073,12 +1104,15 @@ done:
 	snapshot->process = NULL;
 
 	if (snapshot->ib1base && !snapshot->ib1dumped)
-		pr_warn("kgsl: snapshot: Active IB1:%016llx not dumped\n",
+		KGSL_DRV_ERR(device,
+			"snapshot: Active IB1:%016llx not dumped\n",
			snapshot->ib1base);
 	else if (snapshot->ib2base && !snapshot->ib2dumped)
-		pr_warn("kgsl: snapshot: Active IB2:%016llx not dumped\n",
+		KGSL_DRV_ERR(device,
+			"snapshot: Active IB2:%016llx not dumped\n",
			snapshot->ib2base);
 
 	complete_all(&snapshot->dump_gate);
+	BUG_ON(device->force_panic);
 	return;
 }
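With the snapshot hunk above in place, writing 1 to the new force_panic node turns the end of the next snapshot dump into a BUG_ON(), so the GPU state can be pulled out of a ramdump instead of the live system. A hypothetical userspace helper (the sysfs path assumes the usual kgsl-3d0 class device):

	#include <fcntl.h>
	#include <unistd.h>

	/* Arm force_panic so the next GPU snapshot panics the kernel. */
	static int kgsl_arm_force_panic(void)
	{
		int fd = open("/sys/class/kgsl/kgsl-3d0/snapshot/force_panic",
				O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

Note that force_panic is cleared again in kgsl_device_snapshot_init() and kgsl_device_snapshot_close(), so the request does not persist across reinitialization.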
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 4ef9f80177d6..6438c6e65b97 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -36,14 +36,13 @@ TRACE_EVENT(kgsl_issueibcmds,
 
 	TP_PROTO(struct kgsl_device *device,
			int drawctxt_id,
-			struct kgsl_cmdbatch *cmdbatch,
			unsigned int numibs,
			int timestamp,
			int flags,
			int result,
			unsigned int type),
 
-	TP_ARGS(device, drawctxt_id, cmdbatch, numibs, timestamp,
+	TP_ARGS(device, drawctxt_id, numibs, timestamp,
		flags, result, type),
 
 	TP_STRUCT__entry(
@@ -74,7 +73,7 @@ TRACE_EVENT(kgsl_issueibcmds,
 		__entry->numibs,
 		__entry->timestamp,
 		__entry->flags ?
 		__print_flags(__entry->flags, "|",
-			KGSL_CMDBATCH_FLAGS) : "None",
+			KGSL_DRAWOBJ_FLAGS) : "None",
 		__entry->result,
 		__print_symbolic(__entry->drawctxt_type, KGSL_CONTEXT_TYPES)
 	)
@@ -1028,59 +1027,62 @@ TRACE_EVENT(kgsl_pagetable_destroy,
 );
 
 DECLARE_EVENT_CLASS(syncpoint_timestamp_template,
-	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+	TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+		struct kgsl_context *context,
		unsigned int timestamp),
-	TP_ARGS(cmdbatch, context, timestamp),
+	TP_ARGS(syncobj, context, timestamp),
 	TP_STRUCT__entry(
-		__field(unsigned int, cmdbatch_context_id)
+		__field(unsigned int, syncobj_context_id)
 		__field(unsigned int, context_id)
 		__field(unsigned int, timestamp)
 	),
 	TP_fast_assign(
-		__entry->cmdbatch_context_id = cmdbatch->context->id;
+		__entry->syncobj_context_id = syncobj->base.context->id;
 		__entry->context_id = context->id;
 		__entry->timestamp = timestamp;
 	),
 	TP_printk("ctx=%d sync ctx=%d ts=%d",
-		__entry->cmdbatch_context_id, __entry->context_id,
+		__entry->syncobj_context_id, __entry->context_id,
		__entry->timestamp)
 );
 
 DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp,
-	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+	TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+		struct kgsl_context *context,
		unsigned int timestamp),
-	TP_ARGS(cmdbatch, context, timestamp)
+	TP_ARGS(syncobj, context, timestamp)
 );
 
 DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp_expire,
-	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+	TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+		struct kgsl_context *context,
		unsigned int timestamp),
-	TP_ARGS(cmdbatch, context, timestamp)
+	TP_ARGS(syncobj, context, timestamp)
 );
 
 DECLARE_EVENT_CLASS(syncpoint_fence_template,
-	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
-	TP_ARGS(cmdbatch, name),
+	TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+	TP_ARGS(syncobj, name),
 	TP_STRUCT__entry(
 		__string(fence_name, name)
-		__field(unsigned int, cmdbatch_context_id)
+		__field(unsigned int, syncobj_context_id)
 	),
 	TP_fast_assign(
-		__entry->cmdbatch_context_id = cmdbatch->context->id;
+		__entry->syncobj_context_id = syncobj->base.context->id;
 		__assign_str(fence_name, name);
 	),
 	TP_printk("ctx=%d fence=%s",
-		__entry->cmdbatch_context_id, __get_str(fence_name))
+		__entry->syncobj_context_id, __get_str(fence_name))
 );
 
 DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence,
-	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
-	TP_ARGS(cmdbatch, name)
+	TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+	TP_ARGS(syncobj, name)
 );
 
 DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence_expire,
-	TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
-	TP_ARGS(cmdbatch, name)
+	TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+	TP_ARGS(syncobj, name)
 );
 
 TRACE_EVENT(kgsl_msg,
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index bb44b6d82ccd..ebb49230d4d7 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
+#include <linux/qpnp/qpnp-revid.h>
 
 #define FG_ADC_RR_EN_CTL 0x46
 #define FG_ADC_RR_SKIN_TEMP_LSB 0x50
@@ -150,13 +151,18 @@
 #define FG_ADC_RR_TEMP_FS_VOLTAGE_NUM 5000000
 #define FG_ADC_RR_TEMP_FS_VOLTAGE_DEN 3
 
-#define FG_ADC_RR_DIE_TEMP_OFFSET 600000
+#define FG_ADC_RR_DIE_TEMP_OFFSET 601400
 #define FG_ADC_RR_DIE_TEMP_SLOPE 2
 #define FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC 25000
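The qcom-rradc hunks just below replace the single fixed charger-temp calibration with per-foundry offset/slope pairs selected through the PMIC fab id. A compressed sketch of the conversion those hunks implement, reusing this file's symbols and assuming FG_ADC_SCALE_MILLI_FACTOR is 1000 (the helper name is ours, not the driver's):

	static int chg_temp_code_to_mdegc(u16 adc_code, bool fab_gf)
	{
		s64 offset = fab_gf ? FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV :
				FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
		s64 slope = fab_gf ? FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C :
				FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
		s64 uv;

		/* ADC code -> microvolts at the 5000000/3 uV full scale */
		uv = div64_s64((s64)adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM,
			FG_ADC_RR_TEMP_FS_VOLTAGE_DEN * FG_MAX_ADC_READINGS);
		/* microvolts below the offset -> millidegrees C above 25 C */
		return div64_s64((offset - uv) * 1000, slope) +
			FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
	}

Because the coefficients now depend on the foundry, the new conversion paths return -EINVAL when the "qcom,pmic-revid" handle is missing or the fab id is unknown, rather than converting with wrong constants.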
-#define FG_ADC_RR_CHG_TEMP_OFFSET 1288000 -#define FG_ADC_RR_CHG_TEMP_SLOPE 4 -#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 27000 +#define FAB_ID_GF 0x30 +#define FAB_ID_SMIC 0x11 +#define FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV 1296794 +#define FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C 3858 +#define FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV 1339518 +#define FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C 3598 +#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 25000 +#define FG_ADC_RR_CHG_THRESHOLD_SCALE 4 #define FG_ADC_RR_VOLT_INPUT_FACTOR 8 #define FG_ADC_RR_CURR_INPUT_FACTOR 2 @@ -201,6 +207,8 @@ struct rradc_chip { struct iio_chan_spec *iio_chans; unsigned int nchannels; struct rradc_chan_prop *chan_props; + struct device_node *revid_dev_node; + struct pmic_revid_data *pmic_fab_id; }; struct rradc_channels { @@ -347,16 +355,34 @@ static int rradc_post_process_chg_temp_hot(struct rradc_chip *chip, struct rradc_chan_prop *prop, u16 adc_code, int *result_millidegc) { - int64_t temp = 0; + int64_t uv = 0, offset = 0, slope = 0; - temp = (int64_t) adc_code * 4; - temp = temp * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM; - temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN * + if (chip->revid_dev_node) { + switch (chip->pmic_fab_id->fab_id) { + case FAB_ID_GF: + offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV; + slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C; + break; + case FAB_ID_SMIC: + offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV; + slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C; + break; + default: + return -EINVAL; + } + } else { + pr_err("No temperature scaling coefficients\n"); + return -EINVAL; + } + + uv = (int64_t) adc_code * FG_ADC_RR_CHG_THRESHOLD_SCALE; + uv = uv * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM; + uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN * FG_MAX_ADC_READINGS)); - temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp; - temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE); - temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC; - *result_millidegc = temp; + uv = offset - uv; + uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope); + uv = uv + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC; + *result_millidegc = uv; return 0; } @@ -380,15 +406,33 @@ static int rradc_post_process_chg_temp(struct rradc_chip *chip, struct rradc_chan_prop *prop, u16 adc_code, int *result_millidegc) { - int64_t temp = 0; + int64_t uv = 0, offset = 0, slope = 0; - temp = ((int64_t) adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM); - temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN * + if (chip->revid_dev_node) { + switch (chip->pmic_fab_id->fab_id) { + case FAB_ID_GF: + offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV; + slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C; + break; + case FAB_ID_SMIC: + offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV; + slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C; + break; + default: + return -EINVAL; + } + } else { + pr_err("No temperature scaling coefficients\n"); + return -EINVAL; + } + + uv = ((int64_t) adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM); + uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN * FG_MAX_ADC_READINGS)); - temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp; - temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE); - temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC; - *result_millidegc = temp; + uv = offset - uv; + uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope); + uv += FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC; + *result_millidegc = uv; return 0; } @@ -516,7 +560,7 @@ static int rradc_do_conversion(struct rradc_chip *chip, buf[0] &= FG_RR_ADC_STS_CHANNEL_READING_MASK; if (buf[0] != FG_RR_ADC_STS_CHANNEL_READING_MASK) { - pr_warn("%s is not ready; 
nothing to read\n", + pr_debug("%s is not ready; nothing to read\n", rradc_chans[prop->channel].datasheet_name); rc = -ENODATA; goto fail; @@ -653,6 +697,22 @@ static int rradc_get_dt_data(struct rradc_chip *chip, struct device_node *node) } chip->base = base; + chip->revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0); + if (chip->revid_dev_node) { + chip->pmic_fab_id = get_revid_data(chip->revid_dev_node); + if (IS_ERR(chip->pmic_fab_id)) { + rc = PTR_ERR(chip->pmic_fab_id); + if (rc != -EPROBE_DEFER) + pr_err("Unable to get pmic_revid rc=%d\n", rc); + return rc; + } + + if (chip->pmic_fab_id->fab_id == -EINVAL) { + rc = chip->pmic_fab_id->fab_id; + pr_debug("Unable to read fabid rc=%d\n", rc); + } + } + iio_chan = chip->iio_chans; for (i = 0; i < RR_ADC_MAX; i++) { diff --git a/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c b/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c new file mode 100644 index 000000000000..e5edaf5f908d --- /dev/null +++ b/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c @@ -0,0 +1,296 @@ +/* drivers/input/misc/ots_pat9125/pat9125_linux_driver.c + * + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + */ + +#include <linux/input.h> +#include <linux/pm.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/of_gpio.h> +#include "pixart_ots.h" + +struct pixart_pat9125_data { + struct i2c_client *client; + struct input_dev *input; + int irq_gpio; + u32 irq_flags; +}; + +static int pat9125_i2c_write(struct i2c_client *client, u8 reg, u8 *data, + int len) +{ + u8 buf[MAX_BUF_SIZE]; + int ret = 0, i; + struct device *dev = &client->dev; + + buf[0] = reg; + if (len >= MAX_BUF_SIZE) { + dev_err(dev, "%s Failed: buffer size is %d [Max Limit is %d]\n", + __func__, len, MAX_BUF_SIZE); + return -ENODEV; + } + for (i = 0 ; i < len; i++) + buf[i+1] = data[i]; + /* Returns negative errno, or else the number of bytes written. */ + ret = i2c_master_send(client, buf, len+1); + if (ret != len+1) + dev_err(dev, "%s Failed: writing to reg 0x%x\n", __func__, reg); + + return ret; +} + +static int pat9125_i2c_read(struct i2c_client *client, u8 reg, u8 *data) +{ + u8 buf[MAX_BUF_SIZE]; + int ret; + struct device *dev = &client->dev; + + buf[0] = reg; + /* + * If everything went ok (1 msg transmitted), return #bytes transmitted, + * else error code. 
i.e. if the transmit succeeded, the return value is 1.
+	 */
+	ret = i2c_master_send(client, buf, 1);
+	if (ret != 1) {
+		dev_err(dev, "%s Failed: writing to reg 0x%x\n", __func__, reg);
+		return ret;
+	}
+	/* returns negative errno, or else the number of bytes read */
+	ret = i2c_master_recv(client, buf, 1);
+	if (ret != 1) {
+		dev_err(dev, "%s Failed: reading reg 0x%x\n", __func__, reg);
+		return ret;
+	}
+	*data = buf[0];
+
+	return ret;
+}
+
+unsigned char read_data(struct i2c_client *client, u8 addr)
+{
+	u8 data = 0xff;
+
+	pat9125_i2c_read(client, addr, &data);
+	return data;
+}
+
+void write_data(struct i2c_client *client, u8 addr, u8 data)
+{
+	pat9125_i2c_write(client, addr, &data, 1);
+}
+
+static irqreturn_t pixart_pat9125_irq(int irq, void *data)
+{
+	return IRQ_HANDLED;
+}
+
+static ssize_t pat9125_test_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	char s[256], *p = s;
+	int reg_data = 0, i;
+	long rd_addr, wr_addr, wr_data;
+	struct pixart_pat9125_data *data =
+		(struct pixart_pat9125_data *)dev->driver_data;
+	struct i2c_client *client = data->client;
+
+	for (i = 0; i < sizeof(s); i++)
+		s[i] = buf[i];
+	*(s+1) = '\0';
+	*(s+4) = '\0';
+	*(s+7) = '\0';
+	/* example (in console): echo w 12 34 > test */
+	if (*p == 'w') {
+		p += 2;
+		if (!kstrtol(p, 16, &wr_addr)) {
+			p += 3;
+			if (!kstrtol(p, 16, &wr_data)) {
+				dev_dbg(dev, "w 0x%x 0x%x\n",
+					(u8)wr_addr, (u8)wr_data);
+				write_data(client, (u8)wr_addr, (u8)wr_data);
+			}
+		}
+	}
+	/* example (in console): echo r 12 > test */
+	else if (*p == 'r') {
+		p += 2;
+
+		if (!kstrtol(p, 16, &rd_addr)) {
+			reg_data = read_data(client, (u8)rd_addr);
+			dev_dbg(dev, "r 0x%x 0x%x\n",
+				(unsigned int)rd_addr, reg_data);
+		}
+	}
+	return count;
+}
+
+static ssize_t pat9125_test_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	return 0;
+}
+static DEVICE_ATTR(test, S_IRUGO | S_IWUSR | S_IWGRP,
+	pat9125_test_show, pat9125_test_store);
+
+static struct attribute *pat9125_attr_list[] = {
+	&dev_attr_test.attr,
+	NULL,
+};
+
+static struct attribute_group pat9125_attr_grp = {
+	.attrs = pat9125_attr_list,
+};
+
+static int pat9125_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int err = 0;
+	struct pixart_pat9125_data *data;
+	struct input_dev *input;
+	struct device_node *np;
+	struct device *dev = &client->dev;
+
+	err = i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE);
+	if (err < 0) {
+		dev_err(dev, "I2C not supported\n");
+		return -ENXIO;
+	}
+
+	if (client->dev.of_node) {
+		data = devm_kzalloc(dev, sizeof(struct pixart_pat9125_data),
+				GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+	} else {
+		data = client->dev.platform_data;
+		if (!data) {
+			dev_err(dev, "Invalid pat9125 data\n");
+			return -EINVAL;
+		}
+	}
+	data->client = client;
+
+	input = devm_input_allocate_device(dev);
+	if (!input) {
+		dev_err(dev, "Failed to alloc input device\n");
+		return -ENOMEM;
+	}
+
+	i2c_set_clientdata(client, data);
+	input_set_drvdata(input, data);
+	input->name = PAT9125_DEV_NAME;
+
+	data->input = input;
+	err = input_register_device(data->input);
+	if (err < 0) {
+		dev_err(dev, "Failed to register input device\n");
+		goto err_register_input_device;
+	}
+
+	if (!gpio_is_valid(data->irq_gpio)) {
+		dev_err(dev, "invalid irq_gpio: %d\n", data->irq_gpio);
+		return -EINVAL;
+	}
+
+	err = gpio_request(data->irq_gpio, "pixart_pat9125_irq_gpio");
+	if (err) {
+		dev_err(dev, "unable to request gpio %d\n", data->irq_gpio);
+		return err;
+	}
+
+	err =
gpio_direction_input(data->irq_gpio); + if (err) { + dev_err(dev, "unable to set dir for gpio %d\n", data->irq_gpio); + goto free_gpio; + } + + if (!ots_sensor_init(client)) { + err = -ENODEV; + goto err_sensor_init; + } + + err = devm_request_threaded_irq(dev, client->irq, NULL, + pixart_pat9125_irq, (unsigned long)data->irq_flags, + "pixart_pat9125_irq", data); + if (err) { + dev_err(dev, "Req irq %d failed, errno:%d\n", client->irq, err); + goto err_request_threaded_irq; + } + + err = sysfs_create_group(&(input->dev.kobj), &pat9125_attr_grp); + if (err) { + dev_err(dev, "Failed to create sysfs group, errno:%d\n", err); + goto err_sysfs_create; + } + + return 0; + +err_sysfs_create: +err_request_threaded_irq: +err_sensor_init: +free_gpio: + gpio_free(data->irq_gpio); +err_register_input_device: + input_free_device(data->input); + return err; +} + +static int pat9125_i2c_remove(struct i2c_client *client) +{ + struct pixart_pat9125_data *data = i2c_get_clientdata(client); + + devm_free_irq(&client->dev, client->irq, data); + if (gpio_is_valid(data->irq_gpio)) + gpio_free(data->irq_gpio); + input_unregister_device(data->input); + devm_kfree(&client->dev, data); + data = NULL; + return 0; +} + +static int pat9125_suspend(struct device *dev) +{ + return 0; +} + +static int pat9125_resume(struct device *dev) +{ + return 0; +} + +static const struct i2c_device_id pat9125_device_id[] = { + {PAT9125_DEV_NAME, 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, pat9125_device_id); + +static const struct dev_pm_ops pat9125_pm_ops = { + .suspend = pat9125_suspend, + .resume = pat9125_resume +}; + +static const struct of_device_id pixart_pat9125_match_table[] = { + { .compatible = "pixart,pat9125",}, + { }, +}; + +static struct i2c_driver pat9125_i2c_driver = { + .driver = { + .name = PAT9125_DEV_NAME, + .owner = THIS_MODULE, + .pm = &pat9125_pm_ops, + .of_match_table = pixart_pat9125_match_table, + }, + .probe = pat9125_i2c_probe, + .remove = pat9125_i2c_remove, + .id_table = pat9125_device_id, +}; +module_i2c_driver(pat9125_i2c_driver); + +MODULE_AUTHOR("pixart"); +MODULE_DESCRIPTION("pixart pat9125 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/misc/ots_pat9125/pixart_ots.c b/drivers/input/misc/ots_pat9125/pixart_ots.c new file mode 100644 index 000000000000..fa73ffe40985 --- /dev/null +++ b/drivers/input/misc/ots_pat9125/pixart_ots.c @@ -0,0 +1,77 @@ +/* drivers/input/misc/ots_pat9125/pixart_ots.c + * + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + */ + +#include "pixart_platform.h" +#include "pixart_ots.h" + +static void ots_write_read(struct i2c_client *client, u8 address, u8 wdata) +{ + u8 read_value; + + do { + write_data(client, address, wdata); + read_value = read_data(client, address); + } while (read_value != wdata); +} + +bool ots_sensor_init(struct i2c_client *client) +{ + unsigned char sensor_pid = 0, read_id_ok = 0; + + /* + * Read sensor_pid in address 0x00 to check if the + * serial link is valid, read value should be 0x31. + */ + sensor_pid = read_data(client, PIXART_PAT9125_PRODUCT_ID1_REG); + + if (sensor_pid == PIXART_PAT9125_SENSOR_ID) { + read_id_ok = 1; + + /* + * PAT9125 sensor recommended settings: + * switch to bank0, not allowed to perform ots_write_read + */ + write_data(client, PIXART_PAT9125_SELECT_BANK_REG, + PIXART_PAT9125_BANK0); + /* + * software reset (i.e. set bit7 to 1). + * It will reset to 0 automatically + * so perform OTS_RegWriteRead is not allowed. 
+ */ + write_data(client, PIXART_PAT9125_CONFIG_REG, + PIXART_PAT9125_RESET); + + /* delay 1ms */ + usleep_range(RESET_DELAY_US, RESET_DELAY_US + 1); + + /* disable write protect */ + ots_write_read(client, PIXART_PAT9125_WRITE_PROTECT_REG, + PIXART_PAT9125_DISABLE_WRITE_PROTECT); + /* set X-axis resolution (depends on application) */ + ots_write_read(client, PIXART_PAT9125_SET_CPI_RES_X_REG, + PIXART_PAT9125_CPI_RESOLUTION_X); + /* set Y-axis resolution (depends on application) */ + ots_write_read(client, PIXART_PAT9125_SET_CPI_RES_Y_REG, + PIXART_PAT9125_CPI_RESOLUTION_Y); + /* set 12-bit X/Y data format (depends on application) */ + ots_write_read(client, PIXART_PAT9125_ORIENTATION_REG, + PIXART_PAT9125_MOTION_DATA_LENGTH); + /* ONLY for VDD=VDDA=1.7~1.9V: for power saving */ + ots_write_read(client, PIXART_PAT9125_VOLTAGE_SEGMENT_SEL_REG, + PIXART_PAT9125_LOW_VOLTAGE_SEGMENT); + + if (read_data(client, PIXART_PAT9125_MISC2_REG) == 0x04) { + ots_write_read(client, PIXART_PAT9125_MISC2_REG, 0x08); + if (read_data(client, PIXART_PAT9125_MISC1_REG) == 0x10) + ots_write_read(client, PIXART_PAT9125_MISC1_REG, + 0x19); + } + /* enable write protect */ + ots_write_read(client, PIXART_PAT9125_WRITE_PROTECT_REG, + PIXART_PAT9125_ENABLE_WRITE_PROTECT); + } + return read_id_ok; +} diff --git a/drivers/input/misc/ots_pat9125/pixart_ots.h b/drivers/input/misc/ots_pat9125/pixart_ots.h new file mode 100644 index 000000000000..a66ded5c9d08 --- /dev/null +++ b/drivers/input/misc/ots_pat9125/pixart_ots.h @@ -0,0 +1,45 @@ +/* drivers/input/misc/ots_pat9125/pixart_ots.h + * + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + */ + +#ifndef __PIXART_OTS_H_ +#define __PIXART_OTS_H_ + +#define PAT9125_DEV_NAME "pixart_pat9125" +#define MAX_BUF_SIZE 20 +#define RESET_DELAY_US 1000 + +/* Register addresses */ +#define PIXART_PAT9125_PRODUCT_ID1_REG 0x00 +#define PIXART_PAT9125_PRODUCT_ID2_REG 0x01 +#define PIXART_PAT9125_MOTION_STATUS_REG 0x02 +#define PIXART_PAT9125_DELTA_X_LO_REG 0x03 +#define PIXART_PAT9125_DELTA_Y_LO_REG 0x04 +#define PIXART_PAT9125_CONFIG_REG 0x06 +#define PIXART_PAT9125_WRITE_PROTECT_REG 0x09 +#define PIXART_PAT9125_SET_CPI_RES_X_REG 0x0D +#define PIXART_PAT9125_SET_CPI_RES_Y_REG 0x0E +#define PIXART_PAT9125_DELTA_XY_HI_REG 0x12 +#define PIXART_PAT9125_ORIENTATION_REG 0x19 +#define PIXART_PAT9125_VOLTAGE_SEGMENT_SEL_REG 0x4B +#define PIXART_PAT9125_SELECT_BANK_REG 0x7F +#define PIXART_PAT9125_MISC1_REG 0x5D +#define PIXART_PAT9125_MISC2_REG 0x5E +/*Register configuration data */ +#define PIXART_PAT9125_SENSOR_ID 0x31 +#define PIXART_PAT9125_RESET 0x97 +#define PIXART_PAT9125_MOTION_DATA_LENGTH 0x04 +#define PIXART_PAT9125_BANK0 0x00 +#define PIXART_PAT9125_DISABLE_WRITE_PROTECT 0x5A +#define PIXART_PAT9125_ENABLE_WRITE_PROTECT 0x00 +#define PIXART_PAT9125_CPI_RESOLUTION_X 0x65 +#define PIXART_PAT9125_CPI_RESOLUTION_Y 0xFF +#define PIXART_PAT9125_LOW_VOLTAGE_SEGMENT 0x04 +#define PIXART_PAT9125_VALID_MOTION_DATA 0x80 + +/* Export functions */ +bool ots_sensor_init(struct i2c_client *); + +#endif diff --git a/drivers/input/misc/ots_pat9125/pixart_platform.h b/drivers/input/misc/ots_pat9125/pixart_platform.h new file mode 100644 index 000000000000..1fe448fdc2cb --- /dev/null +++ b/drivers/input/misc/ots_pat9125/pixart_platform.h @@ -0,0 +1,17 @@ +/* drivers/input/misc/ots_pat9125/pixart_platform.h + * + * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
+ * + */ + +#ifndef __PIXART_PLATFORM_H_ +#define __PIXART_PLATFORM_H_ + +#include <linux/i2c.h> +#include <linux/delay.h> + +/* extern functions */ +extern unsigned char read_data(struct i2c_client *, u8 addr); +extern void write_data(struct i2c_client *, u8 addr, u8 data); + +#endif diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index c69927bd4ff2..afa519aa8203 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1888,8 +1888,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) return NULL; smmu_domain->secure_vmid = VMID_INVAL; - /* disable coherent htw by default */ - smmu_domain->attributes = (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE); INIT_LIST_HEAD(&smmu_domain->pte_info_list); INIT_LIST_HEAD(&smmu_domain->unassign_list); INIT_LIST_HEAD(&smmu_domain->secure_pool_list); @@ -2263,15 +2261,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) goto err_destroy_domain_context; } - if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE)) - && !(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)) { - dev_err(dev, - "Can't attach: this domain wants coherent htw but %s doesn't support it\n", - dev_name(smmu_domain->smmu->dev)); - ret = -EINVAL; - goto err_destroy_domain_context; - } - /* Looks ok, so add the device to the domain */ ret = arm_smmu_domain_add_master(smmu_domain, cfg); if (ret) @@ -2977,11 +2966,6 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); ret = 0; break; - case DOMAIN_ATTR_COHERENT_HTW_DISABLE: - *((int *)data) = !!(smmu_domain->attributes & - (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE)); - ret = 0; - break; case DOMAIN_ATTR_SECURE_VMID: *((int *)data) = smmu_domain->secure_vmid; ret = 0; @@ -3083,29 +3067,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, else smmu_domain->stage = ARM_SMMU_DOMAIN_S1; break; - case DOMAIN_ATTR_COHERENT_HTW_DISABLE: - { - struct arm_smmu_device *smmu; - int htw_disable = *((int *)data); - - smmu = smmu_domain->smmu; - - if (smmu && !(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) - && !htw_disable) { - dev_err(smmu->dev, - "Can't enable coherent htw on this domain: this SMMU doesn't support it\n"); - ret = -EINVAL; - goto out_unlock; - } - - if (htw_disable) - smmu_domain->attributes |= - (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE); - else - smmu_domain->attributes &= - ~(1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE); - break; - } case DOMAIN_ATTR_SECURE_VMID: BUG_ON(smmu_domain->secure_vmid != VMID_INVAL); smmu_domain->secure_vmid = *((int *)data); diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c index ea8db1a431d0..266f7065fca4 100644 --- a/drivers/iommu/dma-mapping-fast.c +++ b/drivers/iommu/dma-mapping-fast.c @@ -649,7 +649,7 @@ err: int fast_smmu_attach_device(struct device *dev, struct dma_iommu_mapping *mapping) { - int htw_disable = 1, atomic_domain = 1; + int atomic_domain = 1; struct iommu_domain *domain = mapping->domain; struct iommu_pgtbl_info info; size_t size = mapping->bits << PAGE_SHIFT; @@ -657,10 +657,6 @@ int fast_smmu_attach_device(struct device *dev, if (mapping->base + size > (SZ_1G * 4ULL)) return -EINVAL; - if (iommu_domain_set_attr(domain, DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &htw_disable)) - return -EINVAL; - if (iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC, &atomic_domain)) return -EINVAL; diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index a892b73a4288..5e47e2481300 
100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -481,9 +481,11 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RO : ARM_LPAE_PTE_AP_RO; - if (prot & IOMMU_CACHE) + if (prot & IOMMU_CACHE) { pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT); + pte |= ARM_LPAE_PTE_SH_OS; + } if (prot & IOMMU_DEVICE) pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV << @@ -935,9 +937,14 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) return NULL; /* TCR */ - reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | - (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) | - (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT); + if (cfg->iommu_dev && cfg->iommu_dev->archdata.dma_coherent) + reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) | + (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | + (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); + else + reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | + (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) | + (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT); switch (1 << data->pg_shift) { case SZ_4K: diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c index a0227fd05939..3b54fd4a77e6 100644 --- a/drivers/iommu/iommu-debug.c +++ b/drivers/iommu/iommu-debug.c @@ -48,8 +48,6 @@ static const char *iommu_debug_attr_to_string(enum iommu_attr attr) return "DOMAIN_ATTR_FSL_PAMUV1"; case DOMAIN_ATTR_NESTING: return "DOMAIN_ATTR_NESTING"; - case DOMAIN_ATTR_COHERENT_HTW_DISABLE: - return "DOMAIN_ATTR_COHERENT_HTW_DISABLE"; case DOMAIN_ATTR_PT_BASE_ADDR: return "DOMAIN_ATTR_PT_BASE_ADDR"; case DOMAIN_ATTR_SECURE_VMID: @@ -96,7 +94,6 @@ static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored) { struct iommu_debug_attachment *attach = s->private; phys_addr_t pt_phys; - int coherent_htw_disable; int secure_vmid; seq_printf(s, "Domain: 0x%p\n", attach->domain); @@ -110,14 +107,6 @@ static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored) pt_virt, &pt_phys); } - seq_puts(s, "COHERENT_HTW_DISABLE: "); - if (iommu_domain_get_attr(attach->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &coherent_htw_disable)) - seq_puts(s, "(Unknown)\n"); - else - seq_printf(s, "%d\n", coherent_htw_disable); - seq_puts(s, "SECURE_VMID: "); if (iommu_domain_get_attr(attach->domain, DOMAIN_ATTR_SECURE_VMID, @@ -733,7 +722,6 @@ static int iommu_debug_profiling_show(struct seq_file *s, void *ignored) const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, SZ_1M * 20, 0 }; enum iommu_attr attrs[] = { - DOMAIN_ATTR_COHERENT_HTW_DISABLE, DOMAIN_ATTR_ATOMIC, }; int htw_disable = 1, atomic = 1; @@ -764,7 +752,6 @@ static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored) SZ_1M * 20, 0 }; enum iommu_attr attrs[] = { - DOMAIN_ATTR_COHERENT_HTW_DISABLE, DOMAIN_ATTR_ATOMIC, DOMAIN_ATTR_SECURE_VMID, }; @@ -797,7 +784,6 @@ static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored) size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0}; enum iommu_attr attrs[] = { DOMAIN_ATTR_FAST, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, DOMAIN_ATTR_ATOMIC, }; int one = 1; @@ -1507,7 +1493,6 @@ static const struct file_operations iommu_debug_functional_arm_dma_api_fops = { static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev, int val, bool is_secure) { - int htw_disable = 1; struct bus_type *bus; bus = msm_iommu_get_bus(ddev->dev); @@ -1520,13 +1505,6 @@ static int 
iommu_debug_attach_do_attach(struct iommu_debug_device *ddev, return -ENOMEM; } - if (iommu_domain_set_attr(ddev->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &htw_disable)) { - pr_err("Couldn't disable coherent htw\n"); - goto out_domain_free; - } - if (is_secure && iommu_domain_set_attr(ddev->domain, DOMAIN_ATTR_SECURE_VMID, &val)) { diff --git a/drivers/iommu/msm_dma_iommu_mapping.c b/drivers/iommu/msm_dma_iommu_mapping.c index 334f4e95c068..25fe36ab6339 100644 --- a/drivers/iommu/msm_dma_iommu_mapping.c +++ b/drivers/iommu/msm_dma_iommu_mapping.c @@ -17,6 +17,7 @@ #include <linux/rbtree.h> #include <linux/mutex.h> #include <linux/err.h> +#include <asm/barrier.h> #include <linux/msm_dma_iommu_mapping.h> @@ -216,10 +217,13 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg, sg->dma_length = iommu_map->sgl.dma_length; kref_get(&iommu_map->ref); - /* - * Need to do cache operations here based on "dir" in the - * future if we go with coherent mappings. - */ + if (is_device_dma_coherent(dev)) + /* + * Ensure all outstanding changes for coherent + * buffers are applied to the cache before any + * DMA occurs. + */ + dmb(ish); ret = nents; } mutex_unlock(&iommu_meta->lock); @@ -376,6 +380,7 @@ int msm_dma_unmap_all_for_dev(struct device *dev) return ret; } +EXPORT_SYMBOL(msm_dma_unmap_all_for_dev); /* * Only to be called by ION code when a buffer is freed diff --git a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c index 03a61407aef8..feede3a14e07 100644 --- a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c +++ b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c @@ -1427,7 +1427,6 @@ static int cam_smmu_setup_cb(struct cam_context_bank_info *cb, struct device *dev) { int rc = 0; - int disable_htw = 1; if (!cb || !dev) { pr_err("Error: invalid input params\n"); @@ -1465,21 +1464,7 @@ static int cam_smmu_setup_cb(struct cam_context_bank_info *cb, goto end; } - /* - * Set the domain attributes - * disable L2 redirect since it decreases - * performance - */ - if (iommu_domain_set_attr(cb->mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw)) { - pr_err("Error: couldn't disable coherent HTW\n"); - rc = -ENODEV; - goto err_set_attr; - } return 0; -err_set_attr: - arm_iommu_release_mapping(cb->mapping); end: return rc; } diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c index 4a32de1a85bf..7488f371545b 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c @@ -1104,7 +1104,6 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg) uint32_t io_format = 0; struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg; struct msm_vfe_axi_stream *stream_info; - unsigned long flags; if (stream_cfg_cmd->stream_src >= VFE_AXI_SRC_MAX) { pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__, @@ -1114,12 +1113,9 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg) stream_info = msm_isp_get_stream_common_data(vfe_dev, stream_cfg_cmd->stream_src); - spin_lock_irqsave(&stream_info->lock, flags); - rc = msm_isp_axi_create_stream(vfe_dev, &vfe_dev->axi_data, stream_cfg_cmd, stream_info); if (rc) { - spin_unlock_irqrestore(&stream_info->lock, flags); pr_err("%s: create stream failed\n", __func__); return rc; } @@ -1128,7 +1124,6 @@ int msm_isp_request_axi_stream(struct 
vfe_device *vfe_dev, void *arg) vfe_dev, stream_info, stream_cfg_cmd); if (rc) { msm_isp_axi_destroy_stream(vfe_dev, stream_info); - spin_unlock_irqrestore(&stream_info->lock, flags); pr_err("%s: Request validation failed\n", __func__); return rc; } @@ -1236,7 +1231,6 @@ done: msm_isp_axi_free_wm(vfe_dev, stream_info); msm_isp_axi_destroy_stream(vfe_dev, stream_info); } - spin_unlock_irqrestore(&stream_info->lock, flags); return rc; } @@ -1247,7 +1241,6 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg) struct msm_vfe_axi_stream *stream_info; struct msm_vfe_axi_stream_cfg_cmd stream_cfg; int vfe_idx; - unsigned long flags; if (HANDLE_TO_IDX(stream_release_cmd->stream_handle) >= VFE_AXI_SRC_MAX) { @@ -1257,13 +1250,10 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg) stream_info = msm_isp_get_stream_common_data(vfe_dev, HANDLE_TO_IDX(stream_release_cmd->stream_handle)); - spin_lock_irqsave(&stream_info->lock, flags); - vfe_idx = msm_isp_get_vfe_idx_for_stream_user(vfe_dev, stream_info); if (vfe_idx == -ENOTTY || stream_release_cmd->stream_handle != stream_info->stream_handle[vfe_idx]) { - spin_unlock_irqrestore(&stream_info->lock, flags); pr_err("%s: Invalid stream %p handle %x/%x vfe_idx %d vfe_dev %d num_isp %d\n", __func__, stream_info, stream_release_cmd->stream_handle, @@ -1277,9 +1267,7 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg) stream_cfg.cmd = STOP_STREAM; stream_cfg.num_streams = 1; stream_cfg.stream_handle[0] = stream_release_cmd->stream_handle; - spin_unlock_irqrestore(&stream_info->lock, flags); msm_isp_cfg_axi_stream(vfe_dev, (void *) &stream_cfg); - spin_lock_irqsave(&stream_info->lock, flags); } for (i = 0; i < stream_info->num_planes; i++) { @@ -1297,7 +1285,6 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg) msm_isp_axi_free_wm(vfe_dev, stream_info); msm_isp_axi_destroy_stream(vfe_dev, stream_info); - spin_unlock_irqrestore(&stream_info->lock, flags); return rc; } diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c index 923cd9aa5954..e226f7e40a07 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c @@ -460,7 +460,6 @@ int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg) struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg; struct msm_vfe_stats_stream *stream_info = NULL; uint32_t stats_idx; - unsigned long flags; stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops. 
get_stats_idx(stream_req_cmd->stats_type); @@ -472,11 +471,8 @@ int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg) stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, stats_idx); - spin_lock_irqsave(&stream_info->lock, flags); - rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd, stream_info); if (rc < 0) { - spin_unlock_irqrestore(&stream_info->lock, flags); pr_err("%s: create stream failed\n", __func__); return rc; } @@ -491,7 +487,6 @@ int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg) msm_isp_stats_cfg_stream_scratch(stream_info, VFE_PONG_FLAG); } - spin_unlock_irqrestore(&stream_info->lock, flags); return rc; } @@ -505,7 +500,6 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg) int vfe_idx; int i; int k; - unsigned long flags; if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) { pr_err("%s Invalid stats index %d", __func__, stats_idx); @@ -513,12 +507,10 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg) } stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, stats_idx); - spin_lock_irqsave(&stream_info->lock, flags); vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user( vfe_dev, stream_info); if (vfe_idx == -ENOTTY || stream_info->stream_handle[vfe_idx] != stream_release_cmd->stream_handle) { - spin_unlock_irqrestore(&stream_info->lock, flags); pr_err("%s: Invalid stream handle %x, expected %x\n", __func__, stream_release_cmd->stream_handle, vfe_idx != -ENOTTY ? @@ -526,7 +518,6 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg) return -EINVAL; } if (stream_info->state == STATS_AVAILABLE) { - spin_unlock_irqrestore(&stream_info->lock, flags); pr_err("%s: stream already release\n", __func__); return rc; } @@ -537,9 +528,7 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg) stream_cfg_cmd.num_streams = 1; stream_cfg_cmd.stream_handle[0] = stream_release_cmd->stream_handle; - spin_unlock_irqrestore(&stream_info->lock, flags); msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd); - spin_lock_irqsave(&stream_info->lock, flags); } for (i = vfe_idx, k = vfe_idx + 1; k < stream_info->num_isp; k++, i++) { @@ -556,7 +545,6 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg) if (stream_info->num_isp == 0) stream_info->state = STATS_AVAILABLE; - spin_unlock_irqrestore(&stream_info->lock, flags); return 0; } diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c index 3ac4c3af3208..258e08c1b34f 100644 --- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c +++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c @@ -114,6 +114,13 @@ static int msm_cpp_update_gdscr_status(struct cpp_device *cpp_dev, bool status); static int msm_cpp_buffer_private_ops(struct cpp_device *cpp_dev, uint32_t buff_mgr_ops, uint32_t id, void *arg); +static void msm_cpp_set_micro_irq_mask(struct cpp_device *cpp_dev, + uint8_t enable, uint32_t irq_mask); +static void msm_cpp_flush_queue_and_release_buffer(struct cpp_device *cpp_dev, + int queue_len); +static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info); +static int msm_cpp_dump_addr(struct cpp_device *cpp_dev, + struct msm_cpp_frame_info_t *frame_info); #if CONFIG_MSM_CPP_DBG #define CPP_DBG(fmt, args...) 
pr_err(fmt, ##args) @@ -636,6 +643,127 @@ static int32_t msm_cpp_poll_rx_empty(void __iomem *cpp_base) return rc; } +static int msm_cpp_dump_addr(struct cpp_device *cpp_dev, + struct msm_cpp_frame_info_t *frame_info) +{ + int32_t s_base, p_base; + uint32_t rd_off, wr0_off, wr1_off, wr2_off, wr3_off; + uint32_t wr0_mdata_off, wr1_mdata_off, wr2_mdata_off, wr3_mdata_off; + uint32_t rd_ref_off, wr_ref_off; + uint32_t s_size, p_size; + uint8_t tnr_enabled, ubwc_enabled, cds_en; + int32_t i = 0; + uint32_t *cpp_frame_msg; + + cpp_frame_msg = frame_info->cpp_cmd_msg; + + /* Update stripe/plane size and base offsets */ + s_base = cpp_dev->payload_params.stripe_base; + s_size = cpp_dev->payload_params.stripe_size; + p_base = cpp_dev->payload_params.plane_base; + p_size = cpp_dev->payload_params.plane_size; + + /* Fetch engine Offset */ + rd_off = cpp_dev->payload_params.rd_pntr_off; + /* Write engine offsets */ + wr0_off = cpp_dev->payload_params.wr_0_pntr_off; + wr1_off = wr0_off + 1; + wr2_off = wr1_off + 1; + wr3_off = wr2_off + 1; + /* Reference engine offsets */ + rd_ref_off = cpp_dev->payload_params.rd_ref_pntr_off; + wr_ref_off = cpp_dev->payload_params.wr_ref_pntr_off; + /* Meta data offsets */ + wr0_mdata_off = + cpp_dev->payload_params.wr_0_meta_data_wr_pntr_off; + wr1_mdata_off = (wr0_mdata_off + 1); + wr2_mdata_off = (wr1_mdata_off + 1); + wr3_mdata_off = (wr2_mdata_off + 1); + + tnr_enabled = ((frame_info->feature_mask & TNR_MASK) >> 2); + ubwc_enabled = ((frame_info->feature_mask & UBWC_MASK) >> 5); + cds_en = ((frame_info->feature_mask & CDS_MASK) >> 6); + + for (i = 0; i < frame_info->num_strips; i++) { + pr_err("stripe %d: in %x, out1 %x out2 %x, out3 %x, out4 %x\n", + i, cpp_frame_msg[s_base + rd_off + i * s_size], + cpp_frame_msg[s_base + wr0_off + i * s_size], + cpp_frame_msg[s_base + wr1_off + i * s_size], + cpp_frame_msg[s_base + wr2_off + i * s_size], + cpp_frame_msg[s_base + wr3_off + i * s_size]); + + if (tnr_enabled) { + pr_err("stripe %d: read_ref %x, write_ref %x\n", i, + cpp_frame_msg[s_base + rd_ref_off + i * s_size], + cpp_frame_msg[s_base + wr_ref_off + i * s_size] + ); + } + + if (cds_en) { + pr_err("stripe %d:, dsdn_off %x\n", i, + cpp_frame_msg[s_base + rd_ref_off + i * s_size] + ); + } + + if (ubwc_enabled) { + pr_err("stripe %d: metadata %x, %x, %x, %x\n", i, + cpp_frame_msg[s_base + wr0_mdata_off + + i * s_size], + cpp_frame_msg[s_base + wr1_mdata_off + + i * s_size], + cpp_frame_msg[s_base + wr2_mdata_off + + i * s_size], + cpp_frame_msg[s_base + wr3_mdata_off + + i * s_size] + ); + } + + } + return 0; +} + +static void msm_cpp_iommu_fault_handler(struct iommu_domain *domain, + struct device *dev, unsigned long iova, int flags, void *token) +{ + struct cpp_device *cpp_dev = NULL; + struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME]; + int32_t i = 0, queue_len = 0; + struct msm_device_queue *queue = NULL; + + if (token) { + cpp_dev = token; + disable_irq(cpp_dev->irq->start); + if (atomic_read(&cpp_timer.used)) { + atomic_set(&cpp_timer.used, 0); + del_timer_sync(&cpp_timer.cpp_timer); + } + mutex_lock(&cpp_dev->mutex); + tasklet_kill(&cpp_dev->cpp_tasklet); + cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin); + queue = &cpp_timer.data.cpp_dev->processing_q; + queue_len = queue->len; + if (!queue_len) { + pr_err("%s:%d: Invalid queuelen\n", __func__, __LINE__); + msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8); + mutex_unlock(&cpp_dev->mutex); + return; + } + for (i = 0; i < queue_len; i++) { + if (cpp_timer.data.processed_frame[i]) { + 
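
msm_cpp_dump_addr() walks a flat u32 payload: stripe i's copy of a given field lives at word index s_base + field_off + i * s_size, and the shifts applied to feature_mask suggest the TNR, UBWC and CDS flags sit at bits 2, 5 and 6 respectively (an inference from the code, not documented in the patch). A small self-contained model of that indexing, with made-up sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t msg[64] = { 0 };
            /* illustrative stand-ins for payload_params fields */
            const uint32_t s_base = 8, s_size = 14, rd_off = 3;
            unsigned stripe;

            /* populate the fetch pointer of each stripe */
            for (stripe = 0; stripe < 4; stripe++)
                    msg[s_base + rd_off + stripe * s_size] = 0x1000 + stripe;

            /* same arithmetic the dump helper uses to read them back */
            for (stripe = 0; stripe < 4; stripe++)
                    printf("stripe %u: in 0x%x\n", stripe,
                           (unsigned)msg[s_base + rd_off + stripe * s_size]);
            return 0;
    }
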
processed_frame[i] = + cpp_timer.data.processed_frame[i]; + pr_err("Fault on identity=0x%x, frame_id=%03d\n", + processed_frame[i]->identity, + processed_frame[i]->frame_id); + msm_cpp_dump_addr(cpp_dev, processed_frame[i]); + msm_cpp_dump_frame_cmd(processed_frame[i]); + } + } + msm_cpp_flush_queue_and_release_buffer(cpp_dev, queue_len); + msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8); + mutex_unlock(&cpp_dev->mutex); + } +} static int cpp_init_mem(struct cpp_device *cpp_dev) { @@ -652,6 +780,9 @@ static int cpp_init_mem(struct cpp_device *cpp_dev) return -ENODEV; cpp_dev->iommu_hdl = iommu_hdl; + cam_smmu_reg_client_page_fault_handler( + cpp_dev->iommu_hdl, + msm_cpp_iommu_fault_handler, cpp_dev); return 0; } diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c index 8f911d362477..a4ee5041bfff 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c +++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c @@ -276,6 +276,12 @@ int32_t msm_camera_cci_i2c_write_seq_table( client_addr_type = client->addr_type; client->addr_type = write_setting->addr_type; + if (reg_setting->reg_data_size > I2C_SEQ_REG_DATA_MAX) { + pr_err("%s: number of bytes %u exceeding the max supported %d\n", + __func__, reg_setting->reg_data_size, I2C_SEQ_REG_DATA_MAX); + return rc; + } + for (i = 0; i < write_setting->size; i++) { rc = msm_camera_cci_i2c_write_seq(client, reg_setting->reg_addr, reg_setting->reg_data, reg_setting->reg_data_size); diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c index 3b101798edac..7a0fb97061d5 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c +++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c @@ -290,6 +290,12 @@ int32_t msm_camera_qup_i2c_write_seq_table(struct msm_camera_i2c_client *client, client_addr_type = client->addr_type; client->addr_type = write_setting->addr_type; + if (reg_setting->reg_data_size > I2C_SEQ_REG_DATA_MAX) { + pr_err("%s: number of bytes %u exceeding the max supported %d\n", + __func__, reg_setting->reg_data_size, I2C_SEQ_REG_DATA_MAX); + return rc; + } + for (i = 0; i < write_setting->size; i++) { rc = msm_camera_qup_i2c_write_seq(client, reg_setting->reg_addr, reg_setting->reg_data, reg_setting->reg_data_size); diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c index 43aadffa2983..86e7837cc02a 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c +++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c @@ -1167,6 +1167,7 @@ static int32_t msm_sensor_driver_parse(struct msm_sensor_ctrl_t *s_ctrl) if (!s_ctrl->msm_sensor_mutex) { pr_err("failed: no memory msm_sensor_mutex %pK", s_ctrl->msm_sensor_mutex); + rc = -ENOMEM; goto FREE_SENSOR_I2C_CLIENT; } diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c index 930b18abd71b..b88f03ce89ae 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c @@ -1123,6 +1123,13 @@ static int sde_rotator_try_fmt_vid_cap(struct file *file, struct sde_rotation_config config; int ret; + if ((f->fmt.pix.width == 0) || (f->fmt.pix.height == 0)) { + 
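
Both the CCI and QUP sequential-write paths gain the same guard: reject a reg_data_size larger than I2C_SEQ_REG_DATA_MAX before entering the write loop. Two caveats are visible in the hunks: the early return hands back the function's pre-initialised rc rather than an explicit -EINVAL, and it appears to skip restoring client->addr_type, which was overwritten just before the check. A trimmed, compilable model of the guard (MAX_SEQ_BYTES and the struct are stand-ins, not the driver's types):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_SEQ_BYTES 8         /* stands in for I2C_SEQ_REG_DATA_MAX */

    struct seq_reg {
            uint16_t reg_addr;
            uint8_t  reg_data[MAX_SEQ_BYTES];
            uint16_t reg_data_size; /* bytes the caller wants written */
    };

    static int write_seq_checked(const struct seq_reg *r)
    {
            if (r->reg_data_size > MAX_SEQ_BYTES) {
                    fprintf(stderr, "size %d exceeds max %d\n",
                            r->reg_data_size, MAX_SEQ_BYTES);
                    return -1;      /* driver returns its initialised rc */
            }
            /* ... perform the bounded sequential write ... */
            return 0;
    }

    int main(void)
    {
            struct seq_reg bad = { .reg_addr = 0x100, .reg_data_size = 32 };

            return write_seq_checked(&bad) ? 1 : 0;
    }
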
SDEDEV_WARN(ctx->rot_dev->dev, + "Not supporting 0 width/height: %dx%d\n", + f->fmt.pix.width, f->fmt.pix.height); + return -EINVAL; + } + sde_rot_mgr_lock(rot_dev->mgr); sde_rotator_get_config_from_ctx(ctx, &config); config.output.format = f->fmt.pix.pixelformat; @@ -1162,6 +1169,13 @@ static int sde_rotator_try_fmt_vid_out(struct file *file, struct sde_rotation_config config; int ret; + if ((f->fmt.pix.width == 0) || (f->fmt.pix.height == 0)) { + SDEDEV_WARN(ctx->rot_dev->dev, + "Not supporting 0 width/height: %dx%d\n", + f->fmt.pix.width, f->fmt.pix.height); + return -EINVAL; + } + sde_rot_mgr_lock(rot_dev->mgr); sde_rotator_get_config_from_ctx(ctx, &config); config.input.format = f->fmt.pix.pixelformat; diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c index 7bbd8aa53342..c11c4b61d832 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c @@ -448,7 +448,6 @@ int sde_smmu_probe(struct platform_device *pdev) struct sde_smmu_domain smmu_domain; const struct of_device_id *match; struct sde_module_power *mp; - int disable_htw = 1; char name[MAX_CLIENT_NAME_LEN]; if (!mdata) { @@ -535,13 +534,6 @@ int sde_smmu_probe(struct platform_device *pdev) goto disable_power; } - rc = iommu_domain_set_attr(sde_smmu->mmu_mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw); - if (rc) { - SDEROT_ERR("couldn't disable coherent HTW\n"); - goto release_mapping; - } - if (smmu_domain.domain == SDE_IOMMU_DOMAIN_ROT_SECURE) { int secure_vmid = VMID_CP_PIXEL; diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c index eed177ea5bab..60c4c81eddf2 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c @@ -355,13 +355,6 @@ int sde_mdp_get_plane_sizes(struct sde_mdp_format_params *fmt, u32 w, u32 h, chroma_samp = fmt->chroma_sample; - if (rotation) { - if (chroma_samp == SDE_MDP_CHROMA_H2V1) - chroma_samp = SDE_MDP_CHROMA_H1V2; - else if (chroma_samp == SDE_MDP_CHROMA_H1V2) - chroma_samp = SDE_MDP_CHROMA_H2V1; - } - sde_mdp_get_v_h_subsample_rate(chroma_samp, &v_subsample, &h_subsample); diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c index fdf6e1b1c5d0..becea0c59521 100644 --- a/drivers/media/platform/msm/vidc/msm_vdec.c +++ b/drivers/media/platform/msm/vidc/msm_vdec.c @@ -626,6 +626,11 @@ static u32 get_frame_size_compressed(int plane, return (max_mbs_per_frame * size_per_mb * 3/2)/2; } +static u32 get_frame_size_nv12_ubwc_10bit(int plane, u32 height, u32 width) +{ + return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height); +} + static u32 get_frame_size(struct msm_vidc_inst *inst, const struct msm_vidc_format *fmt, int fmt_type, int plane) @@ -662,7 +667,7 @@ static int is_ctrl_valid_for_codec(struct msm_vidc_inst *inst, int rc = 0; switch (ctrl->id) { case V4L2_CID_MPEG_VIDC_VIDEO_MVC_BUFFER_LAYOUT: - if (inst->fmts[OUTPUT_PORT]->fourcc != V4L2_PIX_FMT_H264_MVC) { + if (inst->fmts[OUTPUT_PORT].fourcc != V4L2_PIX_FMT_H264_MVC) { dprintk(VIDC_ERR, "Control %#x only valid for MVC\n", ctrl->id); rc = -ENOTSUPP; @@ -670,7 +675,7 @@ static int is_ctrl_valid_for_codec(struct msm_vidc_inst *inst, } break; case V4L2_CID_MPEG_VIDEO_H264_PROFILE: - if (inst->fmts[OUTPUT_PORT]->fourcc == V4L2_PIX_FMT_H264_MVC && + if 
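
The rotator change above adds the same early guard to both try_fmt entry points: a 0x0 resolution would otherwise flow into the plane-size math and yield zero-sized buffers, so it is now rejected up front with -EINVAL. A compilable model of the check:

    #include <errno.h>
    #include <stdio.h>

    struct pix_format { unsigned int width, height; };

    static int try_fmt(const struct pix_format *f)
    {
            if (f->width == 0 || f->height == 0) {
                    fprintf(stderr,
                            "Not supporting 0 width/height: %ux%u\n",
                            f->width, f->height);
                    return -EINVAL;
            }
            return 0;   /* ... continue with rotator config checks ... */
    }

    int main(void)
    {
            struct pix_format bad = { 0, 1080 };

            return try_fmt(&bad) == -EINVAL ? 0 : 1;
    }
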
(inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC && ctrl->val != V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) { dprintk(VIDC_ERR, "Profile %#x not supported for MVC\n", @@ -680,7 +685,7 @@ static int is_ctrl_valid_for_codec(struct msm_vidc_inst *inst, } break; case V4L2_CID_MPEG_VIDEO_H264_LEVEL: - if (inst->fmts[OUTPUT_PORT]->fourcc == V4L2_PIX_FMT_H264_MVC && + if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC && ctrl->val >= V4L2_MPEG_VIDEO_H264_LEVEL_5_2) { dprintk(VIDC_ERR, "Level %#x not supported for MVC\n", ctrl->val); @@ -712,6 +717,14 @@ struct msm_vidc_format vdec_formats[] = { .type = CAPTURE_PORT, }, { + .name = "UBWC YCbCr Semiplanar 4:2:0 10bit", + .description = "UBWC Y/CbCr 4:2:0 10bit", + .fourcc = V4L2_PIX_FMT_NV12_TP10_UBWC, + .num_planes = 2, + .get_frame_size = get_frame_size_nv12_ubwc_10bit, + .type = CAPTURE_PORT, + }, + { .name = "Mpeg4", .description = "Mpeg4 compressed format", .fourcc = V4L2_PIX_FMT_MPEG4, @@ -883,10 +896,10 @@ int msm_vdec_prepare_buf(struct msm_vidc_inst *inst, case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (b->length != inst->fmts[CAPTURE_PORT]->num_planes) { + if (b->length != inst->fmts[CAPTURE_PORT].num_planes) { dprintk(VIDC_ERR, "Planes mismatch: needed: %d, allocated: %d\n", - inst->fmts[CAPTURE_PORT]->num_planes, + inst->fmts[CAPTURE_PORT].num_planes, b->length); rc = -EINVAL; break; @@ -962,10 +975,10 @@ int msm_vdec_release_buf(struct msm_vidc_inst *inst, case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (b->length != inst->fmts[CAPTURE_PORT]->num_planes) { + if (b->length != inst->fmts[CAPTURE_PORT].num_planes) { dprintk(VIDC_ERR, "Planes mismatch: needed: %d, to release: %d\n", - inst->fmts[CAPTURE_PORT]->num_planes, b->length); + inst->fmts[CAPTURE_PORT].num_planes, b->length); rc = -EINVAL; break; } @@ -1086,9 +1099,9 @@ int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) hdev = inst->core->device; if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) - fmt = inst->fmts[CAPTURE_PORT]; + fmt = &inst->fmts[CAPTURE_PORT]; else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) - fmt = inst->fmts[OUTPUT_PORT]; + fmt = &inst->fmts[OUTPUT_PORT]; else return -ENOTSUPP; @@ -1237,6 +1250,8 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) rc = -EINVAL; goto err_invalid_fmt; } + memcpy(&inst->fmts[fmt->type], fmt, + sizeof(struct msm_vidc_format)); inst->prop.width[CAPTURE_PORT] = f->fmt.pix_mp.width; inst->prop.height[CAPTURE_PORT] = f->fmt.pix_mp.height; @@ -1244,7 +1259,6 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) msm_comm_get_hal_output_buffer(inst), f->fmt.pix_mp.pixelformat); - inst->fmts[fmt->type] = fmt; if (msm_comm_get_stream_output_mode(inst) == HAL_VIDEO_DECODER_SECONDARY) { frame_sz.buffer_type = HAL_BUFFER_OUTPUT2; @@ -1259,10 +1273,10 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) } f->fmt.pix_mp.plane_fmt[0].sizeimage = - fmt->get_frame_size(0, + inst->fmts[fmt->type].get_frame_size(0, f->fmt.pix_mp.height, f->fmt.pix_mp.width); - extra_idx = EXTRADATA_IDX(fmt->num_planes); + extra_idx = EXTRADATA_IDX(inst->fmts[fmt->type].num_planes); if (extra_idx && extra_idx < VIDEO_MAX_PLANES) { f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage = VENUS_EXTRADATA_SIZE( @@ -1270,8 +1284,8 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) inst->prop.width[CAPTURE_PORT]); } - f->fmt.pix_mp.num_planes = fmt->num_planes; - for (i 
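
The new 10-bit capture format slots into the format-descriptor pattern vdec_formats[] already follows: each entry pairs a fourcc with a get_frame_size callback, and the new entry simply defers to the VENUS_BUFFER_SIZE() helper with COLOR_FMT_NV12_BPP10_UBWC. A sketch of that table-of-callbacks shape; the size math below is a placeholder, not the real macro:

    #include <stdio.h>

    struct vid_fmt {
            const char *name;
            unsigned (*frame_size)(unsigned w, unsigned h);
    };

    static unsigned size_nv12(unsigned w, unsigned h) { return w * h * 3 / 2; }
    /* placeholder math; the driver defers to VENUS_BUFFER_SIZE() */
    static unsigned size_tp10(unsigned w, unsigned h) { return w * h * 2; }

    static const struct vid_fmt formats[] = {
            { "NV12",           size_nv12 },
            { "NV12_TP10_UBWC", size_tp10 },
    };

    int main(void)
    {
            unsigned i;

            for (i = 0; i < sizeof(formats) / sizeof(formats[0]); i++)
                    printf("%s: %u bytes at 1920x1080\n", formats[i].name,
                           formats[i].frame_size(1920, 1080));
            return 0;
    }
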
= 0; i < fmt->num_planes; ++i) { + f->fmt.pix_mp.num_planes = inst->fmts[fmt->type].num_planes; + for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) { inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage; } @@ -1290,6 +1304,8 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) rc = -EINVAL; goto err_invalid_fmt; } + memcpy(&inst->fmts[fmt->type], fmt, + sizeof(struct msm_vidc_format)); rc = msm_comm_try_state(inst, MSM_VIDC_CORE_INIT_DONE); if (rc) { @@ -1297,17 +1313,16 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) goto err_invalid_fmt; } - if (!(get_hal_codec(fmt->fourcc) & + if (!(get_hal_codec(inst->fmts[fmt->type].fourcc) & inst->core->dec_codec_supported)) { dprintk(VIDC_ERR, "Codec(%#x) is not present in the supported codecs list(%#x)\n", - get_hal_codec(fmt->fourcc), + get_hal_codec(inst->fmts[fmt->type].fourcc), inst->core->dec_codec_supported); rc = -EINVAL; goto err_invalid_fmt; } - inst->fmts[fmt->type] = fmt; rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE); if (rc) { dprintk(VIDC_ERR, "Failed to open instance\n"); @@ -1330,14 +1345,15 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) frame_sz.height); msm_comm_try_set_prop(inst, HAL_PARAM_FRAME_SIZE, &frame_sz); - max_input_size = get_frame_size(inst, fmt, f->type, 0); + max_input_size = get_frame_size( +inst, &inst->fmts[fmt->type], f->type, 0); if (f->fmt.pix_mp.plane_fmt[0].sizeimage > max_input_size || !f->fmt.pix_mp.plane_fmt[0].sizeimage) { f->fmt.pix_mp.plane_fmt[0].sizeimage = max_input_size; } - f->fmt.pix_mp.num_planes = fmt->num_planes; - for (i = 0; i < fmt->num_planes; ++i) { + f->fmt.pix_mp.num_planes = inst->fmts[fmt->type].num_planes; + for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) { inst->bufq[OUTPUT_PORT].vb2_bufq.plane_sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage; } @@ -1451,20 +1467,20 @@ static int msm_vdec_queue_setup(struct vb2_queue *q, switch (q->type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: - *num_planes = inst->fmts[OUTPUT_PORT]->num_planes; + *num_planes = inst->fmts[OUTPUT_PORT].num_planes; if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS || *num_buffers > MAX_NUM_OUTPUT_BUFFERS) *num_buffers = MIN_NUM_OUTPUT_BUFFERS; for (i = 0; i < *num_planes; i++) { sizes[i] = get_frame_size(inst, - inst->fmts[OUTPUT_PORT], q->type, i); + &inst->fmts[OUTPUT_PORT], q->type, i); } rc = set_actual_buffer_count(inst, *num_buffers, HAL_BUFFER_INPUT); break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: dprintk(VIDC_DBG, "Getting bufreqs on capture plane\n"); - *num_planes = inst->fmts[CAPTURE_PORT]->num_planes; + *num_planes = inst->fmts[CAPTURE_PORT].num_planes; rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE); if (rc) { dprintk(VIDC_ERR, "Failed to open instance\n"); @@ -1549,7 +1565,7 @@ static int msm_vdec_queue_setup(struct vb2_queue *q, } extra_idx = - EXTRADATA_IDX(inst->fmts[CAPTURE_PORT]->num_planes); + EXTRADATA_IDX(inst->fmts[CAPTURE_PORT].num_planes); if (extra_idx && extra_idx < VIDEO_MAX_PLANES) { sizes[extra_idx] = VENUS_EXTRADATA_SIZE( @@ -1650,7 +1666,7 @@ static inline int start_streaming(struct msm_vidc_inst *inst) unsigned int buffer_size; struct msm_vidc_format *fmt = NULL; - fmt = inst->fmts[CAPTURE_PORT]; + fmt = &inst->fmts[CAPTURE_PORT]; buffer_size = fmt->get_frame_size(0, inst->prop.height[CAPTURE_PORT], inst->prop.width[CAPTURE_PORT]); @@ -1872,8 +1888,6 @@ int msm_vdec_inst_init(struct msm_vidc_inst *inst) dprintk(VIDC_ERR, "Invalid input = %pK\n", inst); return -EINVAL; } - 
inst->fmts[OUTPUT_PORT] = &vdec_formats[2]; - inst->fmts[CAPTURE_PORT] = &vdec_formats[0]; inst->prop.height[CAPTURE_PORT] = DEFAULT_HEIGHT; inst->prop.width[CAPTURE_PORT] = DEFAULT_WIDTH; inst->prop.height[OUTPUT_PORT] = DEFAULT_HEIGHT; @@ -1889,6 +1903,10 @@ int msm_vdec_inst_init(struct msm_vidc_inst *inst) inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_STATIC; inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC; inst->prop.fps = DEFAULT_FPS; + memcpy(&inst->fmts[OUTPUT_PORT], &vdec_formats[2], + sizeof(struct msm_vidc_format)); + memcpy(&inst->fmts[CAPTURE_PORT], &vdec_formats[0], + sizeof(struct msm_vidc_format)); return rc; } diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c index 99f30d9cb97b..9c855e89e3ff 100644 --- a/drivers/media/platform/msm/vidc/msm_venc.c +++ b/drivers/media/platform/msm/vidc/msm_venc.c @@ -1485,7 +1485,7 @@ static int msm_venc_queue_setup(struct vb2_queue *q, default: break; } - inst->fmts[CAPTURE_PORT]->num_planes = *num_planes; + inst->fmts[CAPTURE_PORT].num_planes = *num_planes; for (i = 0; i < *num_planes; i++) { int extra_idx = EXTRADATA_IDX(*num_planes); @@ -1560,7 +1560,7 @@ static int msm_venc_queue_setup(struct vb2_queue *q, break; } - inst->fmts[OUTPUT_PORT]->num_planes = *num_planes; + inst->fmts[OUTPUT_PORT].num_planes = *num_planes; rc = call_hfi_op(hdev, session_set_property, inst->session, property_id, &new_buf_count); if (rc) @@ -1570,12 +1570,12 @@ static int msm_venc_queue_setup(struct vb2_queue *q, inst->buff_req.buffer[0].buffer_size, inst->buff_req.buffer[0].buffer_alignment, inst->buff_req.buffer[0].buffer_count_actual); - sizes[0] = inst->fmts[OUTPUT_PORT]->get_frame_size( + sizes[0] = inst->fmts[OUTPUT_PORT].get_frame_size( 0, inst->prop.height[OUTPUT_PORT], inst->prop.width[OUTPUT_PORT]); extra_idx = - EXTRADATA_IDX(inst->fmts[OUTPUT_PORT]->num_planes); + EXTRADATA_IDX(inst->fmts[OUTPUT_PORT].num_planes); if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) { buff_req_buffer = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_INPUT); @@ -1610,7 +1610,7 @@ static int msm_venc_toggle_hier_p(struct msm_vidc_inst *inst, int layers) return -EINVAL; } - if (inst->fmts[CAPTURE_PORT]->fourcc != V4L2_PIX_FMT_VP8) + if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_VP8) return 0; num_enh_layers = layers ? 
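
The theme running through the msm_vdec/msm_venc hunks is the switch from storing pointers into the shared static format tables to memcpy()ing the chosen entry into the instance. The motivation is visible in the encoder's queue_setup, which writes num_planes through the format: with pointers, that write scribbled on the static table and leaked into every other instance. A compilable model of the before/after difference:

    #include <stdio.h>
    #include <string.h>

    struct fmt { const char *name; int num_planes; };

    /* stands in for the static vdec_formats[]/venc_formats[] tables */
    static const struct fmt table[] = { { "NV12", 1 }, { "H264", 1 } };

    struct inst { struct fmt fmts[2]; };

    int main(void)
    {
            struct inst a, b;

            /* per-instance copies, as the patch now does */
            memcpy(&a.fmts[0], &table[0], sizeof(struct fmt));
            memcpy(&b.fmts[0], &table[0], sizeof(struct fmt));

            a.fmts[0].num_planes = 2;   /* affects only instance a */
            printf("a=%d b=%d table=%d\n", a.fmts[0].num_planes,
                   b.fmts[0].num_planes, table[0].num_planes);
            return 0;
    }
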
: 0; @@ -2177,10 +2177,10 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) switch (ctrl->id) { case V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD: - if (inst->fmts[CAPTURE_PORT]->fourcc != V4L2_PIX_FMT_H264 && - inst->fmts[CAPTURE_PORT]->fourcc != + if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264 && + inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264_NO_SC && - inst->fmts[CAPTURE_PORT]->fourcc != + inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC) { dprintk(VIDC_ERR, "Control %#x only valid for H264 and HEVC\n", @@ -2669,8 +2669,8 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) break; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE: { bool codec_avc = - inst->fmts[CAPTURE_PORT]->fourcc == V4L2_PIX_FMT_H264 || - inst->fmts[CAPTURE_PORT]->fourcc == + inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 || + inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264_NO_SC; temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE); @@ -2696,8 +2696,8 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) cir_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS); is_cont_intra_supported = - (inst->fmts[CAPTURE_PORT]->fourcc == V4L2_PIX_FMT_H264) || - (inst->fmts[CAPTURE_PORT]->fourcc == V4L2_PIX_FMT_HEVC); + (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264) || + (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC); if (is_cont_intra_supported) { if (ctrl->val != HAL_INTRA_REFRESH_NONE) @@ -3054,7 +3054,7 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) break; case V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS: - if (inst->fmts[CAPTURE_PORT]->fourcc != V4L2_PIX_FMT_HEVC) { + if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC) { dprintk(VIDC_ERR, "Hier B supported for HEVC only\n"); rc = -ENOTSUPP; break; @@ -3483,8 +3483,6 @@ int msm_venc_inst_init(struct msm_vidc_inst *inst) dprintk(VIDC_ERR, "Invalid input = %pK\n", inst); return -EINVAL; } - inst->fmts[CAPTURE_PORT] = &venc_formats[4]; - inst->fmts[OUTPUT_PORT] = &venc_formats[0]; inst->prop.height[CAPTURE_PORT] = DEFAULT_HEIGHT; inst->prop.width[CAPTURE_PORT] = DEFAULT_WIDTH; inst->prop.height[OUTPUT_PORT] = DEFAULT_HEIGHT; @@ -3501,6 +3499,10 @@ int msm_venc_inst_init(struct msm_vidc_inst *inst) inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC; inst->prop.fps = DEFAULT_FPS; inst->capability.pixelprocess_capabilities = 0; + memcpy(&inst->fmts[CAPTURE_PORT], &venc_formats[4], + sizeof(struct msm_vidc_format)); + memcpy(&inst->fmts[OUTPUT_PORT], &venc_formats[0], + sizeof(struct msm_vidc_format)); return rc; } @@ -3624,7 +3626,8 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) goto exit; } - inst->fmts[fmt->type] = fmt; + memcpy(&inst->fmts[fmt->type], fmt, + sizeof(struct msm_vidc_format)); rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE); if (rc) { @@ -3676,7 +3679,8 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) rc = -EINVAL; goto exit; } - inst->fmts[fmt->type] = fmt; + memcpy(&inst->fmts[fmt->type], fmt, + sizeof(struct msm_vidc_format)); msm_comm_set_color_format(inst, HAL_BUFFER_INPUT, fmt->fourcc); } else { @@ -3686,7 +3690,7 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) goto exit; } - f->fmt.pix_mp.num_planes = fmt->num_planes; + f->fmt.pix_mp.num_planes = inst->fmts[fmt->type].num_planes; if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { struct hal_frame_size frame_sz = {0}; @@ -3717,12 +3721,12 @@ int 
msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) struct hal_buffer_requirements *bufreq = NULL; int extra_idx = 0; - for (i = 0; i < fmt->num_planes; ++i) { + for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) { f->fmt.pix_mp.plane_fmt[i].sizeimage = - fmt->get_frame_size(i, + inst->fmts[fmt->type].get_frame_size(i, f->fmt.pix_mp.height, f->fmt.pix_mp.width); } - extra_idx = EXTRADATA_IDX(fmt->num_planes); + extra_idx = EXTRADATA_IDX(inst->fmts[fmt->type].num_planes); if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) { bufreq = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_INPUT); @@ -3757,11 +3761,11 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) } if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { - fmt = inst->fmts[CAPTURE_PORT]; + fmt = &inst->fmts[CAPTURE_PORT]; height = inst->prop.height[CAPTURE_PORT]; width = inst->prop.width[CAPTURE_PORT]; } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { - fmt = inst->fmts[OUTPUT_PORT]; + fmt = &inst->fmts[OUTPUT_PORT]; height = inst->prop.height[OUTPUT_PORT]; width = inst->prop.width[OUTPUT_PORT]; } else { @@ -3864,10 +3868,10 @@ int msm_venc_prepare_buf(struct msm_vidc_inst *inst, case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: - if (b->length != inst->fmts[CAPTURE_PORT]->num_planes) { + if (b->length != inst->fmts[CAPTURE_PORT].num_planes) { dprintk(VIDC_ERR, "Planes mismatch: needed: %d, allocated: %d\n", - inst->fmts[CAPTURE_PORT]->num_planes, + inst->fmts[CAPTURE_PORT].num_planes, b->length); rc = -EINVAL; break; @@ -3935,10 +3939,10 @@ int msm_venc_release_buf(struct msm_vidc_inst *inst, break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: { if (b->length != - inst->fmts[CAPTURE_PORT]->num_planes) { + inst->fmts[CAPTURE_PORT].num_planes) { dprintk(VIDC_ERR, "Planes mismatch: needed: %d, to release: %d\n", - inst->fmts[CAPTURE_PORT]->num_planes, + inst->fmts[CAPTURE_PORT].num_planes, b->length); rc = -EINVAL; break; diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index 437ad43e23e9..b12eeddc678f 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -682,7 +682,7 @@ static bool valid_v4l2_buffer(struct v4l2_buffer *b, MAX_PORT_NUM; return port != MAX_PORT_NUM && - inst->fmts[port]->num_planes == b->length; + inst->fmts[port].num_planes == b->length; } int msm_vidc_prepare_buf(void *instance, struct v4l2_buffer *b) @@ -849,7 +849,7 @@ int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b) dprintk(VIDC_DBG, "Queueing device address = %pa\n", &binfo->device_addr[i]); - if (inst->fmts[OUTPUT_PORT]->fourcc == + if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC_HYBRID && binfo->handle[i] && b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { rc = msm_comm_smem_cache_operations(inst, diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index 40643239712f..d1cc08d53017 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -521,12 +521,12 @@ static int msm_comm_vote_bus(struct msm_vidc_core *core) struct v4l2_control ctrl; codec = inst->session_type == MSM_VIDC_DECODER ? - inst->fmts[OUTPUT_PORT]->fourcc : - inst->fmts[CAPTURE_PORT]->fourcc; + inst->fmts[OUTPUT_PORT].fourcc : + inst->fmts[CAPTURE_PORT].fourcc; yuv = inst->session_type == MSM_VIDC_DECODER ? 
- inst->fmts[CAPTURE_PORT]->fourcc : - inst->fmts[OUTPUT_PORT]->fourcc; + inst->fmts[CAPTURE_PORT].fourcc : + inst->fmts[OUTPUT_PORT].fourcc; vote_data[i].domain = get_hal_domain(inst->session_type); vote_data[i].codec = get_hal_codec(codec); @@ -1004,8 +1004,8 @@ static void handle_session_init_done(enum hal_command_response cmd, void *data) core = inst->core; hdev = inst->core->device; codec = inst->session_type == MSM_VIDC_DECODER ? - inst->fmts[OUTPUT_PORT]->fourcc : - inst->fmts[CAPTURE_PORT]->fourcc; + inst->fmts[OUTPUT_PORT].fourcc : + inst->fmts[CAPTURE_PORT].fourcc; /* check if capabilities are available for this session */ for (i = 0; i < VIDC_MAX_SESSIONS; i++) { @@ -2028,7 +2028,7 @@ static void handle_fbd(enum hal_command_response cmd, void *data) ns_to_timeval(time_usec * NSEC_PER_USEC); vbuf->flags = 0; extra_idx = - EXTRADATA_IDX(inst->fmts[CAPTURE_PORT]->num_planes); + EXTRADATA_IDX(inst->fmts[CAPTURE_PORT].num_planes); if (extra_idx && extra_idx < VIDEO_MAX_PLANES) { vb->planes[extra_idx].m.userptr = (unsigned long)fill_buf_done->extra_data_buffer; @@ -2279,8 +2279,8 @@ int msm_comm_scale_clocks_load(struct msm_vidc_core *core, list_for_each_entry(inst, &core->instances, list) { codec = inst->session_type == MSM_VIDC_DECODER ? - inst->fmts[OUTPUT_PORT]->fourcc : - inst->fmts[CAPTURE_PORT]->fourcc; + inst->fmts[OUTPUT_PORT].fourcc : + inst->fmts[CAPTURE_PORT].fourcc; if (msm_comm_turbo_session(inst)) clk_scale_data.power_mode[num_sessions] = @@ -2711,9 +2711,9 @@ static int msm_comm_session_init(int flipped_state, goto exit; } if (inst->session_type == MSM_VIDC_DECODER) { - fourcc = inst->fmts[OUTPUT_PORT]->fourcc; + fourcc = inst->fmts[OUTPUT_PORT].fourcc; } else if (inst->session_type == MSM_VIDC_ENCODER) { - fourcc = inst->fmts[CAPTURE_PORT]->fourcc; + fourcc = inst->fmts[CAPTURE_PORT].fourcc; } else { dprintk(VIDC_ERR, "Invalid session\n"); return -EINVAL; @@ -3601,7 +3601,7 @@ static void populate_frame_data(struct vidc_frame_data *data, data->buffer_type = msm_comm_get_hal_output_buffer(inst); } - extra_idx = EXTRADATA_IDX(inst->fmts[port]->num_planes); + extra_idx = EXTRADATA_IDX(inst->fmts[port].num_planes); if (extra_idx && extra_idx < VIDEO_MAX_PLANES && vb->planes[extra_idx].m.userptr) { data->extradata_addr = vb->planes[extra_idx].m.userptr; @@ -4792,9 +4792,20 @@ int msm_vidc_trigger_ssr(struct msm_vidc_core *core, return -EINVAL; } hdev = core->device; - if (core->state == VIDC_CORE_INIT_DONE) + if (core->state == VIDC_CORE_INIT_DONE) { + /* + * In current implementation user-initiated SSR triggers + * a fatal error from hardware. However, there is no way + * to know if fatal error is due to SSR or not. Handle + * user SSR as non-fatal. + */ + mutex_lock(&core->lock); + core->resources.debug_timeout = false; + mutex_unlock(&core->lock); rc = call_hfi_op(hdev, core_trigger_ssr, hdev->hfi_device_data, type); + } + return rc; } @@ -5265,7 +5276,7 @@ void msm_comm_print_inst_info(struct msm_vidc_inst *inst) port = is_decode ? OUTPUT_PORT : CAPTURE_PORT; dprintk(VIDC_ERR, "%s session, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n", - is_decode ? "Decode" : "Encode", inst->fmts[port]->name, + is_decode ? "Decode" : "Encode", inst->fmts[port].name, inst->prop.height[port], inst->prop.width[port], inst->prop.fps, inst->prop.bitrate, !inst->bit_depth ? 
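
The bus-vote and clock-scaling paths rely on a port convention worth spelling out: for a decoder the compressed stream sits on the OUTPUT (input) port and raw YUV on CAPTURE, while an encoder is the mirror image, which is why codec and yuv pick opposite ports based on session_type. A tiny model of the convention:

    #include <stdio.h>

    enum { OUTPUT_PORT, CAPTURE_PORT };
    enum { DECODER, ENCODER };

    static int codec_port(int session)
    {
            return session == DECODER ? OUTPUT_PORT : CAPTURE_PORT;
    }

    static int yuv_port(int session)
    {
            return session == DECODER ? CAPTURE_PORT : OUTPUT_PORT;
    }

    int main(void)
    {
            printf("decoder: codec on %d, yuv on %d\n",
                   codec_port(DECODER), yuv_port(DECODER));
            printf("encoder: codec on %d, yuv on %d\n",
                   codec_port(ENCODER), yuv_port(ENCODER));
            return 0;
    }
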
"8" : "10"); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c index 9e67ef096c63..0e62811bf41b 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c @@ -237,8 +237,8 @@ void msm_dcvs_init_load(struct msm_vidc_inst *inst) } fourcc = inst->session_type == MSM_VIDC_DECODER ? - inst->fmts[OUTPUT_PORT]->fourcc : - inst->fmts[CAPTURE_PORT]->fourcc; + inst->fmts[OUTPUT_PORT].fourcc : + inst->fmts[CAPTURE_PORT].fourcc; for (i = 0; i < num_rows; i++) { bool matches = msm_dcvs_check_codec_supported( @@ -589,7 +589,7 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst) } is_codec_supported = msm_dcvs_check_codec_supported( - inst->fmts[OUTPUT_PORT]->fourcc, + inst->fmts[OUTPUT_PORT].fourcc, inst->dcvs.supported_codecs, inst->session_type); if (!is_codec_supported || @@ -607,7 +607,7 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst) inst->dcvs.extra_buffer_count = DCVS_ENC_EXTRA_OUTPUT_BUFFERS; is_codec_supported = msm_dcvs_check_codec_supported( - inst->fmts[CAPTURE_PORT]->fourcc, + inst->fmts[CAPTURE_PORT].fourcc, inst->dcvs.supported_codecs, inst->session_type); if (!is_codec_supported || diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c index d3027c08d24e..efb90c69881f 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c @@ -289,10 +289,10 @@ static ssize_t inst_info_read(struct file *file, char __user *buf, for (i = 0; i < MAX_PORT_NUM; i++) { write_str(&dbg_buf, "capability: %s\n", i == OUTPUT_PORT ? "Output" : "Capture"); - write_str(&dbg_buf, "name : %s\n", inst->fmts[i]->name); - write_str(&dbg_buf, "planes : %d\n", inst->fmts[i]->num_planes); + write_str(&dbg_buf, "name : %s\n", inst->fmts[i].name); + write_str(&dbg_buf, "planes : %d\n", inst->fmts[i].num_planes); write_str( - &dbg_buf, "type: %s\n", inst->fmts[i]->type == OUTPUT_PORT ? + &dbg_buf, "type: %s\n", inst->fmts[i].type == OUTPUT_PORT ? 
"Output" : "Capture"); switch (inst->buffer_mode_set[i]) { case HAL_BUFFER_MODE_STATIC: @@ -311,7 +311,7 @@ static ssize_t inst_info_read(struct file *file, char __user *buf, write_str(&dbg_buf, "count: %u\n", inst->bufq[i].vb2_bufq.num_buffers); - for (j = 0; j < inst->fmts[i]->num_planes; j++) + for (j = 0; j < inst->fmts[i].num_planes; j++) write_str(&dbg_buf, "size for plane %d: %u\n", j, inst->bufq[i].vb2_bufq.plane_sizes[j]); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h index b6e74715ad07..161e94f99040 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h @@ -261,7 +261,7 @@ struct msm_vidc_inst { void *session; struct session_prop prop; enum instance_state state; - struct msm_vidc_format *fmts[MAX_PORT_NUM]; + struct msm_vidc_format fmts[MAX_PORT_NUM]; struct buf_queue bufq[MAX_PORT_NUM]; struct msm_vidc_list pendingq; struct msm_vidc_list scratchbufs; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c index 25fccab99fb3..a3080be8cd7a 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c @@ -1166,7 +1166,6 @@ static int msm_vidc_setup_context_bank(struct context_bank_info *cb, struct device *dev) { int rc = 0; - int disable_htw = 1; int secure_vmid = VMID_INVAL; struct bus_type *bus; @@ -1192,14 +1191,6 @@ static int msm_vidc_setup_context_bank(struct context_bank_info *cb, goto remove_cb; } - rc = iommu_domain_set_attr(cb->mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw); - if (rc) { - dprintk(VIDC_ERR, "%s - disable coherent HTW failed: %s %d\n", - __func__, dev_name(dev), rc); - goto release_mapping; - } - if (cb->is_secure) { secure_vmid = get_secure_vmid(cb); rc = iommu_domain_set_attr(cb->mapping->domain, diff --git a/drivers/media/platform/msm/vidc/venus_boot.c b/drivers/media/platform/msm/vidc/venus_boot.c index 925c97a5b6e8..85c3e15edded 100644 --- a/drivers/media/platform/msm/vidc/venus_boot.c +++ b/drivers/media/platform/msm/vidc/venus_boot.c @@ -190,8 +190,6 @@ static int pil_venus_auth_and_reset(void) { int rc; - /* Need to enable this for new SMMU to set the device attribute */ - bool disable_htw = true; phys_addr_t fw_bias = venus_data->resources->firmware_base; void __iomem *reg_base = venus_data->reg_base; u32 ver; @@ -278,17 +276,6 @@ static int pil_venus_auth_and_reset(void) if (iommu_present) { phys_addr_t pa = fw_bias; - /* Enable this for new SMMU to set the device attribute */ - rc = iommu_domain_set_attr(venus_data->mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw); - if (rc) { - dprintk(VIDC_ERR, - "%s: Failed to disable COHERENT_HTW: %s\n", - __func__, dev_name(dev)); - goto release_mapping; - } - rc = arm_iommu_attach_device(dev, venus_data->mapping); if (rc) { dprintk(VIDC_ERR, diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile index 81651c7dec72..a0f76581e6eb 100644 --- a/drivers/net/wireless/ath/wil6210/Makefile +++ b/drivers/net/wireless/ath/wil6210/Makefile @@ -19,6 +19,7 @@ wil6210-y += wil_platform.o wil6210-y += ethtool.o wil6210-y += wil_crash_dump.o wil6210-y += p2p.o +wil6210-y += ftm.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 
60f2a2e541a9..17b419d408cd 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -17,6 +17,7 @@ #include <linux/etherdevice.h> #include "wil6210.h" #include "wmi.h" +#include "ftm.h" #define WIL_MAX_ROC_DURATION_MS 5000 @@ -36,6 +37,90 @@ static struct ieee80211_channel wil_60ghz_channels[] = { /* channel 4 not supported yet */ }; +/* Vendor id to be used in vendor specific command and events + * to user space. + * NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID, + * vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and + * qca_wlan_vendor_attr is open source file src/common/qca-vendor.h in + * git://w1.fi/srv/git/hostap.git; the values here are just a copy of that + */ + +#define QCA_NL80211_VENDOR_ID 0x001374 + +enum qca_nl80211_vendor_subcmds { + QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA = 128, + QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION = 129, + QCA_NL80211_VENDOR_SUBCMD_FTM_ABORT_SESSION = 130, + QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT = 131, + QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE = 132, + QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER = 133, + QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS = 134, + QCA_NL80211_VENDOR_SUBCMD_AOA_ABORT_MEAS = 135, + QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT = 136, +}; + +/* vendor specific commands */ +static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = { + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_ftm_get_capabilities + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_ftm_start_session + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_ABORT_SESSION, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_ftm_abort_session + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_ftm_configure_responder + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_aoa_start_measurement + }, + { + .info.vendor_id = QCA_NL80211_VENDOR_ID, + .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_AOA_ABORT_MEAS, + .flags = WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_RUNNING, + .doit = wil_aoa_abort_measurement + }, +}; + +/* vendor specific events */ +static const struct nl80211_vendor_cmd_info wil_nl80211_vendor_events[] = { + [QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX] = { + .vendor_id = QCA_NL80211_VENDOR_ID, + .subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT + }, + [QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX] = { + .vendor_id = QCA_NL80211_VENDOR_ID, + .subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE + }, + [QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX] = { + .vendor_id = QCA_NL80211_VENDOR_ID, + .subcmd = QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT + }, +}; + static struct ieee80211_supported_band wil_band_60ghz = { .channels = wil_60ghz_channels, .n_channels = ARRAY_SIZE(wil_60ghz_channels), @@ -1483,6 +1568,11 @@ static void wil_wiphy_init(struct wiphy *wiphy) wiphy->n_cipher_suites = 
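
The cfg80211 hunk registers a table of vendor commands keyed by (vendor_id, subcmd); cfg80211 matches an incoming NL80211_CMD_VENDOR request against that table and invokes the entry's .doit handler. A userspace model of that dispatch (handlers and the table are illustrative, the IDs are the ones added above):

    #include <stdio.h>

    #define QCA_VENDOR_ID 0x001374

    struct vcmd { unsigned vendor_id, subcmd; int (*doit)(void); };

    static int get_capa(void)      { puts("LOC_GET_CAPA");      return 0; }
    static int start_session(void) { puts("FTM_START_SESSION"); return 0; }

    static const struct vcmd cmds[] = {
            { QCA_VENDOR_ID, 128, get_capa },
            { QCA_VENDOR_ID, 129, start_session },
    };

    static int dispatch(unsigned vid, unsigned sub)
    {
            unsigned i;

            for (i = 0; i < sizeof(cmds) / sizeof(cmds[0]); i++)
                    if (cmds[i].vendor_id == vid && cmds[i].subcmd == sub)
                            return cmds[i].doit();
            return -1;  /* unknown command */
    }

    int main(void) { return dispatch(QCA_VENDOR_ID, 129); }
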
ARRAY_SIZE(wil_cipher_suites); wiphy->mgmt_stypes = wil_mgmt_stypes; wiphy->features |= NL80211_FEATURE_SK_TX_STATUS; + + wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands); + wiphy->vendor_commands = wil_nl80211_vendor_commands; + wiphy->vendor_events = wil_nl80211_vendor_events; + wiphy->n_vendor_events = ARRAY_SIZE(wil_nl80211_vendor_events); } struct wireless_dev *wil_cfg80211_init(struct device *dev) diff --git a/drivers/net/wireless/ath/wil6210/ftm.c b/drivers/net/wireless/ath/wil6210/ftm.c new file mode 100644 index 000000000000..5cf07343a33c --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/ftm.c @@ -0,0 +1,903 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/etherdevice.h> +#include <net/netlink.h> +#include "wil6210.h" +#include "ftm.h" +#include "wmi.h" + +/* FTM session ID we use with FW */ +#define WIL_FTM_FW_SESSION_ID 1 + +/* fixed spare allocation we reserve in NL messages we allocate */ +#define WIL_FTM_NL_EXTRA_ALLOC 32 + +/* approx maximum length for FTM_MEAS_RESULT NL80211 event */ +#define WIL_FTM_MEAS_RESULT_MAX_LENGTH 2048 + +/* timeout for waiting for standalone AOA measurement, milliseconds */ +#define WIL_AOA_MEASUREMENT_TIMEOUT 1000 + +/* maximum number of allowed FTM measurements per burst */ +#define WIL_FTM_MAX_MEAS_PER_BURST 31 + +/* initial token to use on non-secure FTM measurement */ +#define WIL_TOF_FTM_DEFAULT_INITIAL_TOKEN 2 + +#define WIL_TOF_FTM_MAX_LCI_LENGTH (240) +#define WIL_TOF_FTM_MAX_LCR_LENGTH (240) + +static const struct +nla_policy wil_nl80211_loc_policy[QCA_WLAN_VENDOR_ATTR_LOC_MAX + 1] = { + [QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE] = { .type = NLA_U64 }, + [QCA_WLAN_VENDOR_ATTR_LOC_CAPA] = { .type = NLA_NESTED }, + [QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS] = { .type = NLA_NESTED }, + [QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS] = { .type = NLA_NESTED }, + [QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE] = { .type = NLA_FLAG }, + [QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS] = { .type = NLA_U32 }, + [QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN] = { .type = NLA_U8 }, + [QCA_WLAN_VENDOR_ATTR_AOA_TYPE] = { .type = NLA_U32 }, + [QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK] = { .type = NLA_U32 }, +}; + +static const struct +nla_policy wil_nl80211_ftm_peer_policy[ + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX + 1] = { + [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR] = { .len = ETH_ALEN }, + [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS] = { .type = NLA_U32 }, + [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS] = { .type = NLA_NESTED }, + [QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID] = { .type = NLA_U8 }, +}; + +static const struct +nla_policy wil_nl80211_ftm_meas_param_policy[ + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX + 1] = { + [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST] = { .type = NLA_U8 
}, + [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP] = { .type = NLA_U8 }, + [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION] = { .type = NLA_U8 }, + [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD] = { .type = NLA_U16 }, +}; + +static int wil_ftm_parse_meas_params(struct wil6210_priv *wil, + struct nlattr *attr, + struct wil_ftm_meas_params *params) +{ + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX + 1]; + int rc; + + if (!attr) { + /* temporary defaults for one-shot measurement */ + params->meas_per_burst = 1; + params->burst_period = 5; /* 500 milliseconds */ + return 0; + } + rc = nla_parse_nested(tb, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX, + attr, wil_nl80211_ftm_meas_param_policy); + if (rc) { + wil_err(wil, "invalid measurement params\n"); + return rc; + } + if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST]) + params->meas_per_burst = nla_get_u8( + tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST]); + if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP]) + params->num_of_bursts_exp = nla_get_u8( + tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP]); + if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION]) + params->burst_duration = nla_get_u8( + tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION]); + if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD]) + params->burst_period = nla_get_u16( + tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD]); + return 0; +} + +static int wil_ftm_validate_meas_params(struct wil6210_priv *wil, + struct wil_ftm_meas_params *params) +{ + /* temporary allow only single-burst */ + if (params->meas_per_burst > WIL_FTM_MAX_MEAS_PER_BURST || + params->num_of_bursts_exp != 0) { + wil_err(wil, "invalid measurement params\n"); + return -EINVAL; + } + + return 0; +} + +static int wil_ftm_append_meas_params(struct wil6210_priv *wil, + struct sk_buff *msg, + struct wil_ftm_meas_params *params) +{ + struct nlattr *nl_p; + + nl_p = nla_nest_start( + msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS); + if (!nl_p) + goto out_put_failure; + if (nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST, + params->meas_per_burst) || + nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP, + params->num_of_bursts_exp) || + nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION, + params->burst_duration) || + nla_put_u16(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD, + params->burst_period)) + goto out_put_failure; + nla_nest_end(msg, nl_p); + return 0; +out_put_failure: + return -ENOBUFS; +} + +static int wil_ftm_append_peer_meas_res(struct wil6210_priv *wil, + struct sk_buff *msg, + struct wil_ftm_peer_meas_res *res) +{ + struct nlattr *nl_mres, *nl_f; + int i; + + if (nla_put(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR, + ETH_ALEN, res->mac_addr) || + nla_put_u32(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS, + res->flags) || + nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS, + res->status)) + goto out_put_failure; + if (res->status == QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED && + nla_put_u8(msg, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS, + res->value_seconds)) + goto out_put_failure; + if (res->has_params && + wil_ftm_append_meas_params(wil, msg, &res->params)) + goto out_put_failure; + nl_mres = nla_nest_start(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS); + if (!nl_mres) + goto out_put_failure; + for (i = 0; i < res->n_meas; i++) { + nl_f = nla_nest_start(msg, i); + if (!nl_f) + goto out_put_failure; + if (nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1, + res->meas[i].t1) || + nla_put_u64(msg, 
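
The parse/validate pair above establishes the measurement-parameter handling: absent attributes fall back to a one-shot default (one measurement per burst, burst_period 5, which the in-code comment equates to 500 ms, implying units of 100 ms), and validation currently rejects anything beyond a single burst. A compilable model of defaults plus validation:

    #include <stdio.h>

    #define MAX_MEAS_PER_BURST 31   /* WIL_FTM_MAX_MEAS_PER_BURST */

    struct meas_params {
            unsigned char meas_per_burst, num_of_bursts_exp, burst_duration;
            unsigned short burst_period;    /* units of 100 ms */
    };

    static void apply_defaults(struct meas_params *p)
    {
            p->meas_per_burst = 1;
            p->num_of_bursts_exp = 0;
            p->burst_duration = 0;
            p->burst_period = 5;            /* 5 x 100 ms = 500 ms */
    }

    static int validate(const struct meas_params *p)
    {
            /* single-burst only, bounded measurement count */
            return (p->meas_per_burst > MAX_MEAS_PER_BURST ||
                    p->num_of_bursts_exp != 0) ? -1 : 0;
    }

    int main(void)
    {
            struct meas_params p;

            apply_defaults(&p);
            printf("valid=%d period=%d ms\n", validate(&p) == 0,
                   p.burst_period * 100);
            return 0;
    }
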
QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2, + res->meas[i].t2) || + nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3, + res->meas[i].t3) || + nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4, + res->meas[i].t4)) + goto out_put_failure; + nla_nest_end(msg, nl_f); + } + nla_nest_end(msg, nl_mres); + return 0; +out_put_failure: + wil_err(wil, "fail to append peer result\n"); + return -ENOBUFS; +} + +static void wil_ftm_send_meas_result(struct wil6210_priv *wil, + struct wil_ftm_peer_meas_res *res) +{ + struct sk_buff *vendor_event = NULL; + struct nlattr *nl_res; + int rc = 0; + + wil_dbg_misc(wil, "sending %d results for peer %pM\n", + res->n_meas, res->mac_addr); + + vendor_event = cfg80211_vendor_event_alloc( + wil_to_wiphy(wil), + wil->wdev, + WIL_FTM_MEAS_RESULT_MAX_LENGTH, + QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX, + GFP_KERNEL); + if (!vendor_event) { + wil_err(wil, "fail to allocate measurement result\n"); + rc = -ENOMEM; + goto out; + } + + if (nla_put_u64( + vendor_event, QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE, + wil->ftm.session_cookie)) { + rc = -ENOBUFS; + goto out; + } + + nl_res = nla_nest_start(vendor_event, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS); + if (!nl_res) { + rc = -ENOBUFS; + goto out; + } + + rc = wil_ftm_append_peer_meas_res(wil, vendor_event, res); + if (rc) + goto out; + + nla_nest_end(vendor_event, nl_res); + cfg80211_vendor_event(vendor_event, GFP_KERNEL); + vendor_event = NULL; +out: + if (vendor_event) + kfree_skb(vendor_event); + if (rc) + wil_err(wil, "send peer result failed, err %d\n", rc); +} + +static void wil_ftm_send_peer_res(struct wil6210_priv *wil) +{ + if (!wil->ftm.has_ftm_res || !wil->ftm.ftm_res) + return; + + wil_ftm_send_meas_result(wil, wil->ftm.ftm_res); + wil->ftm.has_ftm_res = 0; + wil->ftm.ftm_res->n_meas = 0; +} + +static void wil_aoa_measurement_timeout(struct work_struct *work) +{ + struct wil_ftm_priv *ftm = container_of(work, struct wil_ftm_priv, + aoa_timeout_work); + struct wil6210_priv *wil = container_of(ftm, struct wil6210_priv, ftm); + struct wil_aoa_meas_result res; + + wil_dbg_misc(wil, "AOA measurement timeout\n"); + + memset(&res, 0, sizeof(res)); + ether_addr_copy(res.mac_addr, wil->ftm.aoa_peer_mac_addr); + res.type = wil->ftm.aoa_type; + res.status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED; + wil_aoa_cfg80211_meas_result(wil, &res); +} + +static int +wil_ftm_cfg80211_start_session(struct wil6210_priv *wil, + struct wil_ftm_session_request *request) +{ + int rc = 0; + bool has_lci = false, has_lcr = false; + u8 max_meas = 0, *ptr; + u32 i, cmd_len; + struct wmi_tof_session_start_cmd *cmd; + + mutex_lock(&wil->ftm.lock); + if (wil->ftm.session_started) { + wil_err(wil, "FTM session already running\n"); + rc = -EAGAIN; + goto out; + } + /* for now allow measurement to associated AP only */ + if (!test_bit(wil_status_fwconnected, wil->status)) { + wil_err(wil, "must be associated\n"); + rc = -ENOTSUPP; + goto out; + } + + for (i = 0; i < request->n_peers; i++) { + if (request->peers[i].flags & + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI) + has_lci = true; + if (request->peers[i].flags & + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR) + has_lcr = true; + max_meas = max(max_meas, + request->peers[i].params.meas_per_burst); + } + + wil->ftm.ftm_res = kzalloc(sizeof(*wil->ftm.ftm_res) + + max_meas * sizeof(struct wil_ftm_peer_meas) + + (has_lci ? WIL_TOF_FTM_MAX_LCI_LENGTH : 0) + + (has_lcr ? 
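
wil_ftm_send_meas_result() follows a single-cleanup-path ownership pattern: the event skb is freed on any failed put, but once cfg80211_vendor_event() consumes it the local pointer is cleared so the shared exit path's kfree_skb() becomes a no-op. A compilable userspace model of that pattern with hypothetical msg_* helpers standing in for the skb API:

    #include <stdio.h>
    #include <stdlib.h>

    struct msg { char *buf; };

    static struct msg *msg_alloc(void) { return calloc(1, sizeof(struct msg)); }
    static void msg_free(struct msg *m) { if (m) { free(m->buf); free(m); } }
    static int  msg_put(struct msg *m)  { (void)m; return 0; /* may fail */ }
    static void msg_send(struct msg *m) { msg_free(m); /* consumes msg */ }

    int main(void)
    {
            struct msg *m = msg_alloc();
            int rc = m ? 0 : -1;

            if (!m)
                    goto out;
            rc = msg_put(m);
            if (rc)
                    goto out;
            msg_send(m);
            m = NULL;               /* ownership transferred on success */
    out:
            msg_free(m);            /* no-op on the success path */
            if (rc)
                    fprintf(stderr, "send failed: %d\n", rc);
            return rc ? 1 : 0;
    }
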
WIL_TOF_FTM_MAX_LCR_LENGTH : 0), GFP_KERNEL); + if (!wil->ftm.ftm_res) { + rc = -ENOMEM; + goto out; + } + ptr = (u8 *)wil->ftm.ftm_res; + ptr += sizeof(struct wil_ftm_peer_meas_res) + + max_meas * sizeof(struct wil_ftm_peer_meas); + if (has_lci) { + wil->ftm.ftm_res->lci = ptr; + ptr += WIL_TOF_FTM_MAX_LCI_LENGTH; + } + if (has_lcr) + wil->ftm.ftm_res->lcr = ptr; + wil->ftm.max_ftm_meas = max_meas; + + cmd_len = sizeof(struct wmi_tof_session_start_cmd) + + request->n_peers * sizeof(struct wmi_ftm_dest_info); + cmd = kzalloc(cmd_len, GFP_KERNEL); + if (!cmd) { + rc = -ENOMEM; + goto out_ftm_res; + } + + cmd->session_id = cpu_to_le32(WIL_FTM_FW_SESSION_ID); + cmd->num_of_dest = cpu_to_le16(request->n_peers); + for (i = 0; i < request->n_peers; i++) { + ether_addr_copy(cmd->ftm_dest_info[i].dst_mac, + request->peers[i].mac_addr); + cmd->ftm_dest_info[i].channel = request->peers[i].channel; + if (request->peers[i].flags & + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE) { + cmd->ftm_dest_info[i].flags |= + WMI_TOF_SESSION_START_FLAG_SECURED; + cmd->ftm_dest_info[i].initial_token = + request->peers[i].secure_token_id; + } else { + cmd->ftm_dest_info[i].initial_token = + WIL_TOF_FTM_DEFAULT_INITIAL_TOKEN; + } + if (request->peers[i].flags & + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP) + cmd->ftm_dest_info[i].flags |= + WMI_TOF_SESSION_START_FLAG_ASAP; + if (request->peers[i].flags & + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI) + cmd->ftm_dest_info[i].flags |= + WMI_TOF_SESSION_START_FLAG_LCI_REQ; + if (request->peers[i].flags & + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR) + cmd->ftm_dest_info[i].flags |= + WMI_TOF_SESSION_START_FLAG_LCR_REQ; + cmd->ftm_dest_info[i].num_of_ftm_per_burst = + request->peers[i].params.meas_per_burst; + cmd->ftm_dest_info[i].num_of_bursts_exp = + request->peers[i].params.num_of_bursts_exp; + cmd->ftm_dest_info[i].burst_duration = + request->peers[i].params.burst_duration; + cmd->ftm_dest_info[i].burst_period = + cpu_to_le16(request->peers[i].params.burst_period); + } + + rc = wmi_send(wil, WMI_TOF_SESSION_START_CMDID, cmd, cmd_len); + kfree(cmd); + + if (rc) + goto out_ftm_res; + + wil->ftm.session_cookie = request->session_cookie; + wil->ftm.session_started = 1; + +out_ftm_res: + if (rc) { + kfree(wil->ftm.ftm_res); + wil->ftm.ftm_res = NULL; + } +out: + mutex_unlock(&wil->ftm.lock); + return rc; +} + +static void +wil_ftm_cfg80211_session_ended(struct wil6210_priv *wil, u32 status) +{ + struct sk_buff *vendor_event = NULL; + + mutex_lock(&wil->ftm.lock); + + if (!wil->ftm.session_started) { + wil_dbg_misc(wil, "FTM session not started, ignoring event\n"); + goto out; + } + + /* finish the session */ + wil_dbg_misc(wil, "finishing FTM session\n"); + + /* send left-over results if any */ + wil_ftm_send_peer_res(wil); + + wil->ftm.session_started = 0; + kfree(wil->ftm.ftm_res); + wil->ftm.ftm_res = NULL; + + vendor_event = cfg80211_vendor_event_alloc( + wil_to_wiphy(wil), + wil->wdev, + WIL_FTM_NL_EXTRA_ALLOC, + QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX, + GFP_KERNEL); + if (!vendor_event) + goto out; + + if (nla_put_u64(vendor_event, + QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE, + wil->ftm.session_cookie) || + nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS, status)) { + wil_err(wil, "failed to fill session done event\n"); + goto out; + } + cfg80211_vendor_event(vendor_event, GFP_KERNEL); + vendor_event = NULL; +out: + kfree_skb(vendor_event); + mutex_unlock(&wil->ftm.lock); +} + +static void wil_aoa_timer_fn(ulong x) +{ + struct 
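
wil_ftm_cfg80211_start_session() sizes one allocation to hold the result struct, a trailing array of per-measurement records, and optional LCI/LCR byte areas, then carves the regions out with pointer arithmetic. A self-contained model of that layout (sizes illustrative, struct trimmed):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define LCI_LEN 240     /* WIL_TOF_FTM_MAX_LCI_LENGTH */
    #define LCR_LEN 240     /* WIL_TOF_FTM_MAX_LCR_LENGTH */

    struct meas { uint64_t t1, t2, t3, t4; };

    struct result {
            uint8_t *lci, *lcr;
            unsigned n_meas;
            struct meas meas[];     /* flexible array member */
    };

    int main(void)
    {
            unsigned max_meas = 4;
            int has_lci = 1, has_lcr = 0;
            size_t sz = sizeof(struct result)
                      + max_meas * sizeof(struct meas)
                      + (has_lci ? LCI_LEN : 0) + (has_lcr ? LCR_LEN : 0);
            struct result *res = calloc(1, sz);
            uint8_t *ptr;

            if (!res)
                    return 1;
            /* carve the optional areas past the fixed part + meas array */
            ptr = (uint8_t *)res + sizeof(struct result)
                + max_meas * sizeof(struct meas);
            if (has_lci) { res->lci = ptr; ptr += LCI_LEN; }
            if (has_lcr) res->lcr = ptr;

            printf("total %zu bytes, lci at offset %td\n", sz,
                   res->lci ? res->lci - (uint8_t *)res : -1);
            free(res);
            return 0;
    }
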
wil6210_priv *wil = (void *)x; + + wil_dbg_misc(wil, "AOA timer\n"); + schedule_work(&wil->ftm.aoa_timeout_work); +} + +static int +wil_aoa_cfg80211_start_measurement(struct wil6210_priv *wil, + struct wil_aoa_meas_request *request) +{ + int rc = 0; + struct cfg80211_bss *bss; + struct wmi_aoa_meas_cmd cmd; + + mutex_lock(&wil->ftm.lock); + + if (wil->ftm.aoa_started) { + wil_err(wil, "AOA measurement already running\n"); + rc = -EAGAIN; + goto out; + } + if (request->type >= QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX) { + wil_err(wil, "invalid AOA type: %d\n", request->type); + rc = -EINVAL; + goto out; + } + + bss = cfg80211_get_bss(wil_to_wiphy(wil), NULL, request->mac_addr, + NULL, 0, + IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); + if (!bss) { + wil_err(wil, "Unable to find BSS\n"); + rc = -ENOENT; + goto out; + } + + memset(&cmd, 0, sizeof(cmd)); + ether_addr_copy(cmd.mac_addr, request->mac_addr); + cmd.channel = bss->channel->hw_value - 1; + cmd.aoa_meas_type = request->type; + + rc = wmi_send(wil, WMI_AOA_MEAS_CMDID, &cmd, sizeof(cmd)); + if (rc) + goto out_bss; + + ether_addr_copy(wil->ftm.aoa_peer_mac_addr, request->mac_addr); + mod_timer(&wil->ftm.aoa_timer, + jiffies + msecs_to_jiffies(WIL_AOA_MEASUREMENT_TIMEOUT)); + wil->ftm.aoa_started = 1; +out_bss: + cfg80211_put_bss(wil_to_wiphy(wil), bss); +out: + mutex_unlock(&wil->ftm.lock); + return rc; +} + +void wil_aoa_cfg80211_meas_result(struct wil6210_priv *wil, + struct wil_aoa_meas_result *result) +{ + struct sk_buff *vendor_event = NULL; + + mutex_lock(&wil->ftm.lock); + + if (!wil->ftm.aoa_started) { + wil_info(wil, "AOA not started, not sending result\n"); + goto out; + } + + wil_dbg_misc(wil, "sending AOA measurement result\n"); + + vendor_event = cfg80211_vendor_event_alloc( + wil_to_wiphy(wil), + wil->wdev, + result->length + WIL_FTM_NL_EXTRA_ALLOC, + QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX, + GFP_KERNEL); + if (!vendor_event) { + wil_err(wil, "fail to allocate measurement result\n"); + goto out; + } + + if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_MAC_ADDR, + ETH_ALEN, result->mac_addr) || + nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_AOA_TYPE, + result->type) || + nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS, + result->status) || + nla_put_u32(vendor_event, + QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK, + result->antenna_array_mask)) { + wil_err(wil, "failed to fill vendor event\n"); + goto out; + } + + if (result->length > 0 && + nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT, + result->length, result->data)) { + wil_err(wil, "failed to fill vendor event with AOA data\n"); + goto out; + } + + cfg80211_vendor_event(vendor_event, GFP_KERNEL); + + del_timer_sync(&wil->ftm.aoa_timer); + wil->ftm.aoa_started = 0; +out: + mutex_unlock(&wil->ftm.lock); +} + +void wil_ftm_evt_session_ended(struct wil6210_priv *wil, + struct wmi_tof_session_end_event *evt) +{ + u32 status; + + switch (evt->status) { + case WMI_TOF_SESSION_END_NO_ERROR: + status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK; + break; + case WMI_TOF_SESSION_END_PARAMS_ERROR: + status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID; + break; + case WMI_TOF_SESSION_END_FAIL: + status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED; + break; + case WMI_TOF_SESSION_END_ABORTED: + status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED; + break; + default: + status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED; + break; + } + + wil_ftm_cfg80211_session_ended(wil, status); +} + +void wil_ftm_evt_per_dest_res(struct 
wil6210_priv *wil, + struct wmi_tof_ftm_per_dest_res_event *evt) +{ + u32 i, index; + __le64 tmp = 0; + u8 n_meas; + + mutex_lock(&wil->ftm.lock); + + if (!wil->ftm.session_started || !wil->ftm.ftm_res) { + wil_dbg_misc(wil, "Session not running, ignoring res event\n"); + goto out; + } + if (wil->ftm.has_ftm_res && + !ether_addr_equal(evt->dst_mac, wil->ftm.ftm_res->mac_addr)) { + wil_dbg_misc(wil, + "Results for previous peer not properly terminated\n"); + wil_ftm_send_peer_res(wil); + } + + if (!wil->ftm.has_ftm_res) { + ether_addr_copy(wil->ftm.ftm_res->mac_addr, evt->dst_mac); + wil->ftm.has_ftm_res = 1; + } + + n_meas = evt->actual_ftm_per_burst; + switch (evt->status) { + case WMI_PER_DEST_RES_NO_ERROR: + wil->ftm.ftm_res->status = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK; + break; + case WMI_PER_DEST_RES_TX_RX_FAIL: + /* FW reports corrupted results here, discard. */ + n_meas = 0; + wil->ftm.ftm_res->status = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK; + break; + case WMI_PER_DEST_RES_PARAM_DONT_MATCH: + wil->ftm.ftm_res->status = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID; + break; + default: + wil_err(wil, "unexpected status %d\n", evt->status); + wil->ftm.ftm_res->status = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID; + break; + } + + for (i = 0; i < n_meas; i++) { + index = wil->ftm.ftm_res->n_meas; + if (index >= wil->ftm.max_ftm_meas) { + wil_dbg_misc(wil, "Too many measurements, some lost\n"); + break; + } + memcpy(&tmp, evt->responder_ftm_res[i].t1, + sizeof(evt->responder_ftm_res[i].t1)); + wil->ftm.ftm_res->meas[index].t1 = le64_to_cpu(tmp); + memcpy(&tmp, evt->responder_ftm_res[i].t2, + sizeof(evt->responder_ftm_res[i].t2)); + wil->ftm.ftm_res->meas[index].t2 = le64_to_cpu(tmp); + memcpy(&tmp, evt->responder_ftm_res[i].t3, + sizeof(evt->responder_ftm_res[i].t3)); + wil->ftm.ftm_res->meas[index].t3 = le64_to_cpu(tmp); + memcpy(&tmp, evt->responder_ftm_res[i].t4, + sizeof(evt->responder_ftm_res[i].t4)); + wil->ftm.ftm_res->meas[index].t4 = le64_to_cpu(tmp); + wil->ftm.ftm_res->n_meas++; + } + + if (evt->flags & WMI_PER_DEST_RES_BURST_REPORT_END) + wil_ftm_send_peer_res(wil); +out: + mutex_unlock(&wil->ftm.lock); +} + +void wil_aoa_evt_meas(struct wil6210_priv *wil, + struct wmi_aoa_meas_event *evt, + int len) +{ + int data_len = len - offsetof(struct wmi_aoa_meas_event, meas_data); + struct wil_aoa_meas_result *res; + + data_len = min_t(int, le16_to_cpu(evt->length), data_len); + + res = kmalloc(sizeof(*res) + data_len, GFP_KERNEL); + if (!res) + return; + + ether_addr_copy(res->mac_addr, evt->mac_addr); + res->type = evt->aoa_meas_type; + res->antenna_array_mask = le32_to_cpu(evt->meas_rf_mask); + res->status = evt->meas_status; + res->length = data_len; + memcpy(res->data, evt->meas_data, data_len); + + wil_dbg_misc(wil, "AOA result status %d type %d mask %d length %d\n", + res->status, res->type, + res->antenna_array_mask, res->length); + + wil_aoa_cfg80211_meas_result(wil, res); + kfree(res); +} + +int wil_ftm_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct sk_buff *skb; + struct nlattr *attr; + + if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities)) + return -ENOTSUPP; + + /* we should get the capabilities from the FW. 
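
/*
 * Standalone sketch of the timestamp handling in wil_ftm_evt_per_dest_res()
 * above: the event carries t1..t4 as unaligned little-endian byte arrays,
 * hence the memcpy() into a __le64 before le64_to_cpu(). The helper name
 * below is hypothetical; this is a portable userspace equivalent.
 */
#include <stdint.h>

static uint64_t get_le64_sketch(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	/* assemble from the most-significant byte down */
	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}
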
for now, + * report dummy capabilities for one shot measurement + */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 128); + if (!skb) + return -ENOMEM; + attr = nla_nest_start(skb, QCA_WLAN_VENDOR_ATTR_LOC_CAPA); + if (!attr || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER | + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR | + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP | + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA) || + nla_put_u16(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS, + 1) || + nla_put_u16(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS, 1) || + nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP, + 0) || + nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST, + 4) || + nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES, + BIT(QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE))) { + wil_err(wil, "fail to fill get_capabilities reply\n"); + kfree_skb(skb); + return -ENOMEM; + } + nla_nest_end(skb, attr); + + return cfg80211_vendor_cmd_reply(skb); +} + +int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil_ftm_session_request *request; + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_LOC_MAX + 1]; + struct nlattr *tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX + 1]; + struct nlattr *peer; + int rc, n_peers = 0, index = 0, tmp; + struct cfg80211_bss *bss; + + if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities)) + return -ENOTSUPP; + + rc = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_LOC_MAX, data, data_len, + wil_nl80211_loc_policy); + if (rc) { + wil_err(wil, "Invalid ATTR\n"); + return rc; + } + + if (!tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS]) { + wil_err(wil, "no peers specified\n"); + return -EINVAL; + } + + if (!tb[QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE]) { + wil_err(wil, "session cookie not specified\n"); + return -EINVAL; + } + + nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS], + tmp) + n_peers++; + + if (!n_peers) { + wil_err(wil, "empty peer list\n"); + return -EINVAL; + } + + /* for now only allow measurement for a single peer */ + if (n_peers != 1) { + wil_err(wil, "only single peer allowed\n"); + return -EINVAL; + } + + request = kzalloc(sizeof(*request) + + n_peers * sizeof(struct wil_ftm_meas_peer_info), + GFP_KERNEL); + if (!request) + return -ENOMEM; + + request->session_cookie = + nla_get_u64(tb[QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE]); + request->n_peers = n_peers; + nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS], + tmp) { + rc = nla_parse_nested(tb2, QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX, + peer, wil_nl80211_ftm_peer_policy); + if (rc) { + wil_err(wil, "Invalid peer ATTR\n"); + goto out; + } + if (!tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR] || + nla_len(tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR]) + != ETH_ALEN) { + wil_err(wil, "Peer MAC address missing or invalid\n"); + rc = -EINVAL; + goto out; + } + memcpy(request->peers[index].mac_addr, + nla_data(tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR]), + ETH_ALEN); + bss = cfg80211_get_bss(wiphy, NULL, + request->peers[index].mac_addr, NULL, 0, + IEEE80211_BSS_TYPE_ANY, + IEEE80211_PRIVACY_ANY); + if (!bss) { + wil_err(wil, "invalid bss at index %d\n", index); + rc = -ENOENT; + goto out; + } + request->peers[index].channel = bss->channel->hw_value - 1; + cfg80211_put_bss(wiphy, bss); + if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS]) + request->peers[index].flags = nla_get_u32( + 
tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS]); + if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID]) + request->peers[index].secure_token_id = nla_get_u8( + tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID]); + rc = wil_ftm_parse_meas_params( + wil, + tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS], + &request->peers[index].params); + if (!rc) + rc = wil_ftm_validate_meas_params( + wil, &request->peers[index].params); + if (rc) + goto out; + index++; + } + + rc = wil_ftm_cfg80211_start_session(wil, request); +out: + kfree(request); + return rc; +} + +int wil_ftm_abort_session(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "stub\n"); + return -ENOTSUPP; +} + +int wil_ftm_configure_responder(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "stub\n"); + return -ENOTSUPP; +} + +int wil_aoa_start_measurement(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wil_aoa_meas_request request; + struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_LOC_MAX + 1]; + int rc; + + if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities)) + return -ENOTSUPP; + + wil_dbg_misc(wil, "AOA start measurement\n"); + + rc = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_LOC_MAX, data, data_len, + wil_nl80211_loc_policy); + if (rc) { + wil_err(wil, "Invalid ATTR\n"); + return rc; + } + + if (!tb[QCA_WLAN_VENDOR_ATTR_MAC_ADDR] || + !tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]) { + wil_err(wil, "Must specify MAC address and type\n"); + return -EINVAL; + } + + memset(&request, 0, sizeof(request)); + ether_addr_copy(request.mac_addr, + nla_data(tb[QCA_WLAN_VENDOR_ATTR_MAC_ADDR])); + request.type = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]); + + rc = wil_aoa_cfg80211_start_measurement(wil, &request); + return rc; +} + +int wil_aoa_abort_measurement(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len) +{ + struct wil6210_priv *wil = wiphy_to_wil(wiphy); + + wil_dbg_misc(wil, "stub\n"); + return -ENOTSUPP; +} + +void wil_ftm_init(struct wil6210_priv *wil) +{ + mutex_init(&wil->ftm.lock); + setup_timer(&wil->ftm.aoa_timer, wil_aoa_timer_fn, (ulong)wil); + INIT_WORK(&wil->ftm.aoa_timeout_work, wil_aoa_measurement_timeout); +} + +void wil_ftm_deinit(struct wil6210_priv *wil) +{ + del_timer_sync(&wil->ftm.aoa_timer); + cancel_work_sync(&wil->ftm.aoa_timeout_work); + kfree(wil->ftm.ftm_res); +} + +void wil_ftm_stop_operations(struct wil6210_priv *wil) +{ + wil_ftm_cfg80211_session_ended( + wil, QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED); +} diff --git a/drivers/net/wireless/ath/wil6210/ftm.h b/drivers/net/wireless/ath/wil6210/ftm.h new file mode 100644 index 000000000000..9721344579aa --- /dev/null +++ b/drivers/net/wireless/ath/wil6210/ftm.h @@ -0,0 +1,512 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __WIL6210_FTM_H__ +#define __WIL6210_FTM_H__ + +/** + * NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID, + * vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and + * qca_wlan_vendor_attr is open source file src/common/qca-vendor.h in + * git://w1.fi/srv/git/hostap.git; the values here are just a copy of that + */ + +/** + * enum qca_vendor_attr_loc - attributes for FTM and AOA commands + * + * @QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE: Session cookie, specified in + * %QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION. It will be provided by driver + * events and can be used to identify events targeted for this session. + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA: Nested attribute containing extra + * FTM/AOA capabilities, returned by %QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA. + * see %enum qca_wlan_vendor_attr_loc_capa. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS: array of nested attributes + * containing information about each peer in measurement session + * request. See %enum qca_wlan_vendor_attr_peer_info for supported + * attributes for each peer + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RESULTS: nested attribute containing + * measurement results for a peer. reported by the + * %QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT event. + * See %enum qca_wlan_vendor_attr_peer_result for list of supported + * attributes. + * @QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE: flag attribute for + * enabling or disabling responder functionality. + * @QCA_WLAN_VENDOR_ATTR_FTM_LCI: used in the + * %QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER command in order to + * specify the LCI report that will be sent by the responder during + * a measurement exchange. The format is defined in IEEE P802.11-REVmc/D5.0, + * 9.4.2.22.10 + * @QCA_WLAN_VENDOR_ATTR_FTM_LCR: provided with the + * %QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER command in order to + * specify the location civic report that will be sent by the responder during + * a measurement exchange. The format is defined in IEEE P802.11-REVmc/D5.0, + * 9.4.2.22.13 + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS: session/measurement completion + * status code, reported in %QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE + * and %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT + * @QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN: initial dialog token used + * by responder (0 if not specified) + * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE: AOA measurement type. Requested in + * %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS and optionally in + * %QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION if AOA measurements + * are needed as part of an FTM session. + * Reported by QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT. + * See enum qca_wlan_vendor_attr_aoa_type. + * @QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK: bit mask indicating + * which antenna arrays were used in location measurement. + * Reported in %QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT and + * %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT + * @QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT: AOA measurement data. 
+ * Its contents depend on the AOA type and antenna array mask: + * %QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE: array of U16 values, + * phase of the strongest CIR path for each antenna in the measured + * array(s). + * %QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP: array of 2 U16 + * values, phase and amplitude of the strongest CIR path for each + * antenna in the measured array(s) + */ +enum qca_wlan_vendor_attr_loc { + /* we reuse these attributes */ + QCA_WLAN_VENDOR_ATTR_MAC_ADDR = 6, + QCA_WLAN_VENDOR_ATTR_PAD = 13, + QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE = 14, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA = 15, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS = 16, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS = 17, + QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE = 18, + QCA_WLAN_VENDOR_ATTR_FTM_LCI = 19, + QCA_WLAN_VENDOR_ATTR_FTM_LCR = 20, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS = 21, + QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN = 22, + QCA_WLAN_VENDOR_ATTR_AOA_TYPE = 23, + QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK = 24, + QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT = 25, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_LOC_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LOC_MAX = QCA_WLAN_VENDOR_ATTR_LOC_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_loc_capa - indoor location capabilities + * + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS: various flags. See + * %enum qca_wlan_vendor_attr_loc_capa_flags + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS: Maximum number + * of measurement sessions that can run concurrently. + * Default is one session (no session concurrency) + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS: The total number of unique + * peers that are supported in running sessions. For example, + * if the value is 8 and maximum number of sessions is 2, you can + * have one session with 8 unique peers, or 2 sessions with 4 unique + * peers each, and so on. + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP: Maximum number + * of bursts per peer, as an exponent (2^value). Default is 0, + * meaning no multi-burst support. + * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST: Maximum number + * of measurement exchanges allowed in a single burst + * @QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES: Supported AOA measurement + * types. A bit mask (unsigned 32 bit value), each bit corresponds + * to an AOA type as defined by %enum qca_vendor_attr_aoa_type. + */ +enum qca_wlan_vendor_attr_loc_capa { + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_INVALID, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS, + QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS, + QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS, + QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP, + QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST, + QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_MAX = + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_loc_capa_flags: Indoor location capability flags + * + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER: Set if driver + * can be configured as an FTM responder (for example, an AP that + * services FTM requests). %QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER + * will be supported if set. + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR: Set if driver + * can run FTM sessions. %QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION + * will be supported if set. + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP: Set if FTM responder + * supports immediate (ASAP) response.
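
/*
 * Sketch of consuming a %QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE
 * result as documented above: an array of u16 phase values, one per
 * antenna in the array(s) selected by the antenna array mask. Assumes
 * little-endian encoding of the u16 entries; illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static void dump_cir_phases(const uint8_t *data, uint32_t len)
{
	uint32_t i;

	for (i = 0; i + 1 < len; i += 2) {
		uint16_t phase = data[i] | (data[i + 1] << 8);

		printf("antenna %u: phase %u\n", i / 2, phase);
	}
}
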
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA: Set if driver supports standalone + * AOA measurement using %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS + * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA_IN_FTM: Set if driver supports + * requesting AOA measurements as part of an FTM session. + */ +enum qca_wlan_vendor_attr_loc_capa_flags { + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER = 1 << 0, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR = 1 << 1, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP = 1 << 2, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA = 1 << 3, + QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA_IN_FTM = 1 << 4, +}; + +/** + * enum qca_wlan_vendor_attr_peer_info: information about + * a single peer in a measurement session. + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR: The MAC address of the peer. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS: Various flags related + * to measurement. See %enum qca_wlan_vendor_attr_ftm_peer_meas_flags. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS: Nested attribute of + * FTM measurement parameters, as specified by IEEE P802.11-REVmc/D7.0, + * 9.4.2.167. See %enum qca_wlan_vendor_attr_ftm_meas_param for + * list of supported attributes. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID: Initial token ID for + * secure measurement + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD: Request AOA + * measurement every _value_ bursts. If 0 or not specified, + * AOA measurements will be disabled for this peer. + */ +enum qca_wlan_vendor_attr_ftm_peer_info { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_PEER_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_meas_flags: Measurement request flags, + * per-peer + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP: If set, request + * immediate (ASAP) response from peer + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI: If set, request + * LCI report from peer. The LCI report includes the absolute + * location of the peer in "official" coordinates (similar to GPS). + * See IEEE P802.11-REVmc/D7.0, 11.24.6.7 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR: If set, request + * Location civic report from peer. The LCR includes the location + * of the peer in free-form format. See IEEE P802.11-REVmc/D7.0, + * 11.24.6.7 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE: If set, + * request a secure measurement. + * %QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID must also be provided. + */ +enum qca_wlan_vendor_attr_ftm_peer_meas_flags { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP = 1 << 0, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI = 1 << 1, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR = 1 << 2, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE = 1 << 3, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_meas_param: Measurement parameters + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST: Number of measurements + * to perform in a single burst. 
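
/*
 * The per-peer measurement flags above are translated to WMI session
 * flags in wil_ftm_cfg80211_start_session(). A minimal sketch of that
 * mapping; the FW_* bit values are illustrative, not the real WMI bits.
 */
enum {
	FW_FLAG_SECURED = 1 << 0,	/* illustrative values */
	FW_FLAG_ASAP = 1 << 1,
	FW_FLAG_LCI_REQ = 1 << 2,
	FW_FLAG_LCR_REQ = 1 << 3,
};

static unsigned int map_peer_meas_flags(unsigned int flags)
{
	unsigned int fw = 0;

	if (flags & (1 << 0))		/* ..._MEAS_FLAG_ASAP */
		fw |= FW_FLAG_ASAP;
	if (flags & (1 << 1))		/* ..._MEAS_FLAG_LCI */
		fw |= FW_FLAG_LCI_REQ;
	if (flags & (1 << 2))		/* ..._MEAS_FLAG_LCR */
		fw |= FW_FLAG_LCR_REQ;
	if (flags & (1 << 3))		/* ..._MEAS_FLAG_SECURE */
		fw |= FW_FLAG_SECURED;
	return fw;
}
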
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP: Number of bursts to + * perform, specified as an exponent (2^value) + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION: Duration of burst + * instance, as specified in IEEE P802.11-REVmc/D7.0, 9.4.2.167 + * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD: Time between bursts, + * as specified in IEEE P802.11-REVmc/D7.0, 9.4.2.167. Must + * be larger than %QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION + */ +enum qca_wlan_vendor_attr_ftm_meas_param { + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_PARAM_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_result: Per-peer results + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR: MAC address of the reported + * peer + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS: Status of measurement + * request for this peer. + * See %enum qca_wlan_vendor_attr_ftm_peer_result_status + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS: Various flags related + * to measurement results for this peer. + * See %enum qca_wlan_vendor_attr_ftm_peer_result_flags + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS: Specified when + * request failed and peer requested not to send an additional request + * for this number of seconds. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCI: LCI report when received + * from peer. In the format specified by IEEE P802.11-REVmc/D7.0, + * 9.4.2.22.10 + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCR: Location civic report when + * received from peer. In the format specified by IEEE P802.11-REVmc/D7.0, + * 9.4.2.22.13 + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS: Reported when the peer + * has overridden some measurement request parameters. See + * enum qca_wlan_vendor_attr_ftm_meas_param. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AOA_MEAS: AOA measurement + * for this peer. Same contents as %QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS: Array of measurement + * results. Each entry is a nested attribute defined + * by enum qca_wlan_vendor_attr_ftm_meas. + */ +enum qca_wlan_vendor_attr_ftm_peer_result { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCI, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCR, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AOA_MEAS, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_result_status + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK: Request sent ok and results + * will be provided. Peer may have overridden some measurement parameters, + * in which case overridden parameters will be reported by + * %QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS attribute + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INCAPABLE: Peer is incapable + * of performing the measurement request. No more results will be sent + * for this peer in this session.
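
/*
 * Since burst counts in the parameters above are exponents, the
 * expected number of measurements for one peer is
 * meas_per_burst * 2^num_of_bursts_exp. Minimal sketch; the driver's
 * own result-buffer sizing may differ.
 */
static unsigned int ftm_total_meas(unsigned int meas_per_burst,
				   unsigned int num_of_bursts_exp)
{
	return meas_per_burst * (1u << num_of_bursts_exp);
}
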
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED: Peer reported request + * failed, and requested not to send an additional request for number + * of seconds specified by %QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS + * attribute. + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID: Request validation + * failed. Request was not sent over the air. + */ +enum qca_wlan_vendor_attr_ftm_peer_result_status { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INCAPABLE, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED, + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_peer_result_flags : Various flags + * for measurement result, per-peer + * + * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAG_DONE: If set, + * measurement completed for this peer. No more results will be reported + * for this peer in this session. + */ +enum qca_wlan_vendor_attr_ftm_peer_result_flags { + QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAG_DONE = 1 << 0, +}; + +/** + * enum qca_vendor_attr_loc_session_status: Session completion status code + * + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK: Session completed + * successfully. + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED: Session aborted + * by request + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID: Session request + * was invalid and was not started + * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED: Session had an error + * and did not complete normally (for example out of resources) + * + */ +enum qca_vendor_attr_loc_session_status { + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID, + QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED, +}; + +/** + * enum qca_wlan_vendor_attr_ftm_meas: Single measurement data + * + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1: Time of departure(TOD) of FTM packet as + * recorded by responder, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2: Time of arrival(TOA) of FTM packet at + * initiator, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3: TOD of ACK packet as recorded by + * initiator, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4: TOA of ACK packet at + * responder, in picoseconds. + * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_RSSI: RSSI (signal level) as recorded + * during this measurement exchange. Optional and will be provided if + * the hardware can measure it. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOD_ERR: TOD error reported by + * responder. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOA_ERR: TOA error reported by + * responder. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOD_ERR: TOD error measured by + * initiator. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOA_ERR: TOA error measured by + * initiator. Not always provided. + * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information. + * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PAD: Dummy attribute for padding. 
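
/*
 * How the t1..t4 timestamps above combine, per the cited IEEE
 * P802.11-REVmc clauses: one exchange yields
 *   rtt = (t4 - t1) - (t3 - t2)
 * in picoseconds, which removes the responder turnaround time;
 * distance is then roughly rtt / 2 times the speed of light.
 * Sketch only, with no clock-wrap or outlier handling.
 */
#include <stdint.h>

static int64_t ftm_rtt_ps(uint64_t t1, uint64_t t2, uint64_t t3, uint64_t t4)
{
	return (int64_t)(t4 - t1) - (int64_t)(t3 - t2);
}
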
+ */ +enum qca_wlan_vendor_attr_ftm_meas { + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INVALID, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_RSSI, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOD_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOA_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOD_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOA_ERR, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PAD, + /* keep last */ + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_AFTER_LAST, + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_MAX = + QCA_WLAN_VENDOR_ATTR_FTM_MEAS_AFTER_LAST - 1, +}; + +/** + * enum qca_wlan_vendor_attr_aoa_type: AOA measurement type + * + * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE: Phase of the strongest + * CIR (channel impulse response) path for each antenna. + * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP: Phase and amplitude + * of the strongest CIR path for each antenna. + */ +enum qca_wlan_vendor_attr_aoa_type { + QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE, + QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP, + QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX, +}; + +/* vendor event indices, used from both cfg80211.c and ftm.c */ +enum qca_nl80211_vendor_events_index { + QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX, + QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX, + QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX, +}; + +/* measurement parameters. Specified for each peer as part + * of measurement request, or provided with measurement + * results for peer in case peer overridden parameters + */ +struct wil_ftm_meas_params { + u8 meas_per_burst; + u8 num_of_bursts_exp; + u8 burst_duration; + u16 burst_period; +}; + +/* measurement request for a single peer */ +struct wil_ftm_meas_peer_info { + u8 mac_addr[ETH_ALEN]; + u8 channel; + u32 flags; /* enum qca_wlan_vendor_attr_ftm_peer_meas_flags */ + struct wil_ftm_meas_params params; + u8 secure_token_id; +}; + +/* session request, passed to wil_ftm_cfg80211_start_session */ +struct wil_ftm_session_request { + u64 session_cookie; + u32 n_peers; + /* keep last, variable size according to n_peers */ + struct wil_ftm_meas_peer_info peers[0]; +}; + +/* single measurement for a peer */ +struct wil_ftm_peer_meas { + u64 t1, t2, t3, t4; +}; + +/* measurement results for a single peer */ +struct wil_ftm_peer_meas_res { + u8 mac_addr[ETH_ALEN]; + u32 flags; /* enum qca_wlan_vendor_attr_ftm_peer_result_flags */ + u8 status; /* enum qca_wlan_vendor_attr_ftm_peer_result_status */ + u8 value_seconds; + bool has_params; /* true if params is valid */ + struct wil_ftm_meas_params params; /* peer overridden params */ + u8 *lci; + u8 lci_length; + u8 *lcr; + u8 lcr_length; + u32 n_meas; + /* keep last, variable size according to n_meas */ + struct wil_ftm_peer_meas meas[0]; +}; + +/* standalone AOA measurement request */ +struct wil_aoa_meas_request { + u8 mac_addr[ETH_ALEN]; + u32 type; +}; + +/* AOA measurement result */ +struct wil_aoa_meas_result { + u8 mac_addr[ETH_ALEN]; + u32 type; + u32 antenna_array_mask; + u32 status; + u32 length; + /* keep last, variable size according to length */ + u8 data[0]; +}; + +/* private data related to FTM. 
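
/*
 * The peers[0] / meas[0] members in the structures above use the
 * flexible-array idiom: header plus variable-length tail in a single
 * allocation, as wil_ftm_start_session() does for its request. The
 * demo_* types below are stand-ins for illustration.
 */
#include <stdlib.h>

struct demo_peer { unsigned char mac[6]; unsigned char channel; };

struct demo_request {
	unsigned long long cookie;
	unsigned int n_peers;
	struct demo_peer peers[];	/* must remain last */
};

static struct demo_request *demo_request_alloc(unsigned int n)
{
	struct demo_request *r =
		calloc(1, sizeof(*r) + n * sizeof(struct demo_peer));

	if (r)
		r->n_peers = n;
	return r;
}
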
Part of the wil6210_priv structure */ +struct wil_ftm_priv { + struct mutex lock; /* protects the FTM data */ + u8 session_started; + u64 session_cookie; + struct wil_ftm_peer_meas_res *ftm_res; + u8 has_ftm_res; + u32 max_ftm_meas; + + /* standalone AOA measurement */ + u8 aoa_started; + u8 aoa_peer_mac_addr[ETH_ALEN]; + u32 aoa_type; + struct timer_list aoa_timer; + struct work_struct aoa_timeout_work; +}; + +int wil_ftm_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); +int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); +int wil_ftm_abort_session(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); +int wil_ftm_configure_responder(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); +int wil_aoa_start_measurement(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); +int wil_aoa_abort_measurement(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int data_len); + +#endif /* __WIL6210_FTM_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index a509841c3187..5285ebc8b9af 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -518,6 +518,8 @@ int wil_priv_init(struct wil6210_priv *wil) spin_lock_init(&wil->wmi_ev_lock); init_waitqueue_head(&wil->wq); + wil_ftm_init(wil); + wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi"); if (!wil->wmi_wq) return -EAGAIN; @@ -565,6 +567,7 @@ void wil_priv_deinit(struct wil6210_priv *wil) { wil_dbg_misc(wil, "%s()\n", __func__); + wil_ftm_deinit(wil); wil_set_recovery_state(wil, fw_recovery_idle); del_timer_sync(&wil->scan_timer); del_timer_sync(&wil->p2p.discovery_timer); @@ -1056,6 +1059,7 @@ int __wil_down(struct wil6210_priv *wil) wil_enable_irq(wil); wil_p2p_stop_radio_operations(wil); + wil_ftm_stop_operations(wil); mutex_lock(&wil->p2p_wdev_mutex); if (wil->scan_request) { diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index ce33e919d321..a19dba5b9e5f 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -25,6 +25,7 @@ #include <linux/types.h> #include "wmi.h" #include "wil_platform.h" +#include "ftm.h" extern bool no_fw_recovery; extern unsigned int mtu_max; @@ -668,6 +669,8 @@ struct wil6210_priv { /* High Access Latency Policy voting */ struct wil_halp halp; + struct wil_ftm_priv ftm; + #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP struct notifier_block pm_notify; @@ -872,6 +875,8 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan, u8 hidden_ssid, u8 is_go); int wmi_pcp_stop(struct wil6210_priv *wil); int wmi_led_cfg(struct wil6210_priv *wil, bool enable); +int wmi_aoa_meas(struct wil6210_priv *wil, const void *mac_addr, u8 chan, + u8 type); void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, u16 reason_code, bool from_event); void wil_probe_client_flush(struct wil6210_priv *wil); @@ -915,4 +920,18 @@ void wil_halp_unvote(struct wil6210_priv *wil); void wil6210_set_halp(struct wil6210_priv *wil); void wil6210_clear_halp(struct wil6210_priv *wil); +void wil_ftm_init(struct wil6210_priv *wil); +void wil_ftm_deinit(struct wil6210_priv *wil); +void wil_ftm_stop_operations(struct wil6210_priv *wil); +void wil_aoa_cfg80211_meas_result(struct wil6210_priv *wil, + struct wil_aoa_meas_result 
*result); + +void wil_ftm_evt_session_ended(struct wil6210_priv *wil, + struct wmi_tof_session_end_event *evt); +void wil_ftm_evt_per_dest_res(struct wil6210_priv *wil, + struct wmi_tof_ftm_per_dest_res_event *evt); +void wil_aoa_evt_meas(struct wil6210_priv *wil, + struct wmi_aoa_meas_event *evt, + int len); + #endif /* __WIL6210_H__ */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 6ec3ddc5b6f1..daa7a33d12d8 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -22,6 +22,7 @@ #include "txrx.h" #include "wmi.h" #include "trace.h" +#include "ftm.h" static uint max_assoc_sta = WIL6210_MAX_CID; module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR); @@ -772,6 +773,30 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) spin_unlock_bh(&sta->tid_rx_lock); } +static void wmi_evt_aoa_meas(struct wil6210_priv *wil, int id, + void *d, int len) +{ + struct wmi_aoa_meas_event *evt = d; + + wil_aoa_evt_meas(wil, evt, len); +} + +static void wmi_evt_ftm_session_ended(struct wil6210_priv *wil, int id, + void *d, int len) +{ + struct wmi_tof_session_end_event *evt = d; + + wil_ftm_evt_session_ended(wil, evt); +} + +static void wmi_evt_per_dest_res(struct wil6210_priv *wil, int id, + void *d, int len) +{ + struct wmi_tof_ftm_per_dest_res_event *evt = d; + + wil_ftm_evt_per_dest_res(wil, evt); +} + /** * Some events are ignored for purpose; and need not be interpreted as * "unhandled events" @@ -799,6 +824,13 @@ static const struct { {WMI_DELBA_EVENTID, wmi_evt_delba}, {WMI_VRING_EN_EVENTID, wmi_evt_vring_en}, {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore}, + {WMI_AOA_MEAS_EVENTID, wmi_evt_aoa_meas}, + {WMI_TOF_SESSION_END_EVENTID, wmi_evt_ftm_session_ended}, + {WMI_TOF_GET_CAPABILITIES_EVENTID, wmi_evt_ignore}, + {WMI_TOF_SET_LCR_EVENTID, wmi_evt_ignore}, + {WMI_TOF_SET_LCI_EVENTID, wmi_evt_ignore}, + {WMI_TOF_FTM_PER_DEST_RES_EVENTID, wmi_evt_per_dest_res}, + {WMI_TOF_CHANNEL_INFO_EVENTID, wmi_evt_ignore}, }; /* diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c index ec6955452391..1e56d445c6e1 100644 --- a/drivers/net/wireless/cnss/cnss_pci.c +++ b/drivers/net/wireless/cnss/cnss_pci.c @@ -1404,7 +1404,6 @@ static int cnss_wlan_is_codeswap_supported(u16 revision) static int cnss_smmu_init(struct device *dev) { struct dma_iommu_mapping *mapping; - int disable_htw = 1; int atomic_ctx = 1; int ret; @@ -1418,15 +1417,6 @@ static int cnss_smmu_init(struct device *dev) } ret = iommu_domain_set_attr(mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw); - if (ret) { - pr_err("%s: set disable_htw attribute failed, err = %d\n", - __func__, ret); - goto set_attr_fail; - } - - ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx); if (ret) { diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c index af1e5a70d585..352defe6204b 100644 --- a/drivers/platform/msm/gsi/gsi.c +++ b/drivers/platform/msm/gsi/gsi.c @@ -170,7 +170,12 @@ static void gsi_handle_glob_err(uint32_t err) gsi_ctx->per.notify_cb(&per_notify); break; case GSI_ERR_TYPE_CHAN: - BUG_ON(log->virt_idx >= GSI_MAX_CHAN); + if (log->virt_idx >= gsi_ctx->max_ch) { + GSIERR("Unexpected ch %d\n", log->virt_idx); + WARN_ON(1); + return; + } + ch = &gsi_ctx->chan[log->virt_idx]; chan_notify.chan_user_data = ch->props.chan_user_data; chan_notify.err_desc = err & 0xFFFF; @@ -213,7 +218,12 @@ static void gsi_handle_glob_err(uint32_t err) WARN_ON(1); break; case 
GSI_ERR_TYPE_EVT: - BUG_ON(log->virt_idx >= GSI_MAX_EVT_RING); + if (log->virt_idx >= gsi_ctx->max_ev) { + GSIERR("Unexpected ev %d\n", log->virt_idx); + WARN_ON(1); + return; + } + ev = &gsi_ctx->evtr[log->virt_idx]; evt_notify.user_data = ev->props.user_data; evt_notify.err_desc = err & 0xFFFF; @@ -257,6 +267,9 @@ static void gsi_handle_glob_ee(int ee) if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) { err = gsi_readl(gsi_ctx->base + GSI_EE_n_ERROR_LOG_OFFS(ee)); + if (gsi_ctx->per.ver >= GSI_VER_1_2) + gsi_writel(0, gsi_ctx->base + + GSI_EE_n_ERROR_LOG_OFFS(ee)); gsi_writel(clr, gsi_ctx->base + GSI_EE_n_ERROR_LOG_CLR_OFFS(ee)); gsi_handle_glob_err(err); @@ -311,7 +324,12 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt, uint64_t rp; ch_id = evt->chid; - BUG_ON(ch_id >= GSI_MAX_CHAN); + if (ch_id >= gsi_ctx->max_ch) { + GSIERR("Unexpected ch %d\n", ch_id); + WARN_ON(1); + return; + } + ch_ctx = &gsi_ctx->chan[ch_id]; BUG_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI); rp = evt->xfer_ptr; @@ -567,6 +585,75 @@ static irqreturn_t gsi_isr(int irq, void *ctxt) return IRQ_HANDLED; } +static uint32_t gsi_get_max_channels(enum gsi_ver ver) +{ + uint32_t reg; + + switch (ver) { + case GSI_VER_1_0: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee)); + reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >> + GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT; + break; + case GSI_VER_1_2: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee)); + reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >> + GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT; + break; + case GSI_VER_1_3: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >> + GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT; + break; + default: + GSIERR("bad gsi version %d\n", ver); + WARN_ON(1); + reg = 0; + } + + GSIDBG("max channels %d\n", reg); + + return reg; +} + +static uint32_t gsi_get_max_event_rings(enum gsi_ver ver) +{ + uint32_t reg; + + switch (ver) { + case GSI_VER_1_0: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee)); + reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >> + GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT; + break; + case GSI_VER_1_2: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee)); + reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >> + GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT; + break; + case GSI_VER_1_3: + reg = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + reg = (reg & + GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >> + GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT; + break; + default: + GSIERR("bad gsi version %d\n", ver); + WARN_ON(1); + reg = 0; + } + + GSIDBG("max event rings %d\n", reg); + + return reg; +} int gsi_complete_clk_grant(unsigned long dev_hdl) { unsigned long flags; @@ -611,6 +698,11 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl) return -GSI_STATUS_INVALID_PARAMS; } + if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) { + GSIERR("bad params gsi_ver=%d\n", props->ver); + return -GSI_STATUS_INVALID_PARAMS; + } + if (!props->notify_cb) { GSIERR("notify callback must be provided\n"); return -GSI_STATUS_INVALID_PARAMS; @@ -668,8 +760,25 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long 
*dev_hdl) mutex_init(&gsi_ctx->mlock); atomic_set(&gsi_ctx->num_chan, 0); atomic_set(&gsi_ctx->num_evt_ring, 0); - /* only support 16 un-reserved + 7 reserved event virtual IDs */ - gsi_ctx->evt_bmap = ~0x7E03FF; + gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver); + if (gsi_ctx->max_ch == 0) { + devm_iounmap(gsi_ctx->dev, gsi_ctx->base); + devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx); + GSIERR("failed to get max channels\n"); + return -GSI_STATUS_ERROR; + } + gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver); + if (gsi_ctx->max_ev == 0) { + devm_iounmap(gsi_ctx->dev, gsi_ctx->base); + devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx); + GSIERR("failed to get max event rings\n"); + return -GSI_STATUS_ERROR; + } + + /* bitmap is max events excludes reserved events */ + gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1); + gsi_ctx->evt_bmap |= ((1 << (GSI_MHI_ER_END + 1)) - 1) ^ + ((1 << GSI_MHI_ER_START) - 1); /* * enable all interrupts but GSI_BREAK_POINT. @@ -693,6 +802,10 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl) else GSIERR("Manager EE has not enabled GSI, GSI un-usable\n"); + if (gsi_ctx->per.ver >= GSI_VER_1_2) + gsi_writel(0, gsi_ctx->base + + GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee)); + *dev_hdl = (uintptr_t)gsi_ctx; return GSI_STATUS_SUCCESS; @@ -1059,7 +1172,7 @@ int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl, return -GSI_STATUS_NODEV; } - if (evt_ring_hdl >= GSI_MAX_EVT_RING) { + if (evt_ring_hdl >= gsi_ctx->max_ev) { GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -1093,7 +1206,7 @@ int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl) return -GSI_STATUS_NODEV; } - if (evt_ring_hdl >= GSI_MAX_EVT_RING) { + if (evt_ring_hdl >= gsi_ctx->max_ev) { GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -1160,7 +1273,7 @@ int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl, return -GSI_STATUS_INVALID_PARAMS; } - if (evt_ring_hdl >= GSI_MAX_EVT_RING) { + if (evt_ring_hdl >= gsi_ctx->max_ev) { GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -1194,7 +1307,7 @@ int gsi_reset_evt_ring(unsigned long evt_ring_hdl) return -GSI_STATUS_NODEV; } - if (evt_ring_hdl >= GSI_MAX_EVT_RING) { + if (evt_ring_hdl >= gsi_ctx->max_ev) { GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -1255,7 +1368,7 @@ int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl, return -GSI_STATUS_INVALID_PARAMS; } - if (evt_ring_hdl >= GSI_MAX_EVT_RING) { + if (evt_ring_hdl >= gsi_ctx->max_ev) { GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -1291,7 +1404,7 @@ int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl, return -GSI_STATUS_INVALID_PARAMS; } - if (evt_ring_hdl >= GSI_MAX_EVT_RING) { + if (evt_ring_hdl >= gsi_ctx->max_ev) { GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -1382,7 +1495,7 @@ static int gsi_validate_channel_props(struct gsi_chan_props *props) { uint64_t ra; - if (props->ch_id >= GSI_MAX_CHAN) { + if (props->ch_id >= gsi_ctx->max_ch) { GSIERR("ch_id %u invalid\n", props->ch_id); return -GSI_STATUS_INVALID_PARAMS; } @@ -1573,7 +1686,7 @@ int gsi_write_channel_scratch(unsigned long chan_hdl, return -GSI_STATUS_NODEV; } - if (chan_hdl >= GSI_MAX_CHAN) { + if (chan_hdl >= gsi_ctx->max_ch) { GSIERR("bad params chan_hdl=%lu\n", chan_hdl); return -GSI_STATUS_INVALID_PARAMS; 
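
/*
 * The evt_bmap setup above in isolation: ring ids at or above max_ev
 * start out marked used, and the reserved range [start, end] is marked
 * used via the mask identity ((1 << (end + 1)) - 1) ^ ((1 << start) - 1).
 * Standalone sketch; the GSI_MHI_ER_START/END values below are made up.
 */
#include <stdio.h>

static unsigned long evt_bitmap(unsigned int max_ev,
				unsigned int start, unsigned int end)
{
	unsigned long bmap = ~((1UL << max_ev) - 1);

	bmap |= ((1UL << (end + 1)) - 1) ^ ((1UL << start) - 1);
	return bmap;
}

int main(void)
{
	/* 16 rings with ids 10..12 reserved -> 0xffffffffffff1c00 */
	printf("0x%lx\n", evt_bitmap(16, 10, 12));
	return 0;
}
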
} @@ -1610,7 +1723,7 @@ int gsi_query_channel_db_addr(unsigned long chan_hdl, return -GSI_STATUS_INVALID_PARAMS; } - if (chan_hdl >= GSI_MAX_CHAN) { + if (chan_hdl >= gsi_ctx->max_ch) { GSIERR("bad params chan_hdl=%lu\n", chan_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -1642,7 +1755,7 @@ int gsi_start_channel(unsigned long chan_hdl) return -GSI_STATUS_NODEV; } - if (chan_hdl >= GSI_MAX_CHAN) { + if (chan_hdl >= gsi_ctx->max_ch) { GSIERR("bad params chan_hdl=%lu\n", chan_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -1694,7 +1807,7 @@ int gsi_stop_channel(unsigned long chan_hdl) return -GSI_STATUS_NODEV; } - if (chan_hdl >= GSI_MAX_CHAN) { + if (chan_hdl >= gsi_ctx->max_ch) { GSIERR("bad params chan_hdl=%lu\n", chan_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -1763,7 +1876,7 @@ int gsi_stop_db_channel(unsigned long chan_hdl) return -GSI_STATUS_NODEV; } - if (chan_hdl >= GSI_MAX_CHAN) { + if (chan_hdl >= gsi_ctx->max_ch) { GSIERR("bad params chan_hdl=%lu\n", chan_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -1832,7 +1945,7 @@ int gsi_reset_channel(unsigned long chan_hdl) return -GSI_STATUS_NODEV; } - if (chan_hdl >= GSI_MAX_CHAN) { + if (chan_hdl >= gsi_ctx->max_ch) { GSIERR("bad params chan_hdl=%lu\n", chan_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -1898,7 +2011,7 @@ int gsi_dealloc_channel(unsigned long chan_hdl) return -GSI_STATUS_NODEV; } - if (chan_hdl >= GSI_MAX_CHAN) { + if (chan_hdl >= gsi_ctx->max_ch) { GSIERR("bad params chan_hdl=%lu\n", chan_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -2021,7 +2134,7 @@ int gsi_query_channel_info(unsigned long chan_hdl, return -GSI_STATUS_NODEV; } - if (chan_hdl >= GSI_MAX_CHAN || !info) { + if (chan_hdl >= gsi_ctx->max_ch || !info) { GSIERR("bad params chan_hdl=%lu info=%p\n", chan_hdl, info); return -GSI_STATUS_INVALID_PARAMS; } @@ -2091,7 +2204,7 @@ int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty) return -GSI_STATUS_NODEV; } - if (chan_hdl >= GSI_MAX_CHAN || !is_empty) { + if (chan_hdl >= gsi_ctx->max_ch || !is_empty) { GSIERR("bad params chan_hdl=%lu is_empty=%p\n", chan_hdl, is_empty); return -GSI_STATUS_INVALID_PARAMS; @@ -2155,7 +2268,7 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers, return -GSI_STATUS_NODEV; } - if (chan_hdl >= GSI_MAX_CHAN || !num_xfers || !xfer) { + if (chan_hdl >= gsi_ctx->max_ch || !num_xfers || !xfer) { GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%p\n", chan_hdl, num_xfers, xfer); return -GSI_STATUS_INVALID_PARAMS; @@ -2242,7 +2355,7 @@ int gsi_start_xfer(unsigned long chan_hdl) return -GSI_STATUS_NODEV; } - if (chan_hdl >= GSI_MAX_CHAN) { + if (chan_hdl >= gsi_ctx->max_ch) { GSIERR("bad params chan_hdl=%lu\n", chan_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -2278,7 +2391,7 @@ int gsi_poll_channel(unsigned long chan_hdl, return -GSI_STATUS_NODEV; } - if (chan_hdl >= GSI_MAX_CHAN || !notify) { + if (chan_hdl >= gsi_ctx->max_ch || !notify) { GSIERR("bad params chan_hdl=%lu notify=%p\n", chan_hdl, notify); return -GSI_STATUS_INVALID_PARAMS; } @@ -2327,7 +2440,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode) return -GSI_STATUS_NODEV; } - if (chan_hdl >= GSI_MAX_CHAN) { + if (chan_hdl >= gsi_ctx->max_ch) { GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode); return -GSI_STATUS_INVALID_PARAMS; } @@ -2390,7 +2503,7 @@ int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props, return -GSI_STATUS_INVALID_PARAMS; } - if (chan_hdl >= GSI_MAX_CHAN) { + if (chan_hdl >= gsi_ctx->max_ch) { GSIERR("bad params 
chan_hdl=%lu\n", chan_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -2426,7 +2539,7 @@ int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props, return -GSI_STATUS_INVALID_PARAMS; } - if (chan_hdl >= GSI_MAX_CHAN) { + if (chan_hdl >= gsi_ctx->max_ch) { GSIERR("bad params chan_hdl=%lu\n", chan_hdl); return -GSI_STATUS_INVALID_PARAMS; } @@ -2471,9 +2584,9 @@ static void gsi_configure_ieps(void *base) gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS); gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS); gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS); - gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_OFFS); - gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_OFFS); - gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_OFFS); + gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS); + gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS); + gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS); gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS); gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS); gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS); @@ -2502,9 +2615,9 @@ static void gsi_configure_bck_prs_matrix(void *base) gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS); gsi_writel(0x00000000, gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS); - gsi_writel(0x00ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS); + gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS); gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS); - gsi_writel(0xfdffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS); + gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS); gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS); gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS); gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS); @@ -2551,15 +2664,35 @@ int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size) } /* Enable the MCS and set to x2 clocks */ - value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) & - GSI_GSI_CFG_GSI_ENABLE_BMSK) | - ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) & - GSI_GSI_CFG_MCS_ENABLE_BMSK) | - ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) & - GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) | - ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) & - GSI_GSI_CFG_UC_IS_MCS_BMSK)); - gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS); + if (gsi_ctx->per.ver >= GSI_VER_1_2) { + value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) & + GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK); + gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS); + + value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) & + GSI_GSI_CFG_GSI_ENABLE_BMSK) | + ((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) & + GSI_GSI_CFG_MCS_ENABLE_BMSK) | + ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) & + GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) | + ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) & + GSI_GSI_CFG_UC_IS_MCS_BMSK) | + ((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) & + GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) | + ((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) & + GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK)); + gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS); + } else { + value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) & + GSI_GSI_CFG_GSI_ENABLE_BMSK) | + ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) & + GSI_GSI_CFG_MCS_ENABLE_BMSK) | + ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) & + GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) | + ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) & + GSI_GSI_CFG_UC_IS_MCS_BMSK)); + gsi_writel(value, gsi_base + 
GSI_GSI_CFG_OFFS); + } iounmap(gsi_base); diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h index 1d438ffb8b76..0b94ed2d3a92 100644 --- a/drivers/platform/msm/gsi/gsi.h +++ b/drivers/platform/msm/gsi/gsi.h @@ -19,8 +19,8 @@ #include <linux/spinlock.h> #include <linux/msm_gsi.h> -#define GSI_MAX_CHAN 31 -#define GSI_MAX_EVT_RING 23 +#define GSI_CHAN_MAX 31 +#define GSI_EVT_RING_MAX 23 #define GSI_NO_EVT_ERINDEX 31 #define gsi_readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) @@ -130,8 +130,8 @@ struct gsi_ctx { struct device *dev; struct gsi_per_props per; bool per_registered; - struct gsi_chan_ctx chan[GSI_MAX_CHAN]; - struct gsi_evt_ctx evtr[GSI_MAX_EVT_RING]; + struct gsi_chan_ctx chan[GSI_CHAN_MAX]; + struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX]; struct mutex mlock; spinlock_t slock; unsigned long evt_bmap; @@ -141,6 +141,8 @@ struct gsi_ctx { struct gsi_ee_scratch scratch; int num_ch_dp_stats; struct workqueue_struct *dp_stat_wq; + u32 max_ch; + u32 max_ev; }; enum gsi_re_type { diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c index 2ab8b79acc6d..5eb9084292a4 100644 --- a/drivers/platform/msm/gsi/gsi_dbg.c +++ b/drivers/platform/msm/gsi/gsi_dbg.c @@ -71,7 +71,7 @@ static ssize_t gsi_dump_evt(struct file *file, TDBG("arg1=%u arg2=%u\n", arg1, arg2); - if (arg1 >= GSI_MAX_EVT_RING) { + if (arg1 >= gsi_ctx->max_ev) { TERR("invalid evt ring id %u\n", arg1); return -EFAULT; } @@ -184,7 +184,7 @@ static ssize_t gsi_dump_ch(struct file *file, TDBG("arg1=%u arg2=%u\n", arg1, arg2); - if (arg1 >= GSI_MAX_CHAN) { + if (arg1 >= gsi_ctx->max_ch) { TERR("invalid chan id %u\n", arg1); return -EFAULT; } @@ -271,9 +271,30 @@ static ssize_t gsi_dump_ee(struct file *file, val = gsi_readl(gsi_ctx->base + GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee)); TERR("EE%2d STATUS 0x%x\n", gsi_ctx->per.ee, val); - val = gsi_readl(gsi_ctx->base + - GSI_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee)); - TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val); + if (gsi_ctx->per.ver == GSI_VER_1_0) { + val = gsi_readl(gsi_ctx->base + + GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val); + } else if (gsi_ctx->per.ver == GSI_VER_1_2) { + val = gsi_readl(gsi_ctx->base + + GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val); + } else if (gsi_ctx->per.ver == GSI_VER_1_3) { + val = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val); + val = gsi_readl(gsi_ctx->base + + GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee)); + TERR("EE%2d HW_PARAM_2 0x%x\n", gsi_ctx->per.ee, val); + } else { + WARN_ON(1); + } val = gsi_readl(gsi_ctx->base + GSI_EE_n_GSI_SW_VERSION_OFFS(gsi_ctx->per.ee)); TERR("EE%2d SW_VERSION 0x%x\n", gsi_ctx->per.ee, val); @@ -329,7 +350,7 @@ static ssize_t gsi_dump_map(struct file *file, int i; TERR("EVT bitmap 0x%lx\n", gsi_ctx->evt_bmap); - for (i = 0; i < GSI_MAX_CHAN; i++) { + for (i = 0; i < gsi_ctx->max_ch; i++) { ctx = &gsi_ctx->chan[i]; if (ctx->allocated) { @@ -402,8 +423,8 @@ static ssize_t gsi_dump_stats(struct file *file, if (ch_id == -1) { min = 0; - max = 
GSI_MAX_CHAN; - } else if (ch_id < 0 || ch_id >= GSI_MAX_CHAN || + max = gsi_ctx->max_ch; + } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch || !gsi_ctx->chan[ch_id].allocated) { goto error; } else { @@ -464,7 +485,7 @@ static ssize_t gsi_enable_dp_stats(struct file *file, if (kstrtos32(dbg_buff + 1, 0, &ch_id)) goto error; - if (ch_id < 0 || ch_id >= GSI_MAX_CHAN || + if (ch_id < 0 || ch_id >= gsi_ctx->max_ch || !gsi_ctx->chan[ch_id].allocated) { goto error; } @@ -540,7 +561,7 @@ static ssize_t gsi_set_max_elem_dp_stats(struct file *file, /* get */ if (kstrtou32(dbg_buff, 0, &ch_id)) goto error; - if (ch_id >= GSI_MAX_CHAN) + if (ch_id >= gsi_ctx->max_ch) goto error; PRT_STAT("ch %d: max_re_expected=%d\n", ch_id, gsi_ctx->chan[ch_id].props.max_re_expected); @@ -553,7 +574,7 @@ static ssize_t gsi_set_max_elem_dp_stats(struct file *file, TDBG("ch_id=%u max_elem=%u\n", ch_id, max_elem); - if (ch_id >= GSI_MAX_CHAN) { + if (ch_id >= gsi_ctx->max_ch) { TERR("invalid chan id %u\n", ch_id); goto error; } @@ -572,7 +593,7 @@ static void gsi_wq_print_dp_stats(struct work_struct *work) { int ch_id; - for (ch_id = 0; ch_id < GSI_MAX_CHAN; ch_id++) { + for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) { if (gsi_ctx->chan[ch_id].print_dp_stats) gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]); } @@ -618,7 +639,7 @@ static void gsi_wq_update_dp_stats(struct work_struct *work) { int ch_id; - for (ch_id = 0; ch_id < GSI_MAX_CHAN; ch_id++) { + for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) { if (gsi_ctx->chan[ch_id].allocated && gsi_ctx->chan[ch_id].props.prot != GSI_CHAN_PROT_GPI && gsi_ctx->chan[ch_id].enable_dp_stats) @@ -649,8 +670,8 @@ static ssize_t gsi_rst_stats(struct file *file, if (ch_id == -1) { min = 0; - max = GSI_MAX_CHAN; - } else if (ch_id < 0 || ch_id >= GSI_MAX_CHAN || + max = gsi_ctx->max_ch; + } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch || !gsi_ctx->chan[ch_id].allocated) { goto error; } else { @@ -691,7 +712,7 @@ static ssize_t gsi_print_dp_stats(struct file *file, if (kstrtos32(dbg_buff + 1, 0, &ch_id)) goto error; - if (ch_id < 0 || ch_id >= GSI_MAX_CHAN || + if (ch_id < 0 || ch_id >= gsi_ctx->max_ch || !gsi_ctx->chan[ch_id].allocated) { goto error; } diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h index 36a74105b490..fa1e84896f73 100644 --- a/drivers/platform/msm/gsi/gsi_reg.h +++ b/drivers/platform/msm/gsi/gsi_reg.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 
 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,6 +17,10 @@ #define GSI_GSI_CFG_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x00000000) #define GSI_GSI_CFG_RMSK 0xf +#define GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20 +#define GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5 +#define GSI_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10 +#define GSI_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4 #define GSI_GSI_CFG_UC_IS_MCS_BMSK 0x8 #define GSI_GSI_CFG_UC_IS_MCS_SHFT 0x3 #define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4 @@ -26,6 +30,11 @@ #define GSI_GSI_CFG_GSI_ENABLE_BMSK 0x1 #define GSI_GSI_CFG_GSI_ENABLE_SHFT 0x0 +#define GSI_GSI_MCS_CFG_OFFS \ + (GSI_GSI_REG_BASE_OFFS + 0x0000B000) +#define GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK 0x1 +#define GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT 0x0 + #define GSI_GSI_MANAGER_MCS_CODE_VER_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x00000008) #define GSI_GSI_MANAGER_MCS_CODE_VER_RMSK 0xffffffff @@ -99,8 +108,20 @@ #define GSI_GSI_CGC_CTRL_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x00000060) #define GSI_GSI_CGC_CTRL_RMSK 0x3f -#define GSI_GSI_CGC_CTRL_REGION_6_DEBUG_CNTRS_EN_BMSK 0x20 -#define GSI_GSI_CGC_CTRL_REGION_6_DEBUG_CNTRS_EN_SHFT 0x5 +#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_BMSK 0x800 +#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_SHFT 0xb +#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_BMSK 0x400 +#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_SHFT 0xa +#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_BMSK 0x200 +#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_SHFT 0x9 +#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_BMSK 0x100 +#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_SHFT 0x8 +#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_BMSK 0x80 +#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_SHFT 0x7 +#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_BMSK 0x40 +#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_SHFT 0x6 +#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_BMSK 0x20 +#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_SHFT 0x5 #define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_BMSK 0x10 #define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_SHFT 0x4 #define GSI_GSI_CGC_CTRL_REGION_4_HW_CGC_EN_BMSK 0x8 @@ -619,23 +640,23 @@ #define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff #define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0 -#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_OFFS \ +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x00000430) -#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_RMSK 0xfff -#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_IRAM_PTR_BMSK 0xfff -#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_IRAM_PTR_SHFT 0x0 +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_SHFT 0x0 -#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_OFFS \ +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x00000434) -#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_RMSK 0xfff -#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_IRAM_PTR_BMSK 0xfff -#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_IRAM_PTR_SHFT 0x0 +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_SHFT 0x0 -#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_OFFS \ +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x00000438) -#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_RMSK 0xfff -#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_IRAM_PTR_BMSK 0xfff 
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_IRAM_PTR_SHFT 0x0 +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_RMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_BMSK 0xfff +#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_SHFT 0x0 #define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x0000043c) @@ -701,7 +722,9 @@ #define GSI_GSI_DEBUG_BUSY_REG_OFFS \ (GSI_GSI_REG_BASE_OFFS + 0x00001010) -#define GSI_GSI_DEBUG_BUSY_REG_RMSK 0x7f +#define GSI_GSI_DEBUG_BUSY_REG_RMSK 0xff +#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_BMSK 0x80 +#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_SHFT 0x7 #define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_BMSK 0x40 #define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_SHFT 0x6 #define GSI_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_BMSK 0x20 @@ -1345,22 +1368,150 @@ #define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0xffffffff #define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0 -#define GSI_EE_n_GSI_HW_PARAM_OFFS(n) \ +/* v1.0 */ +#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V1_0_EE_n_GSI_HW_PARAM_RMSK 0x7fffffff +#define GSI_V1_0_EE_n_GSI_HW_PARAM_MAXn 3 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a +#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8 +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff +#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0 + +/* v1.2 */ +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n)) +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_MAXn 2 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff +#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0 + +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_MAXn 2 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \ + 0x80000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \ + 0x40000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d 
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8 +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff +#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0 + +/* v1.3 */ +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n)) +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_MAXn 2 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff +#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0 + +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(n) \ + (GSI_GSI_REG_BASE_OFFS + 0x0001f03c + 0x4000 * (n)) +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_MAXn 2 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \ + 0x80000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \ + 0x40000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000 +#define 
GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff +#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0 + +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n)) -#define GSI_EE_n_GSI_HW_PARAM_RMSK 0x7fffffff -#define GSI_EE_n_GSI_HW_PARAM_MAXn 3 -#define GSI_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000 -#define GSI_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a -#define GSI_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000 -#define GSI_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19 -#define GSI_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000 -#define GSI_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14 -#define GSI_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000 -#define GSI_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10 -#define GSI_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00 -#define GSI_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8 -#define GSI_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff -#define GSI_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_MAXn 2 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0 +#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1 #define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \ (GSI_GSI_REG_BASE_OFFS + 0x0001f044 + 0x4000 * (n)) @@ -1662,7 +1813,7 @@ #define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_RMSK 0xffffffff #define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_MAXn 3 #define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \ - 0xffffffff + 0x00003fff #define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0 #define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) \ @@ -1670,7 +1821,7 @@ #define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_RMSK 0xffffffff #define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_MAXn 3 #define 
GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \ - 0xffffffff + 0x000003ff #define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0 #define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) \ diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c index 9cb0b1f3c379..804c89dc9533 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c @@ -207,7 +207,6 @@ struct platform_device *ipa_pdev; static struct { bool present; bool arm_smmu; - bool disable_htw; bool fast_map; bool s1_bypass; u32 ipa_base; @@ -4313,9 +4312,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ; ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ; - smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node, - "qcom,smmu-disable-htw"); - /* Get IPA HW Version */ result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver", &ipa_drv_res->ipa_hw_type); @@ -4502,7 +4498,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, static int ipa_smmu_wlan_cb_probe(struct device *dev) { struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx(); - int disable_htw = 1; int atomic_ctx = 1; int fast = 1; int bypass = 1; @@ -4519,17 +4514,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev) } cb->valid = true; - if (smmu_info.disable_htw) { - ret = iommu_domain_set_attr(cb->iommu, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw); - if (ret) { - IPAERR("couldn't disable coherent HTW\n"); - cb->valid = false; - return -EIO; - } - } - if (smmu_info.s1_bypass) { if (iommu_domain_set_attr(cb->iommu, DOMAIN_ATTR_S1_BYPASS, @@ -4589,7 +4573,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev) static int ipa_smmu_uc_cb_probe(struct device *dev) { struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx(); - int disable_htw = 1; int atomic_ctx = 1; int ret; int fast = 1; @@ -4628,18 +4611,6 @@ static int ipa_smmu_uc_cb_probe(struct device *dev) IPADBG("SMMU mapping created\n"); cb->valid = true; - IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev); - if (smmu_info.disable_htw) { - if (iommu_domain_set_attr(cb->mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw)) { - IPAERR("couldn't disable coherent HTW\n"); - arm_iommu_release_mapping(cb->mapping); - cb->valid = false; - return -EIO; - } - } - IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev); if (smmu_info.s1_bypass) { if (iommu_domain_set_attr(cb->mapping->domain, @@ -4694,7 +4665,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev) { struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx(); int result; - int disable_htw = 1; int atomic_ctx = 1; int fast = 1; int bypass = 1; @@ -4731,18 +4701,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev) IPADBG("SMMU mapping created\n"); cb->valid = true; - if (smmu_info.disable_htw) { - if (iommu_domain_set_attr(cb->mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw)) { - IPAERR("couldn't disable coherent HTW\n"); - arm_iommu_release_mapping(cb->mapping); - cb->valid = false; - return -EIO; - } - IPADBG("SMMU disable HTW\n"); - } - if (smmu_info.s1_bypass) { if (iommu_domain_set_attr(cb->mapping->domain, DOMAIN_ATTR_S1_BYPASS, diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c index 08ed47f3cacf..d14f8da15595 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c @@ -150,10 +150,16 @@ int 
ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data) { int ret; + if (!ipa_ctx) { + IPAERR("IPA ctx is null\n"); + return -ENXIO; + } + ret = ipa2_uc_state_check(); if (ret) { ipa_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb; ipa_ctx->uc_ntn_ctx.priv = user_data; + return 0; } return -EEXIST; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 24fbc5c738d8..ab62dbcddd22 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -251,7 +251,6 @@ struct platform_device *ipa3_pdev; static struct { bool present; bool arm_smmu; - bool disable_htw; bool fast_map; bool s1_bypass; bool use_64_bit_dma_mask; @@ -3791,6 +3790,32 @@ static int ipa3_gsi_pre_fw_load_init(void) return 0; } +static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type) +{ + enum gsi_ver gsi_ver; + + switch (ipa_hw_type) { + case IPA_HW_v3_0: + case IPA_HW_v3_1: + gsi_ver = GSI_VER_1_0; + break; + case IPA_HW_v3_5: + gsi_ver = GSI_VER_1_2; + break; + case IPA_HW_v3_5_1: + gsi_ver = GSI_VER_1_3; + break; + default: + IPAERR("No GSI version for ipa type %d\n", ipa_hw_type); + WARN_ON(1); + gsi_ver = GSI_VER_ERR; + } + + IPADBG("GSI version %d\n", gsi_ver); + + return gsi_ver; +} + /** * ipa3_post_init() - Initialize the IPA Driver (Part II). * This part contains all initialization which requires interaction with @@ -3820,6 +3845,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p, if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) { memset(&gsi_props, 0, sizeof(gsi_props)); + gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type); gsi_props.ee = resource_p->ee; gsi_props.intr = GSI_INTR_IRQ; gsi_props.irq = resource_p->transport_irq; @@ -4695,9 +4721,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, ipa_drv_res->ipa_tz_unlock_reg_num = 0; ipa_drv_res->ipa_tz_unlock_reg = NULL; - smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node, - "qcom,smmu-disable-htw"); - /* Get IPA HW Version */ result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver", &ipa_drv_res->ipa_hw_type); @@ -4953,7 +4976,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev, static int ipa_smmu_wlan_cb_probe(struct device *dev) { struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx(); - int disable_htw = 1; int atomic_ctx = 1; int fast = 1; int bypass = 1; @@ -4973,17 +4995,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev) } cb->valid = true; - if (smmu_info.disable_htw) { - ret = iommu_domain_set_attr(cb->iommu, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw); - if (ret) { - IPAERR("couldn't disable coherent HTW\n"); - cb->valid = false; - return -EIO; - } - } - if (smmu_info.s1_bypass) { if (iommu_domain_set_attr(cb->iommu, DOMAIN_ATTR_S1_BYPASS, @@ -5056,7 +5067,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev) static int ipa_smmu_uc_cb_probe(struct device *dev) { struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx(); - int disable_htw = 1; int atomic_ctx = 1; int bypass = 1; int fast = 1; @@ -5102,18 +5112,6 @@ static int ipa_smmu_uc_cb_probe(struct device *dev) IPADBG("SMMU mapping created\n"); cb->valid = true; - IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev); - if (smmu_info.disable_htw) { - if (iommu_domain_set_attr(cb->mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw)) { - IPAERR("couldn't disable coherent HTW\n"); - arm_iommu_release_mapping(cb->mapping); - cb->valid = false; - return -EIO; - } - } - 
IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev); if (smmu_info.s1_bypass) { if (iommu_domain_set_attr(cb->mapping->domain, @@ -5168,7 +5166,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev) { struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(); int result; - int disable_htw = 1; int atomic_ctx = 1; int fast = 1; int bypass = 1; @@ -5216,17 +5213,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev) IPADBG("SMMU mapping created\n"); cb->valid = true; - if (smmu_info.disable_htw) { - if (iommu_domain_set_attr(cb->mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw)) { - IPAERR("couldn't disable coherent HTW\n"); - arm_iommu_release_mapping(cb->mapping); - cb->valid = false; - return -EIO; - } - IPADBG("SMMU disable HTW\n"); - } if (smmu_info.s1_bypass) { if (iommu_domain_set_attr(cb->mapping->domain, DOMAIN_ATTR_S1_BYPASS, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index 6f86448319db..8e85822d9719 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -481,7 +481,7 @@ struct ipa_gsi_ep_mem_info { struct ipa3_status_stats { struct ipahal_pkt_status status[IPA_MAX_STATUS_STAT_NUM]; - int curr; + unsigned int curr; }; /** diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c index 6d826590cabc..45fedfa72bda 100644 --- a/drivers/platform/msm/msm_11ad/msm_11ad.c +++ b/drivers/platform/msm/msm_11ad/msm_11ad.c @@ -569,7 +569,6 @@ err_disable_vregs: static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx) { - int disable_htw = 1; int atomic_ctx = 1; int rc; int bypass_enable = 1; @@ -587,17 +586,6 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx) dev_info(ctx->dev, "IOMMU mapping created: %p\n", ctx->mapping); rc = iommu_domain_set_attr(ctx->mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw); - if (rc) { - /* This error can be ignored and not considered fatal, - * but let the users know this happened - */ - dev_err(ctx->dev, "Warning: disable coherent HTW failed (%d)\n", - rc); - } - - rc = iommu_domain_set_attr(ctx->mapping->domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx); if (rc) { diff --git a/drivers/platform/msm/qpnp-revid.c b/drivers/platform/msm/qpnp-revid.c index 0bbda4eb4116..78e685f789cd 100644 --- a/drivers/platform/msm/qpnp-revid.c +++ b/drivers/platform/msm/qpnp-revid.c @@ -27,6 +27,7 @@ #define REVID_SUBTYPE 0x5 #define REVID_STATUS1 0x8 #define REVID_SPARE_0 0x60 +#define REVID_FAB_ID 0xf2 #define QPNP_REVID_DEV_NAME "qcom,qpnp-revid" @@ -154,7 +155,7 @@ static size_t build_pmic_string(char *buf, size_t n, int sid, static int qpnp_revid_probe(struct platform_device *pdev) { u8 rev1, rev2, rev3, rev4, pmic_type, pmic_subtype, pmic_status; - u8 option1, option2, option3, option4, spare0; + u8 option1, option2, option3, option4, spare0, fab_id; unsigned int base; int rc; char pmic_string[PMIC_STRING_MAXLENGTH] = {'\0'}; @@ -199,6 +200,11 @@ static int qpnp_revid_probe(struct platform_device *pdev) pmic_subtype = PMI8937_PERIPHERAL_SUBTYPE; } + if (of_property_read_bool(pdev->dev.of_node, "qcom,fab-id-valid")) + fab_id = qpnp_read_byte(regmap, base + REVID_FAB_ID); + else + fab_id = -EINVAL; + revid_chip = devm_kzalloc(&pdev->dev, sizeof(struct revid_chip), GFP_KERNEL); if (!revid_chip) @@ -211,6 +217,7 @@ static int qpnp_revid_probe(struct platform_device *pdev) revid_chip->data.rev4 = rev4; revid_chip->data.pmic_subtype = pmic_subtype; revid_chip->data.pmic_type = pmic_type; + revid_chip->data.fab_id 
= fab_id; if (pmic_subtype < ARRAY_SIZE(pmic_names)) revid_chip->data.pmic_name = pmic_names[pmic_subtype]; diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c index 8fa4fe301676..ee576d300054 100644 --- a/drivers/power/qcom-charger/qpnp-smb2.c +++ b/drivers/power/qcom-charger/qpnp-smb2.c @@ -58,6 +58,13 @@ static struct smb_params v1_params = { .max_u = 4800000, .step_u = 25000, }, + .otg_cl = { + .name = "usb otg current limit", + .reg = OTG_CURRENT_LIMIT_CFG_REG, + .min_u = 250000, + .max_u = 2000000, + .step_u = 250000, + }, .dc_icl = { .name = "dc input current limit", .reg = DCIN_CURRENT_LIMIT_CFG_REG, @@ -202,6 +209,7 @@ struct smb_dt_props { bool no_battery; int fcc_ua; int usb_icl_ua; + int otg_cl_ua; int dc_icl_ua; int fv_uv; int wipower_max_uw; @@ -226,6 +234,7 @@ module_param_named( pl_master_percent, __pl_master_percent, int, S_IRUSR | S_IWUSR ); +#define MICRO_1P5A 1500000 static int smb2_parse_dt(struct smb2 *chip) { struct smb_charger *chg = &chip->chg; @@ -278,6 +287,11 @@ static int smb2_parse_dt(struct smb2 *chip) chip->dt.usb_icl_ua = -EINVAL; rc = of_property_read_u32(node, + "qcom,otg-cl-ua", &chip->dt.otg_cl_ua); + if (rc < 0) + chip->dt.otg_cl_ua = MICRO_1P5A; + + rc = of_property_read_u32(node, "qcom,dc-icl-ua", &chip->dt.dc_icl_ua); if (rc < 0) chip->dt.dc_icl_ua = -EINVAL; @@ -981,11 +995,15 @@ static int smb2_init_hw(struct smb2 *chip) smblib_get_charge_param(chg, &chg->param.dc_icl, &chip->dt.dc_icl_ua); + chg->otg_cl_ua = chip->dt.otg_cl_ua; + /* votes must be cast before configuring software control */ vote(chg->pl_disable_votable, - USBIN_ICL_VOTER, true, 0); + PL_INDIRECT_VOTER, true, 0); vote(chg->pl_disable_votable, CHG_STATE_VOTER, true, 0); + vote(chg->pl_disable_votable, + PARALLEL_PSY_VOTER, true, 0); vote(chg->usb_suspend_votable, DEFAULT_VOTER, chip->dt.no_battery, 0); vote(chg->dc_suspend_votable, @@ -1100,6 +1118,7 @@ static int smb2_init_hw(struct smb2 *chip) static int smb2_setup_wa_flags(struct smb2 *chip) { + struct smb_charger *chg = &chip->chg; struct pmic_revid_data *pmic_rev_id; struct device_node *revid_dev_node; @@ -1122,6 +1141,8 @@ static int smb2_setup_wa_flags(struct smb2 *chip) switch (pmic_rev_id->pmic_subtype) { case PMICOBALT_SUBTYPE: + if (pmic_rev_id->rev4 == PMICOBALT_V1P1_REV4) /* PMI rev 1.1 */ + chg->wa_flags |= QC_CHARGER_DETECTION_WA_BIT; break; default: pr_err("PMIC subtype %d not supported\n", diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c index a869fc592474..0067ec5c2ca2 100644 --- a/drivers/power/qcom-charger/smb-lib.c +++ b/drivers/power/qcom-charger/smb-lib.c @@ -12,6 +12,7 @@ #include <linux/device.h> #include <linux/regmap.h> +#include <linux/delay.h> #include <linux/iio/consumer.h> #include <linux/power_supply.h> #include <linux/regulator/driver.h> @@ -521,7 +522,7 @@ static int smblib_fcc_max_vote_callback(struct votable *votable, void *data, { struct smb_charger *chg = data; - return vote(chg->fcc_votable, FCC_MAX_RESULT, true, fcc_ua); + return vote(chg->fcc_votable, FCC_MAX_RESULT_VOTER, true, fcc_ua); } static int smblib_fcc_vote_callback(struct votable *votable, void *data, @@ -642,6 +643,31 @@ suspend: return rc; } +#define MICRO_250MA 250000 +static int smblib_otg_cl_config(struct smb_charger *chg, int otg_cl_ua) +{ + int rc = 0; + + rc = smblib_set_charge_param(chg, &chg->param.otg_cl, otg_cl_ua); + if (rc < 0) { + dev_err(chg->dev, "Couldn't set otg current limit rc=%d\n", + rc); + return rc; + } + + /* configure PFM/PWM 
mode for OTG regulator */ + rc = smblib_masked_write(chg, DC_ENG_SSUPPLY_CFG3_REG, + ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT, + otg_cl_ua > MICRO_250MA ? 1 : 0); + if (rc < 0) { + dev_err(chg->dev, + "Couldn't write DC_ENG_SSUPPLY_CFG3_REG rc=%d\n", rc); + return rc; + } + + return rc; +} + static int smblib_dc_icl_vote_callback(struct votable *votable, void *data, int icl_ua, const char *client) { @@ -731,18 +757,51 @@ static int smblib_chg_disable_vote_callback(struct votable *votable, void *data, return 0; } + +static int smblib_pl_enable_indirect_vote_callback(struct votable *votable, + void *data, int chg_enable, const char *client) +{ + struct smb_charger *chg = data; + + vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, !chg_enable, 0); + + return 0; +} + /***************** * OTG REGULATOR * *****************/ +#define OTG_SOFT_START_DELAY_MS 20 int smblib_vbus_regulator_enable(struct regulator_dev *rdev) { struct smb_charger *chg = rdev_get_drvdata(rdev); + u8 stat; int rc = 0; - rc = regmap_write(chg->regmap, CMD_OTG_REG, OTG_EN_BIT); - if (rc < 0) + rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG, + ENG_BUCKBOOST_HALT1_8_MODE_BIT, + ENG_BUCKBOOST_HALT1_8_MODE_BIT); + if (rc < 0) { + dev_err(chg->dev, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n", + rc); + return rc; + } + + rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT); + if (rc < 0) { dev_err(chg->dev, "Couldn't enable OTG regulator rc=%d\n", rc); + return rc; + } + + msleep(OTG_SOFT_START_DELAY_MS); + rc = smblib_read(chg, OTG_STATUS_REG, &stat); + if (rc < 0) { + dev_err(chg->dev, "Couldn't read OTG_STATUS_REG rc=%d\n", rc); + return rc; + } + if (stat & BOOST_SOFTSTART_DONE_BIT) + smblib_otg_cl_config(chg, chg->otg_cl_ua); return rc; } @@ -752,9 +811,22 @@ int smblib_vbus_regulator_disable(struct regulator_dev *rdev) struct smb_charger *chg = rdev_get_drvdata(rdev); int rc = 0; - rc = regmap_write(chg->regmap, CMD_OTG_REG, 0); - if (rc < 0) + rc = smblib_write(chg, CMD_OTG_REG, 0); + if (rc < 0) { dev_err(chg->dev, "Couldn't disable OTG regulator rc=%d\n", rc); + return rc; + } + + smblib_otg_cl_config(chg, MICRO_250MA); + + rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG, + ENG_BUCKBOOST_HALT1_8_MODE_BIT, 0); + if (rc < 0) { + dev_err(chg->dev, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n", + rc); + return rc; + } + return rc; } @@ -1144,13 +1216,14 @@ int smblib_set_prop_system_temp_level(struct smb_charger *chg, chg->system_temp_level = val->intval; if (chg->system_temp_level == chg->thermal_levels) - return vote(chg->chg_disable_votable, THERMAL_DAEMON, true, 0); + return vote(chg->chg_disable_votable, + THERMAL_DAEMON_VOTER, true, 0); - vote(chg->chg_disable_votable, THERMAL_DAEMON, false, 0); + vote(chg->chg_disable_votable, THERMAL_DAEMON_VOTER, false, 0); if (chg->system_temp_level == 0) - return vote(chg->fcc_votable, THERMAL_DAEMON, false, 0); + return vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, false, 0); - vote(chg->fcc_votable, THERMAL_DAEMON, true, + vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, true, chg->thermal_mitigation[chg->system_temp_level]); return 0; } @@ -1589,7 +1662,11 @@ int smblib_set_prop_usb_voltage_min(struct smb_charger *chg, return rc; } - chg->voltage_min_uv = val->intval; + if (chg->mode == PARALLEL_MASTER) + vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, + min_uv > MICRO_5V, 0); + + chg->voltage_min_uv = min_uv; return rc; } @@ -1607,7 +1684,7 @@ int smblib_set_prop_usb_voltage_max(struct smb_charger *chg, return rc; } - chg->voltage_max_uv = val->intval; + chg->voltage_max_uv = max_uv; 
return rc; } @@ -1875,49 +1952,23 @@ skip_dpdm_float: return IRQ_HANDLED; } -#define MICRO_5P5V 5500000 -#define USB_WEAK_INPUT_MA 1500000 -static bool is_icl_pl_ready(struct smb_charger *chg) +#define USB_WEAK_INPUT_MA 1400000 +irqreturn_t smblib_handle_icl_change(int irq, void *data) { - union power_supply_propval pval = {0, }; + struct smb_irq_data *irq_data = data; + struct smb_charger *chg = irq_data->parent_data; int icl_ma; int rc; - rc = smblib_get_prop_usb_voltage_now(chg, &pval); + rc = smblib_get_charge_param(chg, &chg->param.icl_stat, &icl_ma); if (rc < 0) { - dev_err(chg->dev, "Couldn't get prop usb voltage rc=%d\n", rc); - return false; - } - - if (pval.intval <= MICRO_5P5V) { - rc = smblib_get_charge_param(chg, - &chg->param.icl_stat, &icl_ma); - if (rc < 0) { - dev_err(chg->dev, "Couldn't get ICL status rc=%d\n", - rc); - return false; - } - - if (icl_ma < USB_WEAK_INPUT_MA) - return false; + dev_err(chg->dev, "Couldn't get ICL status rc=%d\n", rc); + return IRQ_HANDLED; } - /* - * Always enable parallel charging when USB INPUT is higher than 5V - * regardless of the AICL results. Assume chargers above 5V are strong - */ - - return true; -} - -irqreturn_t smblib_handle_icl_change(int irq, void *data) -{ - struct smb_irq_data *irq_data = data; - struct smb_charger *chg = irq_data->parent_data; - if (chg->mode == PARALLEL_MASTER) - vote(chg->pl_disable_votable, USBIN_ICL_VOTER, - !is_icl_pl_ready(chg), 0); + vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, + icl_ma >= USB_WEAK_INPUT_MA, 0); smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name); @@ -1954,12 +2005,27 @@ static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg, if (!rising) return; + if (chg->mode == PARALLEL_MASTER) + vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0); + /* the APSD done handler will set the USB supply type */ apsd_result = smblib_get_apsd_result(chg); smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n", apsd_result->name); } +static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg, + bool rising, bool qc_charger) +{ + if (rising && !qc_charger) { + vote(chg->pd_allowed_votable, DEFAULT_VOTER, true, 0); + power_supply_changed(chg->usb_psy); + } + + smblib_dbg(chg, PR_INTERRUPT, "IRQ: smblib_handle_hvdcp_check_timeout %s\n", + rising ? 
"rising" : "falling"); +} + /* triggers when HVDCP is detected */ static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg, bool rising) @@ -1991,8 +2057,9 @@ static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising) vote(chg->pd_allowed_votable, DEFAULT_VOTER, true, 0); break; case DCP_CHARGER_BIT: - schedule_delayed_work(&chg->hvdcp_detect_work, - msecs_to_jiffies(HVDCP_DET_MS)); + if (chg->wa_flags & QC_CHARGER_DETECTION_WA_BIT) + schedule_delayed_work(&chg->hvdcp_detect_work, + msecs_to_jiffies(HVDCP_DET_MS)); break; default: break; @@ -2026,6 +2093,10 @@ irqreturn_t smblib_handle_usb_source_change(int irq, void *data) smblib_handle_hvdcp_detect_done(chg, (bool)(stat & QC_CHARGER_BIT)); + smblib_handle_hvdcp_check_timeout(chg, + (bool)(stat & HVDCP_CHECK_TIMEOUT_BIT), + (bool)(stat & QC_CHARGER_BIT)); + smblib_handle_hvdcp_3p0_auth_done(chg, (bool)(stat & QC_AUTH_DONE_STATUS_BIT)); @@ -2079,8 +2150,9 @@ static void smblib_handle_typec_debounce_done(struct smb_charger *chg, !rising || sink_attached, 0); if (!rising || sink_attached) { - /* icl votes to disable parallel charging */ - vote(chg->pl_disable_votable, USBIN_ICL_VOTER, true, 0); + /* reset both usbin current and voltage votes */ + vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0); + vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0); /* reset taper_end voter here */ vote(chg->pl_disable_votable, TAPER_END_VOTER, false, 0); } @@ -2105,11 +2177,6 @@ irqreturn_t smblib_handle_usb_typec_change(int irq, void *data) } smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat); - if (stat & TYPEC_VBUS_ERROR_STATUS_BIT) { - dev_err(chg->dev, "IRQ: vbus-error rising\n"); - return IRQ_HANDLED; - } - smblib_handle_typec_cc(chg, (bool)(stat & CC_ATTACHED_BIT)); smblib_handle_typec_debounce_done(chg, @@ -2118,6 +2185,10 @@ irqreturn_t smblib_handle_usb_typec_change(int irq, void *data) power_supply_changed(chg->usb_psy); + if (stat & TYPEC_VBUS_ERROR_STATUS_BIT) + smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s vbus-error\n", + irq_data->name); + return IRQ_HANDLED; } @@ -2178,8 +2249,7 @@ static void smblib_pl_detect_work(struct work_struct *work) struct smb_charger *chg = container_of(work, struct smb_charger, pl_detect_work); - if (!get_effective_result_locked(chg->pl_disable_votable)) - rerun_election(chg->pl_disable_votable); + vote(chg->pl_disable_votable, PARALLEL_PSY_VOTER, false, 0); } #define MINIMUM_PARALLEL_FCC_UA 500000 @@ -2204,7 +2274,7 @@ static void smblib_pl_taper_work(struct work_struct *work) } if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) { - vote(chg->awake_votable, PL_VOTER, true, 0); + vote(chg->awake_votable, PL_TAPER_WORK_RUNNING_VOTER, true, 0); /* Reduce the taper percent by 25 percent */ chg->pl.taper_percent = chg->pl.taper_percent * TAPER_RESIDUAL_PERCENT / 100; @@ -2218,7 +2288,7 @@ static void smblib_pl_taper_work(struct work_struct *work) * Master back to Fast Charge, get out of this round of taper reduction */ done: - vote(chg->awake_votable, PL_VOTER, false, 0); + vote(chg->awake_votable, PL_TAPER_WORK_RUNNING_VOTER, false, 0); } static void clear_hdc_work(struct work_struct *work) @@ -2320,6 +2390,15 @@ static int smblib_create_votables(struct smb_charger *chg) return rc; } + chg->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT", + VOTE_SET_ANY, + smblib_pl_enable_indirect_vote_callback, + chg); + if (IS_ERR(chg->pl_enable_votable_indirect)) { + rc = PTR_ERR(chg->pl_enable_votable_indirect); + return rc; + } + return rc; } @@ -2345,6 
+2424,10 @@ static void smblib_destroy_votables(struct smb_charger *chg) destroy_votable(chg->awake_votable); if (chg->pl_disable_votable) destroy_votable(chg->pl_disable_votable); + if (chg->chg_disable_votable) + destroy_votable(chg->chg_disable_votable); + if (chg->pl_enable_votable_indirect) + destroy_votable(chg->pl_enable_votable_indirect); } static void smblib_iio_deinit(struct smb_charger *chg) @@ -2381,9 +2464,6 @@ int smblib_init(struct smb_charger *chg) return rc; } - chg->bms_psy = power_supply_get_by_name("bms"); - chg->pl.psy = power_supply_get_by_name("parallel"); - rc = smblib_register_notifier(chg); if (rc < 0) { dev_err(chg->dev, @@ -2391,6 +2471,12 @@ int smblib_init(struct smb_charger *chg) return rc; } + chg->bms_psy = power_supply_get_by_name("bms"); + chg->pl.psy = power_supply_get_by_name("parallel"); + if (chg->pl.psy) + vote(chg->pl_disable_votable, PARALLEL_PSY_VOTER, + false, 0); + break; case PARALLEL_SLAVE: break; diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h index 1f2457e04ed6..5b4c2016adc8 100644 --- a/drivers/power/qcom-charger/smb-lib.h +++ b/drivers/power/qcom-charger/smb-lib.h @@ -24,16 +24,19 @@ enum print_reason { PR_MISC = BIT(2), }; -#define DEFAULT_VOTER "DEFAULT_VOTER" -#define USER_VOTER "USER_VOTER" -#define PD_VOTER "PD_VOTER" -#define PL_VOTER "PL_VOTER" -#define USBIN_ICL_VOTER "USBIN_ICL_VOTER" -#define CHG_STATE_VOTER "CHG_STATE_VOTER" -#define TYPEC_SRC_VOTER "TYPEC_SRC_VOTER" -#define TAPER_END_VOTER "TAPER_END_VOTER" -#define FCC_MAX_RESULT "FCC_MAX_RESULT" -#define THERMAL_DAEMON "THERMAL_DAEMON" +#define DEFAULT_VOTER "DEFAULT_VOTER" +#define USER_VOTER "USER_VOTER" +#define PD_VOTER "PD_VOTER" +#define PL_TAPER_WORK_RUNNING_VOTER "PL_TAPER_WORK_RUNNING_VOTER" +#define PARALLEL_PSY_VOTER "PARALLEL_PSY_VOTER" +#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER" +#define USBIN_I_VOTER "USBIN_I_VOTER" +#define USBIN_V_VOTER "USBIN_V_VOTER" +#define CHG_STATE_VOTER "CHG_STATE_VOTER" +#define TYPEC_SRC_VOTER "TYPEC_SRC_VOTER" +#define TAPER_END_VOTER "TAPER_END_VOTER" +#define FCC_MAX_RESULT_VOTER "FCC_MAX_RESULT_VOTER" +#define THERMAL_DAEMON_VOTER "THERMAL_DAEMON_VOTER" enum smb_mode { PARALLEL_MASTER = 0, @@ -41,6 +44,10 @@ enum smb_mode { NUM_MODES, }; +enum { + QC_CHARGER_DETECTION_WA_BIT = BIT(0), +}; + struct smb_regulator { struct regulator_dev *rdev; struct regulator_desc rdesc; @@ -70,6 +77,7 @@ struct smb_params { struct smb_chg_param fv; struct smb_chg_param usb_icl; struct smb_chg_param icl_stat; + struct smb_chg_param otg_cl; struct smb_chg_param dc_icl; struct smb_chg_param dc_icl_pt_lv; struct smb_chg_param dc_icl_pt_hv; @@ -139,6 +147,7 @@ struct smb_charger { struct votable *awake_votable; struct votable *pl_disable_votable; struct votable *chg_disable_votable; + struct votable *pl_enable_votable_indirect; /* work */ struct work_struct bms_update_work; @@ -159,6 +168,8 @@ struct smb_charger { int thermal_levels; int *thermal_mitigation; + int otg_cl_ua; + int fake_capacity; bool step_chg_enabled; diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h index 4a50d2fcbf97..8a49a8fb38ba 100644 --- a/drivers/power/qcom-charger/smb-reg.h +++ b/drivers/power/qcom-charger/smb-reg.h @@ -366,6 +366,9 @@ enum { #define OTG_EN_SRC_CFG_BIT BIT(1) #define CONCURRENT_MODE_CFG_BIT BIT(0) +#define OTG_ENG_OTG_CFG_REG (OTG_BASE + 0xC0) +#define ENG_BUCKBOOST_HALT1_8_MODE_BIT BIT(0) + /* BATIF Peripheral Registers */ /* BATIF Interrupt Bits */ #define BAT_7_RT_STS_BIT 
BIT(7) @@ -427,7 +430,7 @@ enum { #define APSD_STATUS_REG (USBIN_BASE + 0x07) #define APSD_STATUS_7_BIT BIT(7) -#define APSD_STATUS_6_BIT BIT(6) +#define HVDCP_CHECK_TIMEOUT_BIT BIT(6) #define SLOW_PLUGIN_TIMEOUT_BIT BIT(5) #define ENUMERATION_DONE_BIT BIT(4) #define VADP_CHANGE_DONE_AFTER_AUTH_BIT BIT(3) @@ -766,6 +769,13 @@ enum { ZIN_ICL_HV_MAX_MV = 11000, }; +#define DC_ENG_SSUPPLY_CFG3_REG (DCIN_BASE + 0xC2) +#define ENG_SSUPPLY_HI_CAP_BIT BIT(6) +#define ENG_SSUPPLY_HI_RES_BIT BIT(5) +#define ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT BIT(3) +#define ENG_SSUPPLY_CFG_SYSOV_TH_4P8_BIT BIT(2) +#define ENG_SSUPPLY_5V_OV_OPT_BIT BIT(0) + /* MISC Peripheral Registers */ #define REVISION1_REG (MISC_BASE + 0x00) #define DIG_MINOR_MASK GENMASK(7, 0) diff --git a/drivers/power/qcom/msm-core.c b/drivers/power/qcom/msm-core.c index e990425bd63a..727a768e63eb 100644 --- a/drivers/power/qcom/msm-core.c +++ b/drivers/power/qcom/msm-core.c @@ -240,10 +240,10 @@ void trigger_cpu_pwr_stats_calc(void) if (cpu_node->sensor_id < 0) continue; - if (cpu_node->temp == prev_temp[cpu]) + if (cpu_node->temp == prev_temp[cpu]) { sensor_get_temp(cpu_node->sensor_id, &temp); - - cpu_node->temp = temp / scaling_factor; + cpu_node->temp = temp / scaling_factor; + } prev_temp[cpu] = cpu_node->temp; @@ -373,7 +373,7 @@ static int update_userspace_power(struct sched_params __user *argp) { int i; int ret; - int cpu; + int cpu = -1; struct cpu_activity_info *node; struct cpu_static_info *sp, *clear_sp; int cpumask, cluster, mpidr; @@ -396,7 +396,7 @@ static int update_userspace_power(struct sched_params __user *argp) } } - if (cpu >= num_possible_cpus()) + if ((cpu < 0) || (cpu >= num_possible_cpus())) return -EINVAL; node = &activity[cpu]; diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c index 75a0de0c532b..2f109013f723 100644 --- a/drivers/power/reset/msm-poweroff.c +++ b/drivers/power/reset/msm-poweroff.c @@ -36,6 +36,7 @@ #define EMERGENCY_DLOAD_MAGIC1 0x322A4F99 #define EMERGENCY_DLOAD_MAGIC2 0xC67E4350 #define EMERGENCY_DLOAD_MAGIC3 0x77777777 +#define EMMC_DLOAD_TYPE 0x2 #define SCM_IO_DISABLE_PMIC_ARBITER 1 #define SCM_IO_DEASSERT_PS_HOLD 2 @@ -46,12 +47,20 @@ static int restart_mode; -void *restart_reason; +static void *restart_reason, *dload_type_addr; static bool scm_pmic_arbiter_disable_supported; static bool scm_deassert_ps_hold_supported; /* Download mode master kill-switch */ static void __iomem *msm_ps_hold; static phys_addr_t tcsr_boot_misc_detect; +static void scm_disable_sdi(void); + +/* The download mode value can only be changed once at runtime. + * There is no API from TZ to re-enable the registers, + * so SDI cannot be re-enabled once it has already been bypassed. 
+*/ +static int download_mode = 1; +static struct kobject dload_kobj; #ifdef CONFIG_QCOM_DLOAD_MODE #define EDL_MODE_PROP "qcom,msm-imem-emergency_download_mode" @@ -64,9 +73,23 @@ static void *emergency_dload_mode_addr; static bool scm_dload_supported; static int dload_set(const char *val, struct kernel_param *kp); -static int download_mode = 1; +/* interface for exporting attributes */ +struct reset_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, struct attribute *attr, + char *buf); + size_t (*store)(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count); +}; +#define to_reset_attr(_attr) \ + container_of(_attr, struct reset_attribute, attr) +#define RESET_ATTR(_name, _mode, _show, _store) \ + static struct reset_attribute reset_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) + module_param_call(download_mode, dload_set, param_get_int, &download_mode, 0644); + static int panic_prep_restart(struct notifier_block *this, unsigned long event, void *ptr) { @@ -170,7 +193,10 @@ static int dload_set(const char *val, struct kernel_param *kp) return 0; } #else -#define set_dload_mode(x) do {} while (0) +static void set_dload_mode(int on) +{ + return; +} static void enable_emergency_dload_mode(void) { @@ -183,6 +209,26 @@ static bool get_dload_mode(void) } #endif +static void scm_disable_sdi(void) +{ + int ret; + struct scm_desc desc = { + .args[0] = 1, + .args[1] = 0, + .arginfo = SCM_ARGS(2), + }; + + /* Needed to bypass debug image on some chips */ + if (!is_scm_armv8()) + ret = scm_call_atomic2(SCM_SVC_BOOT, + SCM_WDOG_DEBUG_BOOT_PART, 1, 0); + else + ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT, + SCM_WDOG_DEBUG_BOOT_PART), &desc); + if (ret) + pr_err("Failed to disable secure wdog debug: %d\n", ret); +} + void msm_set_restart_mode(int mode) { restart_mode = mode; @@ -320,13 +366,6 @@ static void deassert_ps_hold(void) static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd) { - int ret; - struct scm_desc desc = { - .args[0] = 1, - .args[1] = 0, - .arginfo = SCM_ARGS(2), - }; - pr_notice("Going down for restart now\n"); msm_restart_prepare(cmd); @@ -341,16 +380,7 @@ static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd) msm_trigger_wdog_bite(); #endif - /* Needed to bypass debug image on some chips */ - if (!is_scm_armv8()) - ret = scm_call_atomic2(SCM_SVC_BOOT, - SCM_WDOG_DEBUG_BOOT_PART, 1, 0); - else - ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT, - SCM_WDOG_DEBUG_BOOT_PART), &desc); - if (ret) - pr_err("Failed to disable secure wdog debug: %d\n", ret); - + scm_disable_sdi(); halt_spmi_pmic_arbiter(); deassert_ps_hold(); @@ -359,27 +389,11 @@ static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd) static void do_msm_poweroff(void) { - int ret; - struct scm_desc desc = { - .args[0] = 1, - .args[1] = 0, - .arginfo = SCM_ARGS(2), - }; - pr_notice("Powering off the SoC\n"); -#ifdef CONFIG_QCOM_DLOAD_MODE + set_dload_mode(0); -#endif + scm_disable_sdi(); qpnp_pon_system_pwr_off(PON_POWER_OFF_SHUTDOWN); - /* Needed to bypass debug image on some chips */ - if (!is_scm_armv8()) - ret = scm_call_atomic2(SCM_SVC_BOOT, - SCM_WDOG_DEBUG_BOOT_PART, 1, 0); - else - ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT, - SCM_WDOG_DEBUG_BOOT_PART), &desc); - if (ret) - pr_err("Failed to disable wdog debug: %d\n", ret); halt_spmi_pmic_arbiter(); deassert_ps_hold(); @@ -389,6 +403,84 @@ static void do_msm_poweroff(void) return; } +static ssize_t attr_show(struct kobject *kobj, struct 
attribute *attr, + char *buf) +{ + struct reset_attribute *reset_attr = to_reset_attr(attr); + ssize_t ret = -EIO; + + if (reset_attr->show) + ret = reset_attr->show(kobj, attr, buf); + + return ret; +} + +static ssize_t attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct reset_attribute *reset_attr = to_reset_attr(attr); + ssize_t ret = -EIO; + + if (reset_attr->store) + ret = reset_attr->store(kobj, attr, buf, count); + + return ret; +} + +static const struct sysfs_ops reset_sysfs_ops = { + .show = attr_show, + .store = attr_store, +}; + +static struct kobj_type reset_ktype = { + .sysfs_ops = &reset_sysfs_ops, +}; + +static ssize_t show_emmc_dload(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + uint32_t read_val, show_val; + + read_val = __raw_readl(dload_type_addr); + if (read_val == EMMC_DLOAD_TYPE) + show_val = 1; + else + show_val = 0; + + return snprintf(buf, sizeof(show_val), "%u\n", show_val); +} + +static size_t store_emmc_dload(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + uint32_t enabled; + int ret; + + ret = kstrtouint(buf, 0, &enabled); + if (ret < 0) + return ret; + + if (!((enabled == 0) || (enabled == 1))) + return -EINVAL; + + if (enabled == 1) + __raw_writel(EMMC_DLOAD_TYPE, dload_type_addr); + else + __raw_writel(0, dload_type_addr); + + return count; +} +RESET_ATTR(emmc_dload, 0644, show_emmc_dload, store_emmc_dload); + +static struct attribute *reset_attrs[] = { + &reset_attr_emmc_dload.attr, + NULL +}; + +static struct attribute_group reset_attr_group = { + .attrs = reset_attrs, +}; + static int msm_restart_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -419,6 +511,33 @@ static int msm_restart_probe(struct platform_device *pdev) pr_err("unable to map imem EDLOAD mode offset\n"); } + np = of_find_compatible_node(NULL, NULL, + "qcom,msm-imem-dload-type"); + if (!np) { + pr_err("unable to find DT imem dload-type node\n"); + goto skip_sysfs_create; + } else { + dload_type_addr = of_iomap(np, 0); + if (!dload_type_addr) { + pr_err("unable to map imem dload-type offset\n"); + goto skip_sysfs_create; + } + } + + ret = kobject_init_and_add(&dload_kobj, &reset_ktype, + kernel_kobj, "%s", "dload"); + if (ret) { + pr_err("%s:Error in creation kobject_add\n", __func__); + kobject_put(&dload_kobj); + goto skip_sysfs_create; + } + + ret = sysfs_create_group(&dload_kobj, &reset_attr_group); + if (ret) { + pr_err("%s:Error in creation sysfs_create_group\n", __func__); + kobject_del(&dload_kobj); + } +skip_sysfs_create: #endif np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-restart_reason"); @@ -454,6 +573,8 @@ static int msm_restart_probe(struct platform_device *pdev) download_mode = scm_is_secure_device(); set_dload_mode(download_mode); + if (!download_mode) + scm_disable_sdi(); return 0; diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c index ac71f2c75472..6d0c1fbe566b 100644 --- a/drivers/pwm/pwm-qpnp.c +++ b/drivers/pwm/pwm-qpnp.c @@ -1879,7 +1879,7 @@ static int qpnp_parse_dt_config(struct platform_device *pdev, int rc, enable, lut_entry_size, list_size, i; const char *lable; const __be32 *prop; - u64 size; + u32 size; struct device_node *node; int found_pwm_subnode = 0; int found_lpg_subnode = 0; @@ -1968,11 +1968,18 @@ static int qpnp_parse_dt_config(struct platform_device *pdev, return rc; prop = of_get_address_by_name(pdev->dev.of_node, QPNP_LPG_LUT_BASE, - &size, 0); + 0, 0); if (!prop) { chip->flags |= 
QPNP_PWM_LUT_NOT_SUPPORTED; } else { lpg_config->lut_base_addr = be32_to_cpu(*prop); + rc = of_property_read_u32(of_node, "qcom,lpg-lut-size", &size); + if (rc < 0) { + dev_err(&pdev->dev, "Error reading qcom,lpg-lut-size, rc=%d\n", + rc); + return rc; + } + /* * Each entry of LUT is of 2 bytes for generic LUT and of 1 byte * for KPDBL/GLED LUT. diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 27a5deb1213e..80a9f0ee288b 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -4223,7 +4223,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev) debugfs_create_file("consumers", 0444, rdev->debugfs, rdev, ®_consumers_fops); - reg = regulator_get(NULL, rdev->desc->name); + reg = regulator_get(NULL, rdev_get_name(rdev)); if (IS_ERR(reg) || reg == NULL) { pr_err("Error-Bad Function Input\n"); goto error; diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c index cd02debc37aa..0df2b80ceca5 100644 --- a/drivers/regulator/cpr3-regulator.c +++ b/drivers/regulator/cpr3-regulator.c @@ -1264,6 +1264,8 @@ static void cprh_controller_program_sdelta( mb(); } +static int cprh_regulator_aging_adjust(struct cpr3_controller *ctrl); + /** * cpr3_regulator_init_cprh() - performs hardware initialization at the * controller and thread level required for CPRh operation. @@ -1290,6 +1292,16 @@ static int cpr3_regulator_init_cprh(struct cpr3_controller *ctrl) return -EINVAL; } + rc = cprh_regulator_aging_adjust(ctrl); + if (rc && rc != -ETIMEDOUT) { + /* + * Don't fail initialization if the CPR aging measurement + * timed out due to sensors not being available. + */ + cpr3_err(ctrl, "CPR aging adjustment failed, rc=%d\n", rc); + return rc; + } + cprh_controller_program_sdelta(ctrl); rc = cpr3_regulator_init_cprh_corners(&ctrl->thread[0].vreg[0]); @@ -3346,7 +3358,7 @@ static int cpr3_regulator_measure_aging(struct cpr3_controller *ctrl, u32 mask, reg, result, quot_min, quot_max, sel_min, sel_max; u32 quot_min_scaled, quot_max_scaled; u32 gcnt, gcnt_ref, gcnt0_restore, gcnt1_restore, irq_restore; - u32 cont_dly_restore, up_down_dly_restore = 0; + u32 ro_mask_restore, cont_dly_restore, up_down_dly_restore = 0; int quot_delta, quot_delta_scaled, quot_delta_scaled_sum; int *quot_delta_results; int rc, rc2, i, aging_measurement_count, filtered_count; @@ -3379,7 +3391,8 @@ static int cpr3_regulator_measure_aging(struct cpr3_controller *ctrl, /* Switch from HW to SW closed-loop if necessary */ if (ctrl->supports_hw_closed_loop) { - if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { + if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4 || + ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) { cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK, CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE); @@ -3397,6 +3410,10 @@ static int cpr3_regulator_measure_aging(struct cpr3_controller *ctrl, cpr3_write(ctrl, CPR3_REG_GCNT(0), gcnt); cpr3_write(ctrl, CPR3_REG_GCNT(1), gcnt); + /* Unmask all RO's */ + ro_mask_restore = cpr3_read(ctrl, CPR3_REG_RO_MASK(0)); + cpr3_write(ctrl, CPR3_REG_RO_MASK(0), 0); + /* * Mask all sensors except for the one to measure and bypass all * sensors in collapsible domains. 
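The aging-measurement hunk above saves the RO mask register into ro_mask_restore, unmasks all ring oscillators for the measurement, and (in the cleanup hunk that follows) writes the saved value back. Below is a minimal standalone sketch of that save/clear/restore pattern; the register array and the reg_read()/reg_write() helpers are stand-ins invented for illustration, not the driver's real cpr3_read()/cpr3_write() MMIO accessors.

#include <stdint.h>
#include <stdio.h>

/* Stand-in register file; the real driver performs MMIO instead. */
static uint32_t regs[1] = { 0x0000000f }; /* pretend some ROs start masked */
#define RO_MASK_REG 0

static uint32_t reg_read(int offs) { return regs[offs]; }
static void reg_write(int offs, uint32_t val) { regs[offs] = val; }

static void measure_aging(void)
{
	/* Save the current RO mask, then unmask all ring oscillators. */
	uint32_t ro_mask_restore = reg_read(RO_MASK_REG);

	reg_write(RO_MASK_REG, 0);
	printf("measuring with RO mask = 0x%x\n", reg_read(RO_MASK_REG));

	/* Cleanup path: put the original mask back regardless of outcome. */
	reg_write(RO_MASK_REG, ro_mask_restore);
}

int main(void)
{
	measure_aging();
	printf("RO mask restored to 0x%x\n", regs[RO_MASK_REG]);
	return 0;
}

The same capture-before-modify discipline is what lets the cleanup label undo the GCNT, IRQ, and delay register changes made earlier in the measurement.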
@@ -3535,6 +3552,8 @@ cleanup: cpr3_write(ctrl, CPR3_REG_IRQ_EN, irq_restore); + cpr3_write(ctrl, CPR3_REG_RO_MASK(0), ro_mask_restore); + cpr3_write(ctrl, CPR3_REG_GCNT(0), gcnt0_restore); cpr3_write(ctrl, CPR3_REG_GCNT(1), gcnt1_restore); @@ -3565,7 +3584,8 @@ cleanup: CPR3_IRQ_UP | CPR3_IRQ_DOWN | CPR3_IRQ_MID); if (ctrl->supports_hw_closed_loop) { - if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) { + if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4 || + ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) { cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL, CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK, ctrl->use_hw_closed_loop @@ -3671,14 +3691,16 @@ static void cpr3_regulator_readjust_volt_and_quot(struct cpr3_regulator *vreg, static void cpr3_regulator_set_aging_ref_adjustment( struct cpr3_controller *ctrl, int ref_adjust_volt) { + struct cpr3_regulator *vreg; int i, j; for (i = 0; i < ctrl->thread_count; i++) { for (j = 0; j < ctrl->thread[i].vreg_count; j++) { - cpr3_regulator_readjust_volt_and_quot( - &ctrl->thread[i].vreg[j], - ctrl->aging_ref_adjust_volt, - ref_adjust_volt); + vreg = &ctrl->thread[i].vreg[j]; + cpr3_regulator_readjust_volt_and_quot(vreg, + ctrl->aging_ref_adjust_volt, ref_adjust_volt); + if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) + cprh_adjust_voltages_for_apm(vreg); } } @@ -3867,6 +3889,126 @@ cleanup: } /** + * cprh_regulator_aging_adjust() - adjust the target quotients and open-loop + * voltages for CPRh regulators based on the output of CPR aging + * sensors + * @ctrl: Pointer to the CPR3 controller + * + * Return: 0 on success, errno on failure + */ +static int cprh_regulator_aging_adjust(struct cpr3_controller *ctrl) +{ + int i, j, id, rc, rc2, aging_volt, init_volt; + int max_aging_volt = 0; + u32 reg; + + if (!ctrl->aging_required || !ctrl->cpr_enabled) + return 0; + + if (!ctrl->vdd_regulator) { + cpr3_err(ctrl, "vdd-supply regulator missing\n"); + return -ENODEV; + } + + init_volt = regulator_get_voltage(ctrl->vdd_regulator); + if (init_volt < 0) { + cpr3_err(ctrl, "could not get vdd-supply voltage, rc=%d\n", + init_volt); + return init_volt; + } + + if (init_volt > ctrl->aging_ref_volt) { + cpr3_info(ctrl, "unable to perform CPR aging measurement as vdd=%d uV > aging voltage=%d uV\n", + init_volt, ctrl->aging_ref_volt); + return 0; + } + + /* Verify that none of the aging sensors are currently masked. 
*/ + for (i = 0; i < ctrl->aging_sensor_count; i++) { + id = ctrl->aging_sensor[i].sensor_id; + reg = cpr3_read(ctrl, CPR3_REG_SENSOR_MASK_READ(id)); + if (reg & BIT(id % 32)) { + cpr3_info(ctrl, "unable to perform CPR aging measurement as CPR sensor %d is masked\n", + id); + return 0; + } + } + + rc = regulator_set_voltage(ctrl->vdd_regulator, ctrl->aging_ref_volt, + INT_MAX); + if (rc) { + cpr3_err(ctrl, "unable to set vdd-supply to aging voltage=%d uV, rc=%d\n", + ctrl->aging_ref_volt, rc); + return rc; + } + + if (ctrl->aging_vdd_mode) { + rc = regulator_set_mode(ctrl->vdd_regulator, + ctrl->aging_vdd_mode); + if (rc) { + cpr3_err(ctrl, "unable to configure vdd-supply for mode=%u, rc=%d\n", + ctrl->aging_vdd_mode, rc); + goto cleanup; + } + } + + /* Perform aging measurement on all aging sensors */ + for (i = 0; i < ctrl->aging_sensor_count; i++) { + for (j = 0; j < CPR3_AGING_RETRY_COUNT; j++) { + rc = cpr3_regulator_measure_aging(ctrl, + &ctrl->aging_sensor[i]); + if (!rc) + break; + } + + if (!rc) { + aging_volt = + cpr3_voltage_adjustment( + ctrl->aging_sensor[i].ro_scale, + ctrl->aging_sensor[i].measured_quot_diff + - ctrl->aging_sensor[i].init_quot_diff); + max_aging_volt = max(max_aging_volt, aging_volt); + } else { + cpr3_err(ctrl, "CPR aging measurement failed after %d tries, rc=%d\n", + j, rc); + ctrl->aging_failed = true; + ctrl->aging_required = false; + goto cleanup; + } + } + +cleanup: + /* Adjust the CPR target quotients according to the aging measurement */ + if (!rc) { + cpr3_regulator_set_aging_ref_adjustment(ctrl, max_aging_volt); + + cpr3_info(ctrl, "aging measurement successful; aging reference adjustment voltage=%d uV\n", + ctrl->aging_ref_adjust_volt); + ctrl->aging_succeeded = true; + ctrl->aging_required = false; + } + + rc2 = regulator_set_voltage(ctrl->vdd_regulator, init_volt, INT_MAX); + if (rc2) { + cpr3_err(ctrl, "unable to reset vdd-supply to initial voltage=%d uV, rc=%d\n", + init_volt, rc2); + return rc2; + } + + if (ctrl->aging_complete_vdd_mode) { + rc2 = regulator_set_mode(ctrl->vdd_regulator, + ctrl->aging_complete_vdd_mode); + if (rc2) { + cpr3_err(ctrl, "unable to configure vdd-supply for mode=%u, rc=%d\n", + ctrl->aging_complete_vdd_mode, rc2); + return rc2; + } + } + + return rc; +} + +/** * cpr3_regulator_update_ctrl_state() - update the state of the CPR controller * to reflect the corners used by all CPR3 regulators as well as * the CPR operating mode and perform aging adjustments if needed diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h index 8897def3ef76..ac571271b0d5 100644 --- a/drivers/regulator/cpr3-regulator.h +++ b/drivers/regulator/cpr3-regulator.h @@ -875,6 +875,7 @@ int cpr4_parse_core_count_temp_voltage_adj(struct cpr3_regulator *vreg, bool use_corner_band); int cpr3_apm_init(struct cpr3_controller *ctrl); int cpr3_mem_acc_init(struct cpr3_regulator *vreg); +void cprh_adjust_voltages_for_apm(struct cpr3_regulator *vreg); #else @@ -1047,6 +1048,10 @@ static inline int cpr3_mem_acc_init(struct cpr3_regulator *vreg) return 0; } +static inline void cprh_adjust_voltages_for_apm(struct cpr3_regulator *vreg) +{ +} + #endif /* CONFIG_REGULATOR_CPR3 */ #endif /* __REGULATOR_CPR_REGULATOR_H__ */ diff --git a/drivers/regulator/cpr3-util.c b/drivers/regulator/cpr3-util.c index 51179f28fcf5..c377a65a6393 100644 --- a/drivers/regulator/cpr3-util.c +++ b/drivers/regulator/cpr3-util.c @@ -1202,6 +1202,23 @@ int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl) if (rc) return rc; + ctrl->vdd_regulator = 
devm_regulator_get(ctrl->dev, "vdd"); + if (IS_ERR(ctrl->vdd_regulator)) { + rc = PTR_ERR(ctrl->vdd_regulator); + if (rc != -EPROBE_DEFER) { + /* vdd-supply is optional for CPRh controllers. */ + if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) { + cpr3_debug(ctrl, "unable to request vdd regulator, rc=%d\n", + rc); + ctrl->vdd_regulator = NULL; + return 0; + } + cpr3_err(ctrl, "unable to request vdd regulator, rc=%d\n", + rc); + } + return rc; + } + /* * Regulator device handles are not necessary for CPRh controllers * since communication with the regulators is completely managed @@ -1210,15 +1227,6 @@ int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl) if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPRH) return rc; - ctrl->vdd_regulator = devm_regulator_get(ctrl->dev, "vdd"); - if (IS_ERR(ctrl->vdd_regulator)) { - rc = PTR_ERR(ctrl->vdd_regulator); - if (rc != -EPROBE_DEFER) - cpr3_err(ctrl, "unable request vdd regulator, rc=%d\n", - rc); - return rc; - } - ctrl->system_regulator = devm_regulator_get_optional(ctrl->dev, "system"); if (IS_ERR(ctrl->system_regulator)) { @@ -2000,3 +2008,78 @@ done: return rc; } + +/** + * cprh_adjust_voltages_for_apm() - adjust per-corner floor and ceiling voltages + * so that they do not overlap the APM threshold voltage. + * @vreg: Pointer to the CPR3 regulator + * + * The memory array power mux (APM) must be configured for a specific supply + * based upon where the VDD voltage lies with respect to the APM threshold + * voltage. When using CPR hardware closed-loop, the voltage may vary anywhere + * between the floor and ceiling voltage without software notification. + * Therefore, it is required that the floor to ceiling range for every corner + * not intersect the APM threshold voltage. This function adjusts the floor to + * ceiling range for each corner which violates this requirement. + * + * The following algorithm is applied: + * if floor < threshold <= ceiling: + * if open_loop >= threshold, then floor = threshold - adj + * else ceiling = threshold - step + * where: + * adj = APM hysteresis voltage established to minimize the number of + * corners with artificially increased floor voltages + * step = voltage in microvolts of a single step of the VDD supply + * + * The open-loop voltage is also bounded by the new floor or ceiling value as + * needed. + * + * Return: none + */ +void cprh_adjust_voltages_for_apm(struct cpr3_regulator *vreg) +{ + struct cpr3_controller *ctrl = vreg->thread->ctrl; + struct cpr3_corner *corner; + int i, adj, threshold, prev_ceiling, prev_floor, prev_open_loop; + + if (!ctrl->apm_threshold_volt) { + /* APM not being used. 
*/ + return; + } + + ctrl->apm_threshold_volt = CPR3_ROUND(ctrl->apm_threshold_volt, + ctrl->step_volt); + ctrl->apm_adj_volt = CPR3_ROUND(ctrl->apm_adj_volt, ctrl->step_volt); + + threshold = ctrl->apm_threshold_volt; + adj = ctrl->apm_adj_volt; + + for (i = 0; i < vreg->corner_count; i++) { + corner = &vreg->corner[i]; + + if (threshold <= corner->floor_volt + || threshold > corner->ceiling_volt) + continue; + + prev_floor = corner->floor_volt; + prev_ceiling = corner->ceiling_volt; + prev_open_loop = corner->open_loop_volt; + + if (corner->open_loop_volt >= threshold) { + corner->floor_volt = max(corner->floor_volt, + threshold - adj); + if (corner->open_loop_volt < corner->floor_volt) + corner->open_loop_volt = corner->floor_volt; + } else { + corner->ceiling_volt = threshold - ctrl->step_volt; + } + + if (corner->floor_volt != prev_floor + || corner->ceiling_volt != prev_ceiling + || corner->open_loop_volt != prev_open_loop) + cpr3_debug(vreg, "APM threshold=%d, APM adj=%d changed corner %d voltages; prev: floor=%d, ceiling=%d, open-loop=%d; new: floor=%d, ceiling=%d, open-loop=%d\n", + threshold, adj, i, prev_floor, prev_ceiling, + prev_open_loop, corner->floor_volt, + corner->ceiling_volt, corner->open_loop_volt); + } +} diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c index 284180b0e72f..953ea5f33f40 100644 --- a/drivers/regulator/cprh-kbss-regulator.c +++ b/drivers/regulator/cprh-kbss-regulator.c @@ -54,6 +54,8 @@ * @force_highest_corner: Flag indicating that all corners must operate * at the voltage of the highest corner. This is * applicable to MSMCOBALT only. + * @aging_init_quot_diff: Initial quotient difference between CPR aging + * min and max sensors measured at time of manufacturing * * This struct holds the values for all of the fuses read from memory. 
*/ @@ -65,6 +67,7 @@ struct cprh_msmcobalt_kbss_fuses { u64 speed_bin; u64 cpr_fusing_rev; u64 force_highest_corner; + u64 aging_init_quot_diff; }; /* @@ -192,6 +195,18 @@ msmcobalt_cpr_force_highest_corner_param[] = { {}, }; +static const struct cpr3_fuse_param +msmcobalt_kbss_aging_init_quot_diff_param[2][2] = { + [MSMCOBALT_KBSS_POWER_CLUSTER_ID] = { + {69, 6, 13}, + {}, + }, + [MSMCOBALT_KBSS_PERFORMANCE_CLUSTER_ID] = { + {71, 25, 32}, + {}, + }, +}; + /* * Open loop voltage fuse reference voltages in microvolts for MSMCOBALT v1 */ @@ -225,6 +240,8 @@ msmcobalt_v2_kbss_fuse_ref_volt[2][MSMCOBALT_KBSS_FUSE_CORNERS] = { #define MSMCOBALT_KBSS_FUSE_STEP_VOLT 10000 #define MSMCOBALT_KBSS_VOLTAGE_FUSE_SIZE 6 #define MSMCOBALT_KBSS_QUOT_OFFSET_SCALE 5 +#define MSMCOBALT_KBSS_AGING_INIT_QUOT_DIFF_SIZE 8 +#define MSMCOBALT_KBSS_AGING_INIT_QUOT_DIFF_SCALE 1 #define MSMCOBALT_KBSS_POWER_CPR_SENSOR_COUNT 6 #define MSMCOBALT_KBSS_PERFORMANCE_CPR_SENSOR_COUNT 9 @@ -242,6 +259,12 @@ msmcobalt_v2_kbss_fuse_ref_volt[2][MSMCOBALT_KBSS_FUSE_CORNERS] = { #define MSMCOBALT_KBSS_PERFORMANCE_TEMP_SENSOR_ID_START 6 #define MSMCOBALT_KBSS_PERFORMANCE_TEMP_SENSOR_ID_END 11 +#define MSMCOBALT_KBSS_POWER_AGING_SENSOR_ID 0 +#define MSMCOBALT_KBSS_POWER_AGING_BYPASS_MASK0 0 + +#define MSMCOBALT_KBSS_PERFORMANCE_AGING_SENSOR_ID 0 +#define MSMCOBALT_KBSS_PERFORMANCE_AGING_BYPASS_MASK0 0 + /** * cprh_msmcobalt_kbss_read_fuse_data() - load KBSS specific fuse parameter values * @vreg: Pointer to the CPR3 regulator @@ -321,6 +344,15 @@ static int cprh_msmcobalt_kbss_read_fuse_data(struct cpr3_regulator *vreg) } rc = cpr3_read_fuse_param(base, + msmcobalt_kbss_aging_init_quot_diff_param[id], + &fuse->aging_init_quot_diff); + if (rc) { + cpr3_err(vreg, "Unable to read aging initial quotient difference fuse, rc=%d\n", + rc); + return rc; + } + + rc = cpr3_read_fuse_param(base, msmcobalt_cpr_force_highest_corner_param, &fuse->force_highest_corner); if (rc) { @@ -826,6 +858,7 @@ static int cprh_kbss_apm_crossover_as_corner(struct cpr3_regulator *vreg) corner->floor_volt = ctrl->apm_crossover_volt; corner->ceiling_volt = ctrl->apm_crossover_volt; corner->open_loop_volt = ctrl->apm_crossover_volt; + corner->abs_ceiling_volt = ctrl->apm_crossover_volt; corner->use_open_loop = true; vreg->corner_count++; @@ -833,79 +866,6 @@ static int cprh_kbss_apm_crossover_as_corner(struct cpr3_regulator *vreg) } /** - * cprh_kbss_adjust_voltages_for_apm() - adjust per-corner floor and ceiling - * voltages so that they do not overlap the APM threshold voltage. - * @vreg: Pointer to the CPR3 regulator - * - * The KBSS memory array power mux (APM) must be configured for a specific - * supply based upon where the VDD voltage lies with respect to the APM - * threshold voltage. When using CPR hardware closed-loop, the voltage may vary - * anywhere between the floor and ceiling voltage without software notification. - * Therefore, it is required that the floor to ceiling range for every corner - * not intersect the APM threshold voltage. This function adjusts the floor to - * ceiling range for each corner which violates this requirement. 
- * - * The following algorithm is applied in the case that - * floor < threshold <= ceiling: - * if open_loop >= threshold, then floor = threshold - adj - * else ceiling = threshold - step - * where adj = APM hysteresis voltage established to minimize number - * of corners with artificially increased floor voltages - * and step = voltage in microvolts of a single step of the VDD supply - * - * The open-loop voltage is also bounded by the new floor or ceiling value as - * needed. - * - * Return: 0 on success, errno on failure - */ -static int cprh_kbss_adjust_voltages_for_apm(struct cpr3_regulator *vreg) -{ - struct cpr3_controller *ctrl = vreg->thread->ctrl; - struct cpr3_corner *corner; - int i, adj, threshold, prev_ceiling, prev_floor, prev_open_loop; - - if (!ctrl->apm_threshold_volt) { - /* APM not being used. */ - return 0; - } - - ctrl->apm_threshold_volt = CPR3_ROUND(ctrl->apm_threshold_volt, - ctrl->step_volt); - ctrl->apm_adj_volt = CPR3_ROUND(ctrl->apm_adj_volt, ctrl->step_volt); - - threshold = ctrl->apm_threshold_volt; - adj = ctrl->apm_adj_volt; - - for (i = 0; i < vreg->corner_count; i++) { - corner = &vreg->corner[i]; - - if (threshold <= corner->floor_volt - || threshold > corner->ceiling_volt) - continue; - - prev_floor = corner->floor_volt; - prev_ceiling = corner->ceiling_volt; - prev_open_loop = corner->open_loop_volt; - - if (corner->open_loop_volt >= threshold) { - corner->floor_volt = max(corner->floor_volt, - threshold - adj); - if (corner->open_loop_volt < corner->floor_volt) - corner->open_loop_volt = corner->floor_volt; - } else { - corner->ceiling_volt = threshold - ctrl->step_volt; - } - - cpr3_debug(vreg, "APM threshold=%d, APM adj=%d changed corner %d voltages; prev: floor=%d, ceiling=%d, open-loop=%d; new: floor=%d, ceiling=%d, open-loop=%d\n", - threshold, adj, i, prev_floor, prev_ceiling, - prev_open_loop, corner->floor_volt, - corner->ceiling_volt, corner->open_loop_volt); - } - - return 0; -} - -/** * cprh_msmcobalt_kbss_set_no_interpolation_quotients() - use the fused target * quotient values for lower frequencies. 
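The helper removed above and its generic replacement in cpr3-util.c implement the same two adjustment rules, which are easiest to see with numbers. A standalone sketch with hypothetical values (threshold = 850 mV, VDD step = 10 mV, hysteresis adj = 30 mV; none of these come from a real target):

#include <stdio.h>

struct corner { int floor_uv; int ceiling_uv; int open_loop_uv; };

static void adjust_for_apm(struct corner *c, int threshold, int step, int adj)
{
	/* Only corners whose range straddles the threshold are touched. */
	if (threshold <= c->floor_uv || threshold > c->ceiling_uv)
		return;

	if (c->open_loop_uv >= threshold) {
		/* Pin the whole corner above the APM threshold. */
		if (c->floor_uv < threshold - adj)
			c->floor_uv = threshold - adj;
		if (c->open_loop_uv < c->floor_uv)
			c->open_loop_uv = c->floor_uv;
	} else {
		/* Pin the whole corner below the APM threshold. */
		c->ceiling_uv = threshold - step;
	}
}

int main(void)
{
	struct corner a = { 800000, 900000, 860000 };	/* open-loop above */
	struct corner b = { 800000, 900000, 830000 };	/* open-loop below */

	adjust_for_apm(&a, 850000, 10000, 30000);
	adjust_for_apm(&b, 850000, 10000, 30000);

	/* a: floor raised 800 -> 820 mV; b: ceiling lowered 900 -> 840 mV */
	printf("a: %d..%d  b: %d..%d\n",
	       a.floor_uv, a.ceiling_uv, b.floor_uv, b.ceiling_uv);
	return 0;
}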
* @vreg: Pointer to the CPR3 regulator @@ -1235,12 +1195,7 @@ static int cprh_kbss_init_regulator(struct cpr3_regulator *vreg) return rc; } - rc = cprh_kbss_adjust_voltages_for_apm(vreg); - if (rc) { - cpr3_err(vreg, "unable to adjust voltages for APM\n, rc=%d\n", - rc); - return rc; - } + cprh_adjust_voltages_for_apm(vreg); cpr3_open_loop_voltage_as_ceiling(vreg); @@ -1299,6 +1254,80 @@ static int cprh_kbss_init_regulator(struct cpr3_regulator *vreg) } /** + * cprh_kbss_init_aging() - perform KBSS CPRh controller specific aging + * initializations + * @ctrl: Pointer to the CPR3 controller + * + * Return: 0 on success, errno on failure + */ +static int cprh_kbss_init_aging(struct cpr3_controller *ctrl) +{ + struct cprh_msmcobalt_kbss_fuses *fuse = NULL; + struct cpr3_regulator *vreg; + u32 aging_ro_scale; + int i, j, rc; + + for (i = 0; i < ctrl->thread_count; i++) { + for (j = 0; j < ctrl->thread[i].vreg_count; j++) { + if (ctrl->thread[i].vreg[j].aging_allowed) { + ctrl->aging_required = true; + vreg = &ctrl->thread[i].vreg[j]; + fuse = vreg->platform_fuses; + break; + } + } + } + + if (!ctrl->aging_required || !fuse || !vreg) + return 0; + + rc = cpr3_parse_array_property(vreg, "qcom,cpr-aging-ro-scaling-factor", + 1, &aging_ro_scale); + if (rc) + return rc; + + if (aging_ro_scale == 0) { + cpr3_err(ctrl, "aging RO scaling factor is invalid: %u\n", + aging_ro_scale); + return -EINVAL; + } + + ctrl->aging_vdd_mode = REGULATOR_MODE_NORMAL; + ctrl->aging_complete_vdd_mode = REGULATOR_MODE_IDLE; + + ctrl->aging_sensor_count = 1; + ctrl->aging_sensor = kzalloc(sizeof(*ctrl->aging_sensor), GFP_KERNEL); + if (!ctrl->aging_sensor) + return -ENOMEM; + + if (ctrl->ctrl_id == MSMCOBALT_KBSS_POWER_CLUSTER_ID) { + ctrl->aging_sensor->sensor_id + = MSMCOBALT_KBSS_POWER_AGING_SENSOR_ID; + ctrl->aging_sensor->bypass_mask[0] + = MSMCOBALT_KBSS_POWER_AGING_BYPASS_MASK0; + } else { + ctrl->aging_sensor->sensor_id + = MSMCOBALT_KBSS_PERFORMANCE_AGING_SENSOR_ID; + ctrl->aging_sensor->bypass_mask[0] + = MSMCOBALT_KBSS_PERFORMANCE_AGING_BYPASS_MASK0; + } + ctrl->aging_sensor->ro_scale = aging_ro_scale; + + ctrl->aging_sensor->init_quot_diff + = cpr3_convert_open_loop_voltage_fuse(0, + MSMCOBALT_KBSS_AGING_INIT_QUOT_DIFF_SCALE, + fuse->aging_init_quot_diff, + MSMCOBALT_KBSS_AGING_INIT_QUOT_DIFF_SIZE); + + cpr3_debug(ctrl, "sensor %u aging init quotient diff = %d, aging RO scale = %u QUOT/V\n", + ctrl->aging_sensor->sensor_id, + ctrl->aging_sensor->init_quot_diff, + ctrl->aging_sensor->ro_scale); + + return 0; +} + +/** * cprh_kbss_init_controller() - perform KBSS CPRh controller specific * initializations * @ctrl: Pointer to the CPR3 controller @@ -1566,6 +1595,13 @@ static int cprh_kbss_regulator_probe(struct platform_device *pdev) return rc; } + rc = cprh_kbss_init_aging(ctrl); + if (rc) { + cpr3_err(ctrl, "failed to initialize aging configurations, rc=%d\n", + rc); + return rc; + } + platform_set_drvdata(pdev, ctrl); rc = cprh_kbss_populate_opp_table(ctrl); diff --git a/drivers/scsi/ufs/ufs_test.c b/drivers/scsi/ufs/ufs_test.c index 8953722e8dad..e23dc3e8d9da 100644 --- a/drivers/scsi/ufs/ufs_test.c +++ b/drivers/scsi/ufs/ufs_test.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 
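cprh_kbss_init_aging() above only records the fused initial quotient difference and the RO scaling factor; the adjustment itself happens in the aging-measurement path, where cpr3_voltage_adjustment() turns the measured-minus-initial quotient delta into microvolts. That helper is not part of this hunk, but since ro_scale is reported in QUOT/V, the arithmetic plausibly reduces to the sketch below (an assumption, not the verified implementation):

/* Assumed scaling: quotient counts divided by counts-per-volt, in uV. */
static inline long aging_adjust_uv(long quot_delta, long ro_scale_quot_per_v)
{
	/* round up so the margin errs on the safe side */
	return (quot_delta * 1000000L + ro_scale_quot_per_v - 1) /
	       ro_scale_quot_per_v;
}

/*
 * Example: a delta of 12 quotient counts at 4000 QUOT/V is roughly
 * 3000 uV; the controller keeps the largest such value across sensors
 * as ctrl->aging_ref_adjust_volt.
 */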
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -689,13 +689,13 @@ static void scenario_free_end_io_fn(struct request *rq, int err) __blk_put_request(test_iosched->req_q, test_rq->rq); spin_unlock_irqrestore(&test_iosched->lock, flags); - test_iosched_free_test_req_data_buffer(test_rq); - kfree(test_rq); - if (err) pr_err("%s: request %d completed, err=%d", __func__, test_rq->req_id, err); + test_iosched_free_test_req_data_buffer(test_rq); + kfree(test_rq); + check_test_completion(test_iosched); } @@ -984,14 +984,14 @@ static void long_test_free_end_io_fn(struct request *rq, int err) return; } - test_iosched_free_test_req_data_buffer(test_rq); - kfree(test_rq); - utd->completed_req_count++; - if (err) pr_err("%s: request %d completed, err=%d", __func__, test_rq->req_id, err); + test_iosched_free_test_req_data_buffer(test_rq); + kfree(test_rq); + utd->completed_req_count++; + check_test_completion(test_iosched); } @@ -1007,7 +1007,7 @@ static void long_test_free_end_io_fn(struct request *rq, int err) static int run_long_test(struct test_iosched *test_iosched) { int ret = 0; - int direction, num_bios_per_request; + int direction, num_bios_per_request = 1; static unsigned int inserted_requests; u32 sector, seed, num_bios, seq_sector_delta; struct ufs_test_data *utd = test_iosched->blk_dev_test_data; @@ -1028,14 +1028,12 @@ static int run_long_test(struct test_iosched *test_iosched) /* Set test parameters */ switch (test_iosched->test_info.testcase) { case UFS_TEST_LONG_RANDOM_READ: - num_bios_per_request = 1; utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) / (LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE * num_bios_per_request); direction = READ; break; case UFS_TEST_LONG_RANDOM_WRITE: - num_bios_per_request = 1; utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) / (LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE * num_bios_per_request); diff --git a/drivers/soc/qcom/common_log.c b/drivers/soc/qcom/common_log.c index f4c69d624342..ecf89b2b3b37 100644 --- a/drivers/soc/qcom/common_log.c +++ b/drivers/soc/qcom/common_log.c @@ -20,7 +20,7 @@ #include <soc/qcom/memory_dump.h> #define MISC_DUMP_DATA_LEN 4096 -#define PMIC_DUMP_DATA_LEN 4096 +#define PMIC_DUMP_DATA_LEN (64 * 1024) #define VSENSE_DUMP_DATA_LEN 4096 #define RPM_DUMP_DATA_LEN (160 * 1024) diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index 999e6f93e873..2aa588ba610b 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -260,7 +260,12 @@ void *icnss_ipc_log_context; void *icnss_ipc_log_long_context; #endif -#define ICNSS_EVENT_PENDING 2989 +#define ICNSS_EVENT_PENDING 2989 + +#define ICNSS_EVENT_SYNC BIT(0) +#define ICNSS_EVENT_UNINTERRUPTIBLE BIT(1) +#define ICNSS_EVENT_SYNC_UNINTERRUPTIBLE (ICNSS_EVENT_UNINTERRUPTIBLE | \ + ICNSS_EVENT_SYNC) enum icnss_driver_event_type { ICNSS_DRIVER_EVENT_SERVER_ARRIVE, @@ -291,7 +296,6 @@ enum icnss_driver_state { ICNSS_FW_READY, ICNSS_DRIVER_PROBED, ICNSS_FW_TEST_MODE, - ICNSS_SUSPEND, ICNSS_PM_SUSPEND, ICNSS_PM_SUSPEND_NOIRQ, ICNSS_SSR_ENABLED, @@ -359,6 +363,8 @@ struct icnss_stats { uint32_t pm_suspend_noirq_err; uint32_t pm_resume_noirq; uint32_t pm_resume_noirq_err; + uint32_t pm_stay_awake; + uint32_t pm_relax; uint32_t ind_register_req; uint32_t ind_register_resp; @@ -436,7 +442,6 @@ static struct icnss_priv { struct notifier_block get_service_nb; void *modem_notify_handler; struct notifier_block modem_ssr_nb; - struct wakeup_source ws; uint32_t 
diag_reg_read_addr; uint32_t diag_reg_read_mem_type; uint32_t diag_reg_read_len; @@ -445,6 +450,7 @@ static struct icnss_priv { struct qpnp_adc_tm_chip *adc_tm_dev; struct qpnp_vadc_chip *vadc_dev; uint64_t vph_pwr; + atomic_t pm_count; } *penv; static void icnss_hw_write_reg(void *base, u32 offset, u32 val) @@ -512,6 +518,35 @@ static int icnss_hw_poll_reg_field(void *base, u32 offset, u32 mask, u32 val, return 0; } +static void icnss_pm_stay_awake(struct icnss_priv *priv) +{ + if (atomic_inc_return(&priv->pm_count) != 1) + return; + + icnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n", priv->state, + atomic_read(&priv->pm_count)); + + pm_stay_awake(&priv->pdev->dev); + + priv->stats.pm_stay_awake++; +} + +static void icnss_pm_relax(struct icnss_priv *priv) +{ + int r = atomic_dec_return(&priv->pm_count); + + WARN_ON(r < 0); + + if (r != 0) + return; + + icnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n", priv->state, + atomic_read(&priv->pm_count)); + + pm_relax(&priv->pdev->dev); + priv->stats.pm_relax++; +} + static char *icnss_driver_event_to_str(enum icnss_driver_event_type type) { switch (type) { @@ -535,16 +570,16 @@ static char *icnss_driver_event_to_str(enum icnss_driver_event_type type) }; static int icnss_driver_event_post(enum icnss_driver_event_type type, - bool sync, void *data) + u32 flags, void *data) { struct icnss_driver_event *event; - unsigned long flags; + unsigned long irq_flags; int gfp = GFP_KERNEL; int ret = 0; - icnss_pr_dbg("Posting event: %s: %s%s(%d), state: 0x%lx\n", - current->comm, icnss_driver_event_to_str(type), - sync ? "-sync" : "", type, penv->state); + icnss_pr_dbg("Posting event: %s(%d), %s, flags: 0x%x, state: 0x%lx\n", + icnss_driver_event_to_str(type), type, current->comm, + flags, penv->state); if (type >= ICNSS_DRIVER_EVENT_MAX) { icnss_pr_err("Invalid Event type: %d, can't post", type); @@ -558,39 +593,47 @@ static int icnss_driver_event_post(enum icnss_driver_event_type type, if (event == NULL) return -ENOMEM; + icnss_pm_stay_awake(penv); + event->type = type; event->data = data; init_completion(&event->complete); event->ret = ICNSS_EVENT_PENDING; - event->sync = sync; + event->sync = !!(flags & ICNSS_EVENT_SYNC); - spin_lock_irqsave(&penv->event_lock, flags); + spin_lock_irqsave(&penv->event_lock, irq_flags); list_add_tail(&event->list, &penv->event_list); - spin_unlock_irqrestore(&penv->event_lock, flags); + spin_unlock_irqrestore(&penv->event_lock, irq_flags); penv->stats.events[type].posted++; queue_work(penv->event_wq, &penv->event_work); - if (!sync) - return ret; + if (!(flags & ICNSS_EVENT_SYNC)) + goto out; - ret = wait_for_completion_interruptible(&event->complete); + if (flags & ICNSS_EVENT_UNINTERRUPTIBLE) + wait_for_completion(&event->complete); + else + ret = wait_for_completion_interruptible(&event->complete); icnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n", icnss_driver_event_to_str(type), type, penv->state, ret, event->ret); - spin_lock_irqsave(&penv->event_lock, flags); + spin_lock_irqsave(&penv->event_lock, irq_flags); if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) { event->sync = false; - spin_unlock_irqrestore(&penv->event_lock, flags); - return ret; + spin_unlock_irqrestore(&penv->event_lock, irq_flags); + ret = -EINTR; + goto out; } - spin_unlock_irqrestore(&penv->event_lock, flags); + spin_unlock_irqrestore(&penv->event_lock, irq_flags); ret = event->ret; kfree(event); +out: + icnss_pm_relax(penv); return ret; } @@ -2194,7 +2237,7 @@ static void icnss_qmi_wlfw_clnt_ind(struct 
qmi_handle *handle, switch (msg_id) { case QMI_WLFW_FW_READY_IND_V01: icnss_driver_event_post(ICNSS_DRIVER_EVENT_FW_READY_IND, - false, NULL); + 0, NULL); break; case QMI_WLFW_MSA_READY_IND_V01: icnss_pr_dbg("Received MSA Ready Indication msg_id 0x%x\n", @@ -2382,8 +2425,6 @@ static int icnss_driver_event_fw_ready_ind(void *data) if (!penv) return -ENODEV; - __pm_stay_awake(&penv->ws); - set_bit(ICNSS_FW_READY, &penv->state); icnss_pr_info("WLAN FW is ready: 0x%lx\n", penv->state); @@ -2401,10 +2442,7 @@ static int icnss_driver_event_fw_ready_ind(void *data) else ret = icnss_call_driver_probe(penv); - __pm_relax(&penv->ws); - out: - __pm_relax(&penv->ws); return ret; } @@ -2415,8 +2453,6 @@ static int icnss_driver_event_register_driver(void *data) if (penv->ops) return -EEXIST; - __pm_stay_awake(&penv->ws); - penv->ops = data; if (test_bit(SKIP_QMI, &quirks)) @@ -2442,21 +2478,16 @@ static int icnss_driver_event_register_driver(void *data) set_bit(ICNSS_DRIVER_PROBED, &penv->state); - __pm_relax(&penv->ws); - return 0; power_off: icnss_hw_power_off(penv); out: - __pm_relax(&penv->ws); return ret; } static int icnss_driver_event_unregister_driver(void *data) { - __pm_stay_awake(&penv->ws); - if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state)) { penv->ops = NULL; goto out; @@ -2472,7 +2503,6 @@ static int icnss_driver_event_unregister_driver(void *data) icnss_hw_power_off(penv); out: - __pm_relax(&penv->ws); return 0; } @@ -2550,6 +2580,8 @@ static void icnss_driver_event_work(struct work_struct *work) unsigned long flags; int ret; + icnss_pm_stay_awake(penv); + spin_lock_irqsave(&penv->event_lock, flags); while (!list_empty(&penv->event_list)) { @@ -2609,6 +2641,8 @@ static void icnss_driver_event_work(struct work_struct *work) spin_lock_irqsave(&penv->event_lock, flags); } spin_unlock_irqrestore(&penv->event_lock, flags); + + icnss_pm_relax(penv); } static int icnss_qmi_wlfw_clnt_svc_event_notify(struct notifier_block *this, @@ -2625,12 +2659,12 @@ static int icnss_qmi_wlfw_clnt_svc_event_notify(struct notifier_block *this, switch (code) { case QMI_SERVER_ARRIVE: ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_SERVER_ARRIVE, - false, NULL); + 0, NULL); break; case QMI_SERVER_EXIT: ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_SERVER_EXIT, - false, NULL); + 0, NULL); break; default: icnss_pr_dbg("Invalid code: %ld", code); @@ -2667,7 +2701,7 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb, event_data->crashed = notif->crashed; icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN, - true, event_data); + ICNSS_EVENT_SYNC, event_data); return NOTIFY_OK; } @@ -2742,7 +2776,7 @@ static int icnss_service_notifier_notify(struct notifier_block *nb, event_data->crashed = true; icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN, - true, event_data); + ICNSS_EVENT_SYNC, event_data); break; case SERVREG_NOTIF_SERVICE_STATE_UP_V01: icnss_pr_dbg("Service up, state: 0x%lx\n", priv->state); @@ -2914,7 +2948,7 @@ int icnss_register_driver(struct icnss_driver_ops *ops) } ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_REGISTER_DRIVER, - true, ops); + ICNSS_EVENT_SYNC, ops); if (ret == -ERESTARTSYS) ret = 0; @@ -2942,7 +2976,7 @@ int icnss_unregister_driver(struct icnss_driver_ops *ops) } ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER, - true, NULL); + ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL); out: return ret; } @@ -3270,6 +3304,12 @@ int icnss_wlan_disable(enum icnss_driver_mode mode) } EXPORT_SYMBOL(icnss_wlan_disable); +bool icnss_is_qmi_disable(void) +{ 
+ return test_bit(SKIP_QMI, &quirks) ? true : false; +} +EXPORT_SYMBOL(icnss_is_qmi_disable); + int icnss_get_ce_id(int irq) { int i; @@ -3429,7 +3469,6 @@ static void icnss_bw_deinit(struct icnss_priv *priv) static int icnss_smmu_init(struct icnss_priv *priv) { struct dma_iommu_mapping *mapping; - int disable_htw = 1; int atomic_ctx = 1; int s1_bypass = 1; int ret = 0; @@ -3446,15 +3485,6 @@ static int icnss_smmu_init(struct icnss_priv *priv) } ret = iommu_domain_set_attr(mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw); - if (ret < 0) { - icnss_pr_err("Set disable_htw attribute failed, err = %d\n", - ret); - goto set_attr_fail; - } - - ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_ATOMIC, &atomic_ctx); if (ret < 0) { @@ -3821,9 +3851,6 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv) case ICNSS_FW_TEST_MODE: seq_puts(s, "FW TEST MODE"); continue; - case ICNSS_SUSPEND: - seq_puts(s, "SUSPEND"); - continue; case ICNSS_PM_SUSPEND: seq_puts(s, "PM SUSPEND"); continue; @@ -3843,6 +3870,7 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv) seq_puts(s, "MSA0 ASSIGNED"); continue; case ICNSS_WLFW_EXISTS: + seq_puts(s, "WLAN FW EXISTS"); continue; } @@ -3947,6 +3975,8 @@ static int icnss_stats_show(struct seq_file *s, void *data) ICNSS_STATS_DUMP(s, priv, pm_suspend_noirq_err); ICNSS_STATS_DUMP(s, priv, pm_resume_noirq); ICNSS_STATS_DUMP(s, priv, pm_resume_noirq_err); + ICNSS_STATS_DUMP(s, priv, pm_stay_awake); + ICNSS_STATS_DUMP(s, priv, pm_relax); icnss_stats_show_irqs(s, priv); @@ -4398,8 +4428,6 @@ static int icnss_probe(struct platform_device *pdev) spin_lock_init(&priv->event_lock); spin_lock_init(&priv->on_off_lock); - wakeup_source_init(&priv->ws, "icnss_ws"); - priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1); if (!priv->event_wq) { icnss_pr_err("Workqueue creation failed\n"); @@ -4461,8 +4489,6 @@ static int icnss_remove(struct platform_device *pdev) icnss_bw_deinit(penv); - wakeup_source_trash(&penv->ws); - icnss_hw_power_off(penv); dev_set_drvdata(&pdev->dev, NULL); @@ -4470,55 +4496,6 @@ static int icnss_remove(struct platform_device *pdev) return 0; } -static int icnss_suspend(struct platform_device *pdev, - pm_message_t state) -{ - int ret = 0; - - if (!penv) { - ret = -ENODEV; - goto out; - } - - icnss_pr_dbg("Driver suspending, state: 0x%lx\n", - penv->state); - - if (!penv->ops || !penv->ops->suspend || - !test_bit(ICNSS_DRIVER_PROBED, &penv->state)) - goto out; - - ret = penv->ops->suspend(&pdev->dev, state); - -out: - if (ret == 0) - set_bit(ICNSS_SUSPEND, &penv->state); - return ret; -} - -static int icnss_resume(struct platform_device *pdev) -{ - int ret = 0; - - if (!penv) { - ret = -ENODEV; - goto out; - } - - icnss_pr_dbg("Driver resuming, state: 0x%lx\n", - penv->state); - - if (!penv->ops || !penv->ops->resume || - !test_bit(ICNSS_DRIVER_PROBED, &penv->state)) - goto out; - - ret = penv->ops->resume(&pdev->dev); - -out: - if (ret == 0) - clear_bit(ICNSS_SUSPEND, &penv->state); - return ret; -} - #ifdef CONFIG_PM_SLEEP static int icnss_pm_suspend(struct device *dev) { @@ -4654,8 +4631,6 @@ MODULE_DEVICE_TABLE(of, icnss_dt_match); static struct platform_driver icnss_driver = { .probe = icnss_probe, .remove = icnss_remove, - .suspend = icnss_suspend, - .resume = icnss_resume, .driver = { .name = "icnss", .pm = &icnss_pm_ops, diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c index 9b44fb03cf94..83e3775ed533 100644 
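The flag rework above replaces the old bool sync parameter, and icnss_unregister_driver() now posts with ICNSS_EVENT_SYNC_UNINTERRUPTIBLE so a pending signal can no longer abort driver teardown halfway through. A fragment mirroring the three call styles used in this patch (not a standalone program):

/* fire and forget, e.g. from the QMI indication callback */
icnss_driver_event_post(ICNSS_DRIVER_EVENT_FW_READY_IND, 0, NULL);

/* block until the work item runs, but let a signal abort the wait */
ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_REGISTER_DRIVER,
			      ICNSS_EVENT_SYNC, ops);

/* block unconditionally; used where a partial teardown is unacceptable */
ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
			      ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);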
--- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c +++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c @@ -741,7 +741,6 @@ static int msm_audio_smmu_init(struct device *dev) { struct dma_iommu_mapping *mapping; int ret; - int disable_htw = 1; mapping = arm_iommu_create_mapping( msm_iommu_get_bus(dev), @@ -750,10 +749,6 @@ static int msm_audio_smmu_init(struct device *dev) if (IS_ERR(mapping)) return PTR_ERR(mapping); - iommu_domain_set_attr(mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, - &disable_htw); - ret = arm_iommu_attach_device(dev, mapping); if (ret) { dev_err(dev, "%s: Attach failed, err = %d\n", diff --git a/drivers/soc/qcom/rpm-smd-debug.c b/drivers/soc/qcom/rpm-smd-debug.c index c08668149636..4e406f7cd379 100644 --- a/drivers/soc/qcom/rpm-smd-debug.c +++ b/drivers/soc/qcom/rpm-smd-debug.c @@ -104,8 +104,6 @@ static ssize_t rsc_ops_write(struct file *fp, const char __user *user_buffer, if (msm_rpm_wait_for_ack(msm_rpm_send_request(req))) pr_err("Sending the RPM message failed\n"); - else - pr_info("RPM message sent succesfully\n"); err_request: msm_rpm_free_request(req); diff --git a/drivers/soc/qcom/system_stats.c b/drivers/soc/qcom/system_stats.c index 476d2f6dca27..ba35928a991b 100644 --- a/drivers/soc/qcom/system_stats.c +++ b/drivers/soc/qcom/system_stats.c @@ -154,7 +154,7 @@ static int rpm_stats_write_buf(struct seq_file *m) time = get_time_in_msec(time); seq_printf(m, "\ttime in last mode(msec):%llu\n", time); - time = arch_counter_get_cntpct() - rs.last_exited_at; + time = arch_counter_get_cntvct() - rs.last_exited_at; time = get_time_in_sec(time); seq_printf(m, "\ttime since last mode(sec):%llu\n", time); diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c index 310903b10a98..92cdadef715d 100644 --- a/drivers/soc/qcom/wcd-dsp-glink.c +++ b/drivers/soc/qcom/wcd-dsp-glink.c @@ -32,6 +32,7 @@ #define WDSP_EDGE "wdsp" #define RESP_QUEUE_SIZE 3 #define QOS_PKT_SIZE 1024 +#define TIMEOUT_MS 1000 struct wdsp_glink_dev { struct class *cls; @@ -71,7 +72,19 @@ struct wdsp_glink_ch { /* To free up the channel memory */ bool free_mem; - /* Glink channel configuration */ + /* Glink local channel open work */ + struct work_struct lcl_ch_open_wrk; + + /* Glink local channel close work */ + struct work_struct lcl_ch_cls_wrk; + + /* Wait for ch connect state before sending any command */ + wait_queue_head_t ch_connect_wait; + + /* + * Glink channel configuration. 
This has to be the last + * member of the structure as it has variable size + */ struct wdsp_glink_ch_cfg ch_cfg; }; @@ -89,12 +102,15 @@ struct wdsp_glink_priv { struct mutex rsp_mutex; /* Glink channel related */ + struct mutex glink_mutex; struct wdsp_glink_state glink_state; struct wdsp_glink_ch **ch; u8 no_of_channels; struct work_struct ch_open_cls_wrk; struct workqueue_struct *work_queue; + wait_queue_head_t link_state_wait; + struct device *dev; }; @@ -214,6 +230,36 @@ done: } /* + * wdsp_glink_lcl_ch_open_wrk - Work function to open channel again + * when local disconnect event happens + * work: Work structure + */ +static void wdsp_glink_lcl_ch_open_wrk(struct work_struct *work) +{ + struct wdsp_glink_ch *ch; + + ch = container_of(work, struct wdsp_glink_ch, + lcl_ch_open_wrk); + + wdsp_glink_open_ch(ch); +} + +/* + * wdsp_glink_lcl_ch_cls_wrk - Work function to close channel locally + * when remote disconnect event happens + * work: Work structure + */ +static void wdsp_glink_lcl_ch_cls_wrk(struct work_struct *work) +{ + struct wdsp_glink_ch *ch; + + ch = container_of(work, struct wdsp_glink_ch, + lcl_ch_cls_wrk); + + wdsp_glink_close_ch(ch); +} + +/* * wdsp_glink_notify_state - Glink channel state information event callback * handle: Opaque Channel handle returned by GLink * priv: Private pointer to the channel @@ -258,6 +304,7 @@ static void wdsp_glink_notify_state(void *handle, const void *priv, __func__, ch->ch_cfg.latency_in_us, ch->ch_cfg.name); + wake_up(&ch->ch_connect_wait); mutex_unlock(&ch->mutex); } else if (event == GLINK_LOCAL_DISCONNECTED) { /* @@ -271,6 +318,9 @@ static void wdsp_glink_notify_state(void *handle, const void *priv, if (ch->free_mem) { kfree(ch); ch = NULL; + } else { + /* Open the glink channel again */ + queue_work(wpriv->work_queue, &ch->lcl_ch_open_wrk); + } } else if (event == GLINK_REMOTE_DISCONNECTED) { dev_dbg(wpriv->dev, "%s: remote channel: %s disconnected remotely\n", __func__, ch->ch_cfg.name); mutex_unlock(&ch->mutex); /* * If remote disconnect happens, local side also has - * to close the channel and reopen again as per glink + * to close the channel as per glink design in a + * separate work_queue.
*/ - if (!wdsp_glink_close_ch(ch)) - wdsp_glink_open_ch(ch); + queue_work(wpriv->work_queue, &ch->lcl_ch_cls_wrk); } } @@ -294,16 +344,23 @@ static int wdsp_glink_close_ch(struct wdsp_glink_ch *ch) struct wdsp_glink_priv *wpriv = ch->wpriv; int ret = 0; + mutex_lock(&wpriv->glink_mutex); + if (ch->handle) { + ret = glink_close(ch->handle); + if (IS_ERR_VALUE(ret)) { + dev_err(wpriv->dev, "%s: glink_close is failed, ret = %d\n", + __func__, ret); + } else { + ch->handle = NULL; + dev_dbg(wpriv->dev, "%s: ch %s is closed\n", __func__, + ch->ch_cfg.name); + } + } else { + dev_dbg(wpriv->dev, "%s: ch %s is already closed\n", __func__, + ch->ch_cfg.name); + } + mutex_unlock(&wpriv->glink_mutex); - mutex_lock(&ch->mutex); - - dev_dbg(wpriv->dev, "%s: ch %s closing\n", __func__, ch->ch_cfg.name); - ret = glink_close(ch->handle); - if (IS_ERR_VALUE(ret)) - dev_err(wpriv->dev, "%s: glink_close is failed, ret = %d\n", - __func__, ret); - - mutex_unlock(&ch->mutex); return ret; } @@ -318,29 +375,34 @@ static int wdsp_glink_open_ch(struct wdsp_glink_ch *ch) struct glink_open_config open_cfg; int ret = 0; - memset(&open_cfg, 0, sizeof(open_cfg)); - open_cfg.options = GLINK_OPT_INITIAL_XPORT; - open_cfg.edge = WDSP_EDGE; - open_cfg.notify_rx = wdsp_glink_notify_rx; - open_cfg.notify_tx_done = wdsp_glink_notify_tx_done; - open_cfg.notify_state = wdsp_glink_notify_state; - open_cfg.notify_rx_intent_req = wdsp_glink_notify_rx_intent_req; - open_cfg.priv = ch; - open_cfg.name = ch->ch_cfg.name; - - dev_dbg(wpriv->dev, "%s: ch->ch_cfg.name = %s, latency_in_us = %d, intents = %d\n", - __func__, ch->ch_cfg.name, ch->ch_cfg.latency_in_us, - ch->ch_cfg.no_of_intents); - - mutex_lock(&ch->mutex); - ch->handle = glink_open(&open_cfg); - if (IS_ERR_OR_NULL(ch->handle)) { - dev_err(wpriv->dev, "%s: glink_open failed %s\n", - __func__, ch->ch_cfg.name); - ch->handle = NULL; - ret = -EINVAL; + mutex_lock(&wpriv->glink_mutex); + if (!ch->handle) { + memset(&open_cfg, 0, sizeof(open_cfg)); + open_cfg.options = GLINK_OPT_INITIAL_XPORT; + open_cfg.edge = WDSP_EDGE; + open_cfg.notify_rx = wdsp_glink_notify_rx; + open_cfg.notify_tx_done = wdsp_glink_notify_tx_done; + open_cfg.notify_state = wdsp_glink_notify_state; + open_cfg.notify_rx_intent_req = wdsp_glink_notify_rx_intent_req; + open_cfg.priv = ch; + open_cfg.name = ch->ch_cfg.name; + + dev_dbg(wpriv->dev, "%s: ch->ch_cfg.name = %s, latency_in_us = %d, intents = %d\n", + __func__, ch->ch_cfg.name, ch->ch_cfg.latency_in_us, + ch->ch_cfg.no_of_intents); + + ch->handle = glink_open(&open_cfg); + if (IS_ERR_OR_NULL(ch->handle)) { + dev_err(wpriv->dev, "%s: glink_open failed for ch %s\n", + __func__, ch->ch_cfg.name); + ch->handle = NULL; + ret = -EINVAL; + } + } else { + dev_err(wpriv->dev, "%s: ch %s is already opened\n", __func__, + ch->ch_cfg.name); } - mutex_unlock(&ch->mutex); + mutex_unlock(&wpriv->glink_mutex); return ret; } @@ -354,7 +416,7 @@ static void wdsp_glink_close_all_ch(struct wdsp_glink_priv *wpriv) int i; for (i = 0; i < wpriv->no_of_channels; i++) - if (wpriv->ch[i]) + if (wpriv->ch && wpriv->ch[i]) wdsp_glink_close_ch(wpriv->ch[i]); } @@ -425,7 +487,12 @@ static void wdsp_glink_link_state_cb(struct glink_link_state_cb_info *cb_info, } wpriv = (struct wdsp_glink_priv *)priv; + + mutex_lock(&wpriv->glink_mutex); wpriv->glink_state.link_state = cb_info->link_state; + wake_up(&wpriv->link_state_wait); + mutex_unlock(&wpriv->glink_mutex); + queue_work(wpriv->work_queue, &wpriv->ch_open_cls_wrk); } @@ -477,6 +544,9 @@ static int wdsp_glink_ch_info_init(struct 
wdsp_glink_priv *wpriv, mutex_init(&ch[i]->mutex); ch[i]->wpriv = wpriv; + INIT_WORK(&ch[i]->lcl_ch_open_wrk, wdsp_glink_lcl_ch_open_wrk); + INIT_WORK(&ch[i]->lcl_ch_cls_wrk, wdsp_glink_lcl_ch_cls_wrk); + init_waitqueue_head(&ch[i]->ch_connect_wait); } wpriv->ch = ch; wpriv->no_of_channels = no_of_channels; @@ -540,15 +610,26 @@ static void wdsp_glink_tx_buf_work(struct work_struct *work) ret = glink_tx(ch->handle, tx_buf, cpkt->payload, cpkt->payload_size, GLINK_TX_REQ_INTENT); - if (IS_ERR_VALUE(ret)) + if (IS_ERR_VALUE(ret)) { dev_err(wpriv->dev, "%s: glink tx failed, ret = %d\n", __func__, ret); + /* + * If glink_tx() is failed then free tx_buf here as + * there won't be any tx_done notification to + * free the buffer. + */ + kfree(tx_buf); + } } else { dev_err(wpriv->dev, "%s: channel %s is not in connected state\n", __func__, ch->ch_cfg.name); + /* + * Free tx_buf here as there won't be any tx_done + * notification in this case also. + */ + kfree(tx_buf); } mutex_unlock(&tx_buf->ch->mutex); - } /* @@ -678,7 +759,32 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf, __func__, ret); kfree(tx_buf); break; + case WDSP_READY_PKT: + ret = wait_event_timeout(wpriv->link_state_wait, + (wpriv->glink_state.link_state == + GLINK_LINK_STATE_UP), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + dev_err(wpriv->dev, "%s: Link state wait timeout\n", + __func__); + ret = -ETIMEDOUT; + goto free_buf; + } + ret = 0; + kfree(tx_buf); + break; case WDSP_CMD_PKT: + mutex_lock(&wpriv->glink_mutex); + if (wpriv->glink_state.link_state == GLINK_LINK_STATE_DOWN) { + mutex_unlock(&wpriv->glink_mutex); + dev_err(wpriv->dev, "%s: Link state is Down\n", + __func__); + + ret = -ENETRESET; + goto free_buf; + } + mutex_unlock(&wpriv->glink_mutex); + cpkt = (struct wdsp_cmd_pkt *)wpkt->payload; dev_dbg(wpriv->dev, "%s: requested ch_name: %s\n", __func__, cpkt->ch_name); @@ -696,6 +802,20 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf, ret = -EINVAL; goto free_buf; } + + ret = wait_event_timeout(tx_buf->ch->ch_connect_wait, + (tx_buf->ch->channel_state == + GLINK_CONNECTED), + msecs_to_jiffies(TIMEOUT_MS)); + if (!ret) { + dev_err(wpriv->dev, "%s: glink channel %s is not in connected state %d\n", + __func__, tx_buf->ch->ch_cfg.name, + tx_buf->ch->channel_state); + ret = -ETIMEDOUT; + goto free_buf; + } + ret = 0; + INIT_WORK(&tx_buf->tx_work, wdsp_glink_tx_buf_work); queue_work(wpriv->work_queue, &tx_buf->tx_work); break; @@ -747,7 +867,9 @@ static int wdsp_glink_open(struct inode *inode, struct file *file) } init_completion(&wpriv->rsp_complete); + init_waitqueue_head(&wpriv->link_state_wait); mutex_init(&wpriv->rsp_mutex); + mutex_init(&wpriv->glink_mutex); file->private_data = wpriv; goto done; @@ -801,28 +923,39 @@ static int wdsp_glink_release(struct inode *inode, struct file *file) goto done; } + if (wpriv->glink_state.handle) + glink_unregister_link_state_cb(wpriv->glink_state.handle); + flush_workqueue(wpriv->work_queue); + destroy_workqueue(wpriv->work_queue); + /* * Clean up glink channel memory in channel state * callback only if close channels are called from here. */ if (wpriv->ch) { - for (i = 0; i < wpriv->no_of_channels; i++) - if (wpriv->ch[i]) + for (i = 0; i < wpriv->no_of_channels; i++) { + if (wpriv->ch[i]) { wpriv->ch[i]->free_mem = true; + /* + * Channel handle NULL means channel is already + * closed. Free the channel memory here itself. 
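The wdsp_glink_write() changes above gate every WDSP_CMD_PKT on the channel actually reaching GLINK_CONNECTED, paired with the wake_up() added to wdsp_glink_notify_state(). Condensed, the handshake is (a fragment, not standalone code):

/* state callback, once GLink reports the channel fully open: */
ch->channel_state = GLINK_CONNECTED;
wake_up(&ch->ch_connect_wait);

/* writer, before queueing a command for transmission: */
ret = wait_event_timeout(ch->ch_connect_wait,
			 ch->channel_state == GLINK_CONNECTED,
			 msecs_to_jiffies(TIMEOUT_MS));
if (!ret)
	return -ETIMEDOUT;	/* never connected within TIMEOUT_MS */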
+ */ + if (!wpriv->ch[i]->handle) { + kfree(wpriv->ch[i]); + wpriv->ch[i] = NULL; + } else { + wdsp_glink_close_ch(wpriv->ch[i]); + } + } + } - wdsp_glink_close_all_ch(wpriv); kfree(wpriv->ch); wpriv->ch = NULL; } - if (wpriv->glink_state.handle) - glink_unregister_link_state_cb(wpriv->glink_state.handle); - + mutex_destroy(&wpriv->glink_mutex); mutex_destroy(&wpriv->rsp_mutex); - if (wpriv->work_queue) - destroy_workqueue(wpriv->work_queue); - kfree(wpriv); file->private_data = NULL; diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index a8c8e120c348..e0af922a0329 100755 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -1201,7 +1201,7 @@ static void ion_vm_open(struct vm_area_struct *vma) mutex_lock(&buffer->lock); list_add(&vma_list->list, &buffer->vmas); mutex_unlock(&buffer->lock); - pr_debug("%s: adding %p\n", __func__, vma); + pr_debug("%s: adding %pK\n", __func__, vma); } static void ion_vm_close(struct vm_area_struct *vma) @@ -1216,7 +1216,7 @@ static void ion_vm_close(struct vm_area_struct *vma) continue; list_del(&vma_list->list); kfree(vma_list); - pr_debug("%s: deleting %p\n", __func__, vma); + pr_debug("%s: deleting %pK\n", __func__, vma); break; } mutex_unlock(&buffer->lock); diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index aaea7bed36e1..b2e1a4c1b170 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -94,7 +94,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, /* keep this for memory release */ buffer->priv_virt = info; - dev_dbg(dev, "Allocate buffer %p\n", buffer); + dev_dbg(dev, "Allocate buffer %pK\n", buffer); return 0; err: @@ -107,7 +107,7 @@ static void ion_cma_free(struct ion_buffer *buffer) struct device *dev = buffer->heap->priv; struct ion_cma_buffer_info *info = buffer->priv_virt; - dev_dbg(dev, "Release buffer %p\n", buffer); + dev_dbg(dev, "Release buffer %pK\n", buffer); /* release memory */ dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle); sg_free_table(info->table); @@ -123,7 +123,7 @@ static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer, struct device *dev = heap->priv; struct ion_cma_buffer_info *info = buffer->priv_virt; - dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer, + dev_dbg(dev, "Return buffer %pK physical address %pa\n", buffer, &info->handle); *addr = info->handle; diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c index d945b9251437..90ae7eb65b65 100644 --- a/drivers/staging/android/ion/ion_cma_secure_heap.c +++ b/drivers/staging/android/ion/ion_cma_secure_heap.c @@ -3,7 +3,7 @@ * * Copyright (C) Linaro 2012 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. - * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 
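The ion hunks above and below swap %p for %pK in debug prints. %pK consults /proc/sys/kernel/kptr_restrict: with restriction enabled it prints a zeroed value to readers lacking CAP_SYSLOG instead of leaking the real kernel address. A two-line illustration (fragment):

pr_debug("raw:    %p\n", buffer);	/* the actual pointer in this kernel */
pr_debug("masked: %pK\n", buffer);	/* zeros when kptr_restrict blocks it */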
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -501,7 +501,7 @@ retry: /* keep this for memory release */ buffer->priv_virt = info; - dev_dbg(sheap->dev, "Allocate buffer %p\n", buffer); + dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer); return info; err: @@ -634,7 +634,7 @@ retry: sg = sg_next(sg); } buffer->priv_virt = info; - dev_dbg(sheap->dev, "Allocate buffer %p\n", buffer); + dev_dbg(sheap->dev, "Allocate buffer %pK\n", buffer); return info; err2: @@ -721,7 +721,7 @@ static void ion_secure_cma_free(struct ion_buffer *buffer) struct ion_secure_cma_buffer_info *info = buffer->priv_virt; int ret = 0; - dev_dbg(sheap->dev, "Release buffer %p\n", buffer); + dev_dbg(sheap->dev, "Release buffer %pK\n", buffer); if (msm_secure_v2_is_supported()) ret = msm_unsecure_table(info->table); atomic_sub(buffer->size, &sheap->total_allocated); @@ -743,8 +743,8 @@ static int ion_secure_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer, container_of(heap, struct ion_cma_secure_heap, heap); struct ion_secure_cma_buffer_info *info = buffer->priv_virt; - dev_dbg(sheap->dev, "Return buffer %p physical address 0x%pa\n", buffer, - &info->phys); + dev_dbg(sheap->dev, "Return buffer %pK physical address 0x%pa\n", + buffer, &info->phys); *addr = info->phys; *len = buffer->size; diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index fd4d45ad8db2..03b2b8a38991 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -204,11 +204,16 @@ static struct page *split_page_from_secure_pool(struct ion_system_heap *heap, split_page(page, order); break; } - /* Return the remaining order-0 pages to the pool */ - if (page) - for (j = 1; j < (1 << order); j++) + /* + * Return the remaining order-0 pages to the pool. + * SetPagePrivate flag to mark memory as secure. 
+ */ + if (page) { + for (j = 1; j < (1 << order); j++) { + SetPagePrivate(page + j); free_buffer_page(heap, buffer, page + j, 0); - + } + } got_page: mutex_unlock(&heap->split_page_mutex); diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c index 592c603b1780..176f22ba570c 100644 --- a/drivers/staging/android/ion/msm/msm_ion.c +++ b/drivers/staging/android/ion/msm/msm_ion.c @@ -711,7 +711,7 @@ long msm_ion_custom_ioctl(struct ion_client *client, } else { handle = ion_import_dma_buf(client, data.flush_data.fd); if (IS_ERR(handle)) { - pr_info("%s: Could not import handle: %p\n", + pr_info("%s: Could not import handle: %pK\n", __func__, handle); return -EINVAL; } @@ -724,8 +724,8 @@ long msm_ion_custom_ioctl(struct ion_client *client, + data.flush_data.length; if (start && check_vaddr_bounds(start, end)) { - pr_err("%s: virtual address %p is out of bounds\n", - __func__, data.flush_data.vaddr); + pr_err("%s: virtual address %pK is out of bounds\n", + __func__, data.flush_data.vaddr); ret = -EINVAL; } else { ret = ion_do_cache_op( diff --git a/drivers/thermal/lmh_lite.c b/drivers/thermal/lmh_lite.c index bd456d25b124..32a573d22270 100644 --- a/drivers/thermal/lmh_lite.c +++ b/drivers/thermal/lmh_lite.c @@ -640,7 +640,7 @@ sens_exit: static int lmh_get_sensor_list(void) { - int ret = 0; + int ret = 0, buf_size = 0; uint32_t size = 0, next = 0, idx = 0, count = 0; struct scm_desc desc_arg; struct lmh_sensor_packet *payload = NULL; @@ -649,12 +649,13 @@ static int lmh_get_sensor_list(void) uint32_t size; } cmd_buf; - payload = kzalloc(sizeof(*payload), GFP_KERNEL); + buf_size = PAGE_ALIGN(sizeof(*payload)); + payload = kzalloc(buf_size, GFP_KERNEL); if (!payload) return -ENOMEM; do { - memset(payload, 0, sizeof(*payload)); + memset(payload, 0, buf_size); payload->count = next; cmd_buf.addr = SCM_BUFFER_PHYS(payload); /* payload_phys may be a physical address > 4 GB */ @@ -663,7 +664,7 @@ static int lmh_get_sensor_list(void) lmh_sensor_packet); desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL); trace_lmh_event_call("GET_SENSORS enter"); - dmac_flush_range(payload, payload + sizeof(*payload)); + dmac_flush_range(payload, payload + buf_size); if (!is_scm_armv8()) ret = scm_call(SCM_SVC_LMH, LMH_GET_SENSORS, (void *) &cmd_buf, @@ -881,7 +882,8 @@ static int lmh_debug_read(struct lmh_debug_ops *ops, uint32_t **buf) if (curr_size != size) { if (payload) devm_kfree(lmh_data->dev, payload); - payload = devm_kzalloc(lmh_data->dev, size, GFP_KERNEL); + payload = devm_kzalloc(lmh_data->dev, PAGE_ALIGN(size), + GFP_KERNEL); if (!payload) { pr_err("payload buffer alloc failed\n"); ret = -ENOMEM; @@ -948,7 +950,8 @@ static int lmh_debug_config_write(uint32_t cmd_id, uint32_t *buf, int size) trace_lmh_debug_data("Config LMH", buf, size); size_bytes = (size - 3) * sizeof(uint32_t); - payload = devm_kzalloc(lmh_data->dev, size_bytes, GFP_KERNEL); + payload = devm_kzalloc(lmh_data->dev, PAGE_ALIGN(size_bytes), + GFP_KERNEL); if (!payload) { ret = -ENOMEM; goto set_cfg_exit; diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 3df80c73b74a..ac0eb0939ecf 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2990,6 +2990,9 @@ void usb_remove_hcd(struct usb_hcd *hcd) cancel_work_sync(&hcd->wakeup_work); #endif + /* handle any pending hub events before XHCI stops */ + usb_flush_hub_wq(); + mutex_lock(&usb_bus_list_lock); usb_disconnect(&rhdev); /* Sets rhdev to NULL */ mutex_unlock(&usb_bus_list_lock); diff --git a/drivers/usb/core/hub.c 
b/drivers/usb/core/hub.c index 84df093639ac..269c1ee2da44 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -610,6 +610,12 @@ void usb_kick_hub_wq(struct usb_device *hdev) kick_hub_wq(hub); } +void usb_flush_hub_wq(void) +{ + flush_workqueue(hub_wq); +} +EXPORT_SYMBOL(usb_flush_hub_wq); + /* * Let the USB core know that a USB 3.0 device has sent a Function Wake Device * Notification, which indicates it had initiated remote wakeup. diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index 4a4f1083198c..08006d84fb38 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -1249,6 +1249,7 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep, struct usb_gsi_request *request; struct gsi_channel_info *ch_info; bool block_db, f_suspend; + unsigned long flags; switch (op) { case GSI_EP_OP_PREPARE_TRBS: @@ -1263,11 +1264,15 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep, case GSI_EP_OP_CONFIG: request = (struct usb_gsi_request *)op_data; dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name); + spin_lock_irqsave(&dwc->lock, flags); gsi_configure_ep(ep, request); + spin_unlock_irqrestore(&dwc->lock, flags); break; case GSI_EP_OP_STARTXFER: dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name); + spin_lock_irqsave(&dwc->lock, flags); ret = gsi_startxfer_for_ep(ep); + spin_unlock_irqrestore(&dwc->lock, flags); break; case GSI_EP_OP_GET_XFER_IDX: dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name); @@ -1293,12 +1298,16 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep, case GSI_EP_OP_UPDATEXFER: request = (struct usb_gsi_request *)op_data; dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n"); + spin_lock_irqsave(&dwc->lock, flags); ret = gsi_updatexfer_for_ep(ep, request); + spin_unlock_irqrestore(&dwc->lock, flags); break; case GSI_EP_OP_ENDXFER: request = (struct usb_gsi_request *)op_data; dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name); + spin_lock_irqsave(&dwc->lock, flags); gsi_endxfer_for_ep(ep); + spin_unlock_irqrestore(&dwc->lock, flags); break; case GSI_EP_OP_SET_CLR_BLOCK_DBL: block_db = *((bool *)op_data); diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h index d489e453594a..f058ab4cedaa 100644 --- a/drivers/usb/gadget/function/f_gsi.h +++ b/drivers/usb/gadget/function/f_gsi.h @@ -461,7 +461,7 @@ static struct usb_gadget_strings *rmnet_gsi_strings[] = { /* rndis device descriptors */ -/* interface descriptor: */ +/* interface descriptor: Supports "Wireless" RNDIS; auto-detected by Windows */ static struct usb_interface_descriptor rndis_gsi_control_intf = { .bLength = sizeof(rndis_gsi_control_intf), .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ /* status endpoint is optional; this could be patched later */ .bNumEndpoints = 1, - .bInterfaceClass = USB_CLASS_COMM, - .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM, - .bInterfaceProtocol = USB_CDC_ACM_PROTO_VENDOR, + .bInterfaceClass = USB_CLASS_WIRELESS_CONTROLLER, + .bInterfaceSubClass = 0x01, + .bInterfaceProtocol = 0x03, /* .iInterface = DYNAMIC */ }; @@ -522,15 +522,16 @@ static struct usb_interface_descriptor rndis_gsi_data_intf = { /* .iInterface = DYNAMIC */ }; +/* Supports "Wireless" RNDIS; auto-detected by Windows */ static struct usb_interface_assoc_descriptor rndis_gsi_iad_descriptor = { .bLength = sizeof(rndis_gsi_iad_descriptor), .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, .bFirstInterface = 0, /* XXX, hardcoded */ 
.bInterfaceCount = 2, /* control + data */ - .bFunctionClass = USB_CLASS_COMM, - .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET, - .bFunctionProtocol = USB_CDC_PROTO_NONE, + .bFunctionClass = USB_CLASS_WIRELESS_CONTROLLER, + .bFunctionSubClass = 0x01, + .bFunctionProtocol = 0x03, /* .iFunction = DYNAMIC */ }; diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c index aa186781ef22..5e50fe245a59 100644 --- a/drivers/usb/gadget/function/f_mtp.c +++ b/drivers/usb/gadget/function/f_mtp.c @@ -1876,7 +1876,8 @@ struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi, dev->function.disable = mtp_function_disable; dev->function.setup = mtp_ctrlreq_configfs; dev->function.free_func = mtp_free; - dev->is_ptp = mtp_config; + dev->is_ptp = !mtp_config; + fi->f = &dev->function; return &dev->function; } diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index 7ad798ace1e5..4e35ed9654b7 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -333,6 +333,77 @@ static struct usb_descriptor_header *ncm_hs_function[] = { NULL, }; +/* Super Speed Support */ +static struct usb_endpoint_descriptor ncm_ss_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT), + .bInterval = USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS), +}; + +static struct usb_ss_ep_comp_descriptor ncm_ss_notify_comp_desc = { + .bLength = sizeof(ncm_ss_notify_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + /* the following 3 values can be tweaked if necessary */ + /* .bMaxBurst = 0, */ + /* .bmAttributes = 0, */ + .wBytesPerInterval = cpu_to_le16(NCM_STATUS_BYTECOUNT), +}; + +static struct usb_endpoint_descriptor ncm_ss_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor ncm_ss_in_comp_desc = { + .bLength = sizeof(ncm_ss_in_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + /* the following 2 values can be tweaked if necessary */ + /* .bMaxBurst = 0, */ + /* .bmAttributes = 0, */ +}; + +static struct usb_endpoint_descriptor ncm_ss_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor ncm_ss_out_comp_desc = { + .bLength = sizeof(ncm_ss_out_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + /* the following 2 values can be tweaked if necessary */ + /* .bMaxBurst = 0, */ + /* .bmAttributes = 0, */ +}; + +static struct usb_descriptor_header *ncm_ss_function[] = { + (struct usb_descriptor_header *) &ncm_iad_desc, + /* CDC NCM control descriptors */ + (struct usb_descriptor_header *) &ncm_control_intf, + (struct usb_descriptor_header *) &ncm_header_desc, + (struct usb_descriptor_header *) &ncm_union_desc, + (struct usb_descriptor_header *) &ecm_desc, + (struct usb_descriptor_header *) &ncm_desc, + (struct usb_descriptor_header *) &ncm_ss_notify_desc, + (struct usb_descriptor_header *) &ncm_ss_notify_comp_desc, + /* data interface, altsettings 0 and 1 */ + (struct usb_descriptor_header *) &ncm_data_nop_intf, + (struct usb_descriptor_header *) 
&ncm_data_intf, + (struct usb_descriptor_header *) &ncm_ss_in_desc, + (struct usb_descriptor_header *) &ncm_ss_in_comp_desc, + (struct usb_descriptor_header *) &ncm_ss_out_desc, + (struct usb_descriptor_header *) &ncm_ss_out_comp_desc, + NULL, +}; + /* string descriptors: */ #define STRING_CTRL_IDX 0 @@ -1431,8 +1502,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) hs_ncm_notify_desc.bEndpointAddress = fs_ncm_notify_desc.bEndpointAddress; + if (gadget_is_superspeed(c->cdev->gadget)) { + ncm_ss_in_desc.bEndpointAddress = + fs_ncm_in_desc.bEndpointAddress; + ncm_ss_out_desc.bEndpointAddress = + fs_ncm_out_desc.bEndpointAddress; + ncm_ss_notify_desc.bEndpointAddress = + fs_ncm_notify_desc.bEndpointAddress; + } + status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function, - NULL); + ncm_ss_function); if (status) goto fail; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index dd7669331d00..b30831ef4014 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -116,17 +116,20 @@ int xhci_halt(struct xhci_hcd *xhci) STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); if (!ret) { xhci->xhc_state |= XHCI_STATE_HALTED; - xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; - - if (timer_pending(&xhci->cmd_timer)) { - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Cleanup command queue"); - del_timer(&xhci->cmd_timer); - xhci_cleanup_command_queue(xhci); - } - } else + } else { xhci_warn(xhci, "Host not halted after %u microseconds.\n", XHCI_MAX_HALT_USEC); + } + + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; + + if (timer_pending(&xhci->cmd_timer)) { + xhci_dbg_trace(xhci, trace_xhci_dbg_init, + "Cleanup command queue"); + del_timer(&xhci->cmd_timer); + xhci_cleanup_command_queue(xhci); + } + return ret; } diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index a1451a2d4826..12b98017beb2 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ -2267,12 +2267,15 @@ struct usbpd *devm_usbpd_get_by_phandle(struct device *dev, const char *phandle) struct platform_device *pdev; struct device *pd_dev; + if (!usbpd_class.p) /* usbpd_init() not yet called */ + return ERR_PTR(-EAGAIN); + if (!dev->of_node) - return ERR_PTR(-ENODEV); + return ERR_PTR(-EINVAL); pd_np = of_parse_phandle(dev->of_node, phandle, 0); if (!pd_np) - return ERR_PTR(-ENODEV); + return ERR_PTR(-ENXIO); pdev = of_find_device_by_node(pd_np); if (!pdev) @@ -2282,7 +2285,8 @@ struct usbpd *devm_usbpd_get_by_phandle(struct device *dev, const char *phandle) match_usbpd_device); if (!pd_dev) { platform_device_put(pdev); - return ERR_PTR(-ENODEV); + /* device was found but maybe hadn't probed yet, so defer */ + return ERR_PTR(-EPROBE_DEFER); } ptr = devres_alloc(devm_usbpd_put, sizeof(*ptr), GFP_KERNEL); @@ -2294,7 +2298,7 @@ struct usbpd *devm_usbpd_get_by_phandle(struct device *dev, const char *phandle) pd = dev_get_drvdata(pd_dev); if (!pd) - return ERR_PTR(-ENODEV); + return ERR_PTR(-EPROBE_DEFER); *ptr = pd; devres_add(dev, ptr); diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h index a5368cdf2254..55918d47a21a 100644 --- a/drivers/video/fbdev/msm/mdss.h +++ b/drivers/video/fbdev/msm/mdss.h @@ -193,6 +193,7 @@ enum mdss_qos_settings { MDSS_QOS_REMAPPER, MDSS_QOS_IB_NOCR, MDSS_QOS_WB2_WRITE_GATHER_EN, + MDSS_QOS_WB_QOS, MDSS_QOS_MAX, }; diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c index 57e18a7dc5e1..72b262e8171a 100644 --- a/drivers/video/fbdev/msm/mdss_dp.c +++ 
b/drivers/video/fbdev/msm/mdss_dp.c @@ -45,6 +45,18 @@ #define VDDA_UA_ON_LOAD 100000 /* uA units */ #define VDDA_UA_OFF_LOAD 100 /* uA units */ +#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3 +static u32 supported_modes[] = { + HDMI_VFRMT_640x480p60_4_3, + HDMI_VFRMT_720x480p60_4_3, HDMI_VFRMT_720x480p60_16_9, + HDMI_VFRMT_1280x720p60_16_9, + HDMI_VFRMT_1920x1080p60_16_9, + HDMI_VFRMT_3840x2160p24_16_9, HDMI_VFRMT_3840x2160p30_16_9, + HDMI_VFRMT_3840x2160p60_16_9, + HDMI_VFRMT_4096x2160p24_256_135, HDMI_VFRMT_4096x2160p30_256_135, + HDMI_VFRMT_4096x2160p60_256_135, HDMI_EVFRMT_4096x2160p24_16_9 +}; + static void mdss_dp_put_dt_clk_data(struct device *dev, struct dss_module_power *module_power) { @@ -789,17 +801,34 @@ void mdss_dp_config_ctrl(struct mdss_dp_drv_pdata *dp) cap = &dp->dpcd; - data = dp->lane_cnt - 1; - data <<= 4; + data |= (2 << 13); /* Default-> LSCLK DIV: 1/4 LCLK */ + + /* Color Format */ + switch (dp->panel_data.panel_info.out_format) { + case MDP_Y_CBCR_H2V2: + data |= (1 << 11); /* YUV420 */ + break; + case MDP_Y_CBCR_H2V1: + data |= (2 << 11); /* YUV422 */ + break; + default: + data |= (0 << 11); /* RGB */ + break; + } + + /* Scrambler reset enable */ + if (cap->scrambler_reset) + data |= (1 << 10); + + if (dp->edid.color_depth != 6) + data |= 0x100; /* Default: 8 bits */ + + /* Num of Lanes */ + data |= ((dp->lane_cnt - 1) << 4); if (cap->enhanced_frame) data |= 0x40; - if (dp->edid.color_depth == 8) { - /* 0 == 6 bits, 1 == 8 bits */ - data |= 0x100; /* bit 8 */ - } - if (!timing->interlaced) /* progressive */ data |= 0x04; @@ -863,6 +892,8 @@ static int dp_audio_info_setup(struct platform_device *pdev, mdss_dp_set_safe_to_exit_level(&dp_ctrl->ctrl_io, dp_ctrl->lane_cnt); mdss_dp_audio_enable(&dp_ctrl->ctrl_io, true); + dp_ctrl->wait_for_audio_comp = true; + return rc; } /* dp_audio_info_setup */ @@ -885,6 +916,19 @@ static int dp_get_audio_edid_blk(struct platform_device *pdev, return rc; } /* dp_get_audio_edid_blk */ +static void dp_audio_codec_teardown_done(struct platform_device *pdev) +{ + struct mdss_dp_drv_pdata *dp = platform_get_drvdata(pdev); + + if (!dp) { + pr_err("invalid input\n"); + return; + } + + pr_debug("audio codec teardown done\n"); + complete_all(&dp->audio_comp); +} + static int mdss_dp_init_ext_disp(struct mdss_dp_drv_pdata *dp) { int ret = 0; @@ -906,6 +948,8 @@ static int mdss_dp_init_ext_disp(struct mdss_dp_drv_pdata *dp) dp_get_audio_edid_blk; dp->ext_audio_data.codec_ops.cable_status = dp_get_cable_status; + dp->ext_audio_data.codec_ops.teardown_done = + dp_audio_codec_teardown_done; if (!dp->pdev->dev.of_node) { pr_err("%s cannot find dp dev.of_node\n", __func__); @@ -936,8 +980,6 @@ end: return ret; } -#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3 - static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic) { struct mdss_panel_info *pinfo; @@ -949,7 +991,6 @@ static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic) return -EINVAL; } - dp_drv->ds_data.ds_registered = false; ret = hdmi_get_supported_mode(&timing, &dp_drv->ds_data, vic); pinfo = &dp_drv->panel_data.panel_info; @@ -987,6 +1028,13 @@ static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic) return 0; } /* dp_init_panel_info */ +static inline void mdss_dp_set_audio_switch_node( + struct mdss_dp_drv_pdata *dp, int val) +{ + if (dp && dp->ext_audio_data.intf_ops.notify) + dp->ext_audio_data.intf_ops.notify(dp->ext_pdev, + val); +} int mdss_dp_on(struct mdss_panel_data *pdata) { @@ -1054,6 +1102,9 @@ int mdss_dp_on(struct 
mdss_panel_data *pdata) goto exit; } + mdss_dp_phy_share_lane_config(&dp_drv->phy_io, + orientation, dp_drv->dpcd.max_lane_count); + pr_debug("link_rate = 0x%x\n", dp_drv->link_rate); dp_drv->power_data[DP_CTRL_PM].clk_config[0].rate = @@ -1096,6 +1147,7 @@ int mdss_dp_on(struct mdss_panel_data *pdata) pr_debug("mainlink ready\n"); dp_drv->power_on = true; + mdss_dp_set_audio_switch_node(dp_drv, true); pr_debug("End-\n"); exit: @@ -1119,14 +1171,15 @@ int mdss_dp_off(struct mdss_panel_data *pdata) mutex_lock(&dp_drv->train_mutex); reinit_completion(&dp_drv->idle_comp); - - mdss_dp_state_ctrl(&dp_drv->ctrl_io, 0); + mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE); if (dp_drv->link_clks_on) mdss_dp_mainlink_ctrl(&dp_drv->ctrl_io, false); mdss_dp_aux_ctrl(&dp_drv->ctrl_io, false); + mdss_dp_audio_enable(&dp_drv->ctrl_io, false); + mdss_dp_irq_disable(dp_drv); mdss_dp_config_gpios(dp_drv, false); @@ -1147,14 +1200,6 @@ int mdss_dp_off(struct mdss_panel_data *pdata) return 0; } -static inline void mdss_dp_set_audio_switch_node( - struct mdss_dp_drv_pdata *dp, int val) -{ - if (dp && dp->ext_audio_data.intf_ops.notify) - dp->ext_audio_data.intf_ops.notify(dp->ext_pdev, - val); -} - static void mdss_dp_send_cable_notification( struct mdss_dp_drv_pdata *dp, int val) { @@ -1169,6 +1214,38 @@ static void mdss_dp_send_cable_notification( dp->ext_audio_data.type, val); } +static void mdss_dp_audio_codec_wait(struct mdss_dp_drv_pdata *dp) +{ + const unsigned long audio_completion_timeout = HZ * 3; /* 3s, in jiffies */ + int ret = 0; + + if (!dp->wait_for_audio_comp) + return; + + reinit_completion(&dp->audio_comp); + ret = wait_for_completion_timeout(&dp->audio_comp, + audio_completion_timeout); + if (ret <= 0) + pr_warn("audio codec teardown timed out\n"); + + dp->wait_for_audio_comp = false; +} + +static void mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp, bool enable) +{ + if (enable) { + mdss_dp_send_cable_notification(dp, enable); + } else { + mdss_dp_set_audio_switch_node(dp, enable); + mdss_dp_audio_codec_wait(dp); + mdss_dp_send_cable_notification(dp, enable); + } + + pr_debug("notify state %s done\n", + enable ? 
"ENABLE" : "DISABLE"); +} + + static int mdss_dp_edid_init(struct mdss_panel_data *pdata) { struct mdss_dp_drv_pdata *dp_drv = NULL; @@ -1183,6 +1260,10 @@ static int mdss_dp_edid_init(struct mdss_panel_data *pdata) dp_drv = container_of(pdata, struct mdss_dp_drv_pdata, panel_data); + dp_drv->ds_data.ds_registered = true; + dp_drv->ds_data.modes_num = ARRAY_SIZE(supported_modes); + dp_drv->ds_data.modes = supported_modes; + dp_drv->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ; edid_init_data.kobj = dp_drv->kobj; edid_init_data.ds_data = dp_drv->ds_data; @@ -1236,15 +1317,19 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata) mdss_dp_aux_init(dp_drv); + mdss_dp_phy_initialize(dp_drv); + mdss_dp_ctrl_reset(&dp_drv->ctrl_io); mdss_dp_phy_reset(&dp_drv->ctrl_io); mdss_dp_aux_reset(&dp_drv->ctrl_io); - mdss_dp_phy_initialize(dp_drv); mdss_dp_aux_ctrl(&dp_drv->ctrl_io, true); pr_debug("Ctrl_hw_rev =0x%x, phy hw_rev =0x%x\n", mdss_dp_get_ctrl_hw_version(&dp_drv->ctrl_io), mdss_dp_get_phy_hw_version(&dp_drv->phy_io)); + pr_debug("plug Orientation = %d\n", + usbpd_get_plug_orientation(dp_drv->pd)); + mdss_dp_phy_aux_setup(&dp_drv->phy_io); mdss_dp_irq_enable(dp_drv); @@ -1264,8 +1349,7 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata) goto edid_error; } - mdss_dp_send_cable_notification(dp_drv, true); - mdss_dp_set_audio_switch_node(dp_drv, true); + mdss_dp_notify_clients(dp_drv, true); dp_drv->dp_initialized = true; return ret; @@ -1771,8 +1855,7 @@ static void dp_send_events(struct mdss_dp_drv_pdata *dp, u32 events) { spin_lock(&dp->event_lock); dp->current_event = events; - queue_delayed_work(dp->workq, - &dp->dwork, HZ); + queue_delayed_work(dp->workq, &dp->dwork, HZ / 100); spin_unlock(&dp->event_lock); } @@ -1883,8 +1966,7 @@ static void usbpd_disconnect_callback(struct usbpd_svid_handler *hdlr) mutex_lock(&dp_drv->pd_msg_mutex); dp_drv->cable_connected = false; mutex_unlock(&dp_drv->pd_msg_mutex); - mdss_dp_send_cable_notification(dp_drv, false); - mdss_dp_set_audio_switch_node(dp_drv, false); + mdss_dp_notify_clients(dp_drv, false); } static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd, @@ -2135,6 +2217,8 @@ static int mdss_dp_probe(struct platform_device *pdev) mdss_dp_device_register(dp_drv); dp_drv->inited = true; + dp_drv->wait_for_audio_comp = false; + init_completion(&dp_drv->audio_comp); pr_debug("done\n"); diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h index 4710cf7a98e2..ddadb7b6709c 100644 --- a/drivers/video/fbdev/msm/mdss_dp.h +++ b/drivers/video/fbdev/msm/mdss_dp.h @@ -399,6 +399,7 @@ struct mdss_dp_drv_pdata { struct completion train_comp; struct completion idle_comp; struct completion video_comp; + struct completion audio_comp; struct mutex aux_mutex; struct mutex train_mutex; struct mutex pd_msg_mutex; @@ -423,6 +424,7 @@ struct mdss_dp_drv_pdata { char delay_start; u32 bpp; struct dp_statistic dp_stat; + bool wait_for_audio_comp; /* event */ struct workqueue_struct *workq; diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c index d9297a7af764..27e982437961 100644 --- a/drivers/video/fbdev/msm/mdss_dp_aux.c +++ b/drivers/video/fbdev/msm/mdss_dp_aux.c @@ -510,11 +510,20 @@ char mdss_dp_gen_link_clk(struct mdss_panel_info *pinfo, char lane_cnt) pr_debug("clk_rate=%llu, bpp= %d, lane_cnt=%d\n", pinfo->clk_rate, pinfo->bpp, lane_cnt); - min_link_rate = (u32)div_u64((pinfo->clk_rate * 10), - (lane_cnt * encoding_factx10)); - min_link_rate = (min_link_rate * 
pinfo->bpp) - / (DP_LINK_RATE_MULTIPLIER); + + /* + * The max pixel clock supported is 675 MHz. The + * current calculations below will make sure + * the min_link_rate is within 32 bit limits. + * Any changes in this section of code should + * consider this limitation. + */ + min_link_rate = pinfo->clk_rate + / (lane_cnt * encoding_factx10); min_link_rate /= ln_to_link_ratio; + min_link_rate = (min_link_rate * pinfo->bpp); + min_link_rate = (u32)div_u64(min_link_rate * 10, + DP_LINK_RATE_MULTIPLIER); pr_debug("min_link_rate = %d\n", min_link_rate); @@ -1113,17 +1122,17 @@ static void dp_host_train_set(struct mdss_dp_drv_pdata *ep, int train) } char vm_pre_emphasis[4][4] = { - {0x00, 0x06, 0x09, 0x0C}, /* pe0, 0 db */ - {0x00, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */ - {0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 db */ - {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */ + {0x00, 0x09, 0x11, 0x0C}, /* pe0, 0 db */ + {0x00, 0x0A, 0x10, 0xFF}, /* pe1, 3.5 db */ + {0x00, 0x0C, 0xFF, 0xFF}, /* pe2, 6.0 db */ + {0x00, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */ }; /* voltage swing, 0.2v and 1.0v are not support */ char vm_voltage_swing[4][4] = { - {0x0a, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */ - {0x07, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */ - {0x1A, 0x1E, 0xFF, 0xFF}, /* sw1, 0.8 v */ + {0x07, 0x0f, 0x12, 0x1E}, /* sw0, 0.4v */ + {0x11, 0x1D, 0x1F, 0xFF}, /* sw1, 0.6 v */ + {0x18, 0x1F, 0xFF, 0xFF}, /* sw2, 0.8 v */ {0x1E, 0xFF, 0xFF, 0xFF} /* sw1, 1.2 v, optional */ }; @@ -1375,7 +1384,8 @@ train_start: clear: dp_clear_training_pattern(dp); if (ret != -1) { - mdss_dp_setup_tr_unit(&dp->ctrl_io); + mdss_dp_setup_tr_unit(&dp->ctrl_io, dp->link_rate, + dp->lane_cnt, dp->vic); mdss_dp_state_ctrl(&dp->ctrl_io, ST_SEND_VIDEO); pr_debug("State_ctrl set to SEND_VIDEO\n"); } diff --git a/drivers/video/fbdev/msm/mdss_dp_util.c b/drivers/video/fbdev/msm/mdss_dp_util.c index bdf5d92f7053..f1245a024a88 100644 --- a/drivers/video/fbdev/msm/mdss_dp_util.c +++ b/drivers/video/fbdev/msm/mdss_dp_util.c @@ -143,6 +143,18 @@ void mdss_dp_aux_reset(struct dss_io_data *ctrl_io) writel_relaxed(aux_ctrl, ctrl_io->base + DP_AUX_CTRL); } +/* reset DP controller */ +void mdss_dp_ctrl_reset(struct dss_io_data *ctrl_io) +{ + u32 sw_reset = readl_relaxed(ctrl_io->base + DP_SW_RESET); + + sw_reset |= BIT(0); + writel_relaxed(sw_reset, ctrl_io->base + DP_SW_RESET); + udelay(1000); + sw_reset &= ~BIT(0); + writel_relaxed(sw_reset, ctrl_io->base + DP_SW_RESET); +} + /* reset DP Mainlink */ void mdss_dp_mainlink_reset(struct dss_io_data *ctrl_io) { @@ -284,13 +296,47 @@ void mdss_dp_sw_mvid_nvid(struct dss_io_data *ctrl_io) writel_relaxed(0x3c, ctrl_io->base + DP_SOFTWARE_NVID); } -void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io) +void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate, + u8 ln_cnt, u32 res) { - /* Current Tr unit configuration supports only 1080p */ + u32 dp_tu = 0x0; + u32 valid_boundary = 0x0; + u32 valid_boundary2 = 0x0; + struct dp_vc_tu_mapping_table const *tu_entry = tu_table; + writel_relaxed(0x21, ctrl_io->base + DP_MISC1_MISC0); - writel_relaxed(0x0f0016, ctrl_io->base + DP_VALID_BOUNDARY); - writel_relaxed(0x1f, ctrl_io->base + DP_TU); - writel_relaxed(0x0, ctrl_io->base + DP_VALID_BOUNDARY_2); + + for (; tu_entry != tu_table + ARRAY_SIZE(tu_table); ++tu_entry) { + if ((tu_entry->vic == res) && + (tu_entry->lanes == ln_cnt) && + (tu_entry->lrate == link_rate)) + break; + } + + if (tu_entry == tu_table + ARRAY_SIZE(tu_table)) { + pr_err("requested ln_cnt=%d, lrate=0x%x not supported\n", + ln_cnt, link_rate); + return; 
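/*
 * Worked example for mdss_dp_gen_link_clk() above (illustrative only, not
 * part of this change; it assumes encoding_factx10 == 80 for 8b/10b coding
 * and ln_to_link_ratio == 10, where both names come from that function and
 * the numeric values are assumptions for the example):
 *
 *	1080p60 over 2 lanes: clk_rate = 148500000 Hz, bpp = 24
 *	148500000 / (2 * 80)	= 928125
 *	928125 / 10		= 92812
 *	92812 * 24		= 2227488
 *	2227488 * 10		= 22274880, then / DP_LINK_RATE_MULTIPLIER
 *
 * Even at the 675 MHz pixel-clock ceiling on a single lane the largest
 * intermediate value stays around 2e8, far below U32_MAX, which is the
 * 32-bit guarantee the comment in that function refers to.
 */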
+ } + + dp_tu |= tu_entry->tu_size_minus1; + valid_boundary |= tu_entry->valid_boundary_link; + valid_boundary |= (tu_entry->delay_start_link << 16); + + valid_boundary2 |= (tu_entry->valid_lower_boundary_link << 1); + valid_boundary2 |= (tu_entry->upper_boundary_count << 16); + valid_boundary2 |= (tu_entry->lower_boundary_count << 20); + + if (tu_entry->boundary_moderation_en) + valid_boundary2 |= BIT(0); + + writel_relaxed(valid_boundary, ctrl_io->base + DP_VALID_BOUNDARY); + writel_relaxed(dp_tu, ctrl_io->base + DP_TU); + writel_relaxed(valid_boundary2, ctrl_io->base + DP_VALID_BOUNDARY_2); + + pr_debug("valid_boundary=0x%x, valid_boundary2=0x%x\n", + valid_boundary, valid_boundary2); + pr_debug("dp_tu=0x%x\n", dp_tu); } void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io, @@ -441,6 +487,17 @@ u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp) return config; } +void mdss_dp_phy_share_lane_config(struct dss_io_data *phy_io, + u8 orientation, u8 ln_cnt) +{ + u32 info = 0x0; + + info |= (ln_cnt & 0x0F); + info |= ((orientation & 0x0F) << 4); + pr_debug("Shared Info = 0x%x\n", info); + writel_relaxed(info, phy_io->base + DP_PHY_SPARE0); +} + void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io, char link_rate) { u32 acr_ctrl = 0; diff --git a/drivers/video/fbdev/msm/mdss_dp_util.h b/drivers/video/fbdev/msm/mdss_dp_util.h index 5eb9d092476f..cf2286f9b58a 100644 --- a/drivers/video/fbdev/msm/mdss_dp_util.h +++ b/drivers/video/fbdev/msm/mdss_dp_util.h @@ -150,6 +150,8 @@ #define DP_PHY_AUX_INTERRUPT_MASK (0x00000044) #define DP_PHY_AUX_INTERRUPT_CLEAR (0x00000048) +#define DP_PHY_SPARE0 0x00A8 + #define QSERDES_TX0_OFFSET 0x0400 #define QSERDES_TX1_OFFSET 0x0800 @@ -200,17 +202,72 @@ struct edp_cmd { char next; /* next command */ }; +struct dp_vc_tu_mapping_table { + u32 vic; + u8 lanes; + u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20) */ + u8 bpp; + u8 valid_boundary_link; + u16 delay_start_link; + bool boundary_moderation_en; + u8 valid_lower_boundary_link; + u8 upper_boundary_count; + u8 lower_boundary_count; + u8 tu_size_minus1; +}; + +static const struct dp_vc_tu_mapping_table tu_table[] = { + {HDMI_VFRMT_640x480p60_4_3, 4, 06, 24, + 0x07, 0x0056, false, 0x00, 0x00, 0x00, 0x3b}, + {HDMI_VFRMT_640x480p60_4_3, 2, 06, 24, + 0x0e, 0x004f, false, 0x00, 0x00, 0x00, 0x3b}, + {HDMI_VFRMT_640x480p60_4_3, 1, 06, 24, + 0x15, 0x0039, false, 0x00, 0x00, 0x00, 0x2c}, + {HDMI_VFRMT_720x480p60_4_3, 1, 06, 24, + 0x13, 0x0038, true, 0x12, 0x0c, 0x0b, 0x24}, + {HDMI_VFRMT_720x480p60_16_9, 1, 06, 24, + 0x13, 0x0038, true, 0x12, 0x0c, 0x0b, 0x24}, + {HDMI_VFRMT_1280x720p60_16_9, 4, 06, 24, + 0x0c, 0x0020, false, 0x00, 0x00, 0x00, 0x1f}, + {HDMI_VFRMT_1280x720p60_16_9, 2, 06, 24, + 0x16, 0x0015, false, 0x00, 0x00, 0x00, 0x1f}, + {HDMI_VFRMT_1280x720p60_16_9, 1, 10, 24, + 0x21, 0x001a, false, 0x00, 0x00, 0x00, 0x27}, + {HDMI_VFRMT_1920x1080p60_16_9, 4, 06, 24, + 0x16, 0x000f, false, 0x00, 0x00, 0x00, 0x1f}, + {HDMI_VFRMT_1920x1080p60_16_9, 2, 10, 24, + 0x21, 0x0011, false, 0x00, 0x00, 0x00, 0x27}, + {HDMI_VFRMT_1920x1080p60_16_9, 1, 20, 24, + 0x21, 0x001a, false, 0x00, 0x00, 0x00, 0x27}, + {HDMI_VFRMT_3840x2160p24_16_9, 4, 10, 24, + 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27}, + {HDMI_VFRMT_3840x2160p30_16_9, 4, 10, 24, + 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27}, + {HDMI_VFRMT_3840x2160p60_16_9, 4, 20, 24, + 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27}, + {HDMI_VFRMT_4096x2160p24_256_135, 4, 10, 24, + 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27}, + 
{HDMI_VFRMT_4096x2160p30_256_135, 4, 10, 24, + 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27}, + {HDMI_VFRMT_4096x2160p60_256_135, 4, 20, 24, + 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27}, + {HDMI_EVFRMT_4096x2160p24_16_9, 4, 10, 24, + 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27}, +}; + int dp_aux_read(void *ep, struct edp_cmd *cmds); int dp_aux_write(void *ep, struct edp_cmd *cmd); void mdss_dp_state_ctrl(struct dss_io_data *ctrl_io, u32 data); u32 mdss_dp_get_ctrl_hw_version(struct dss_io_data *ctrl_io); u32 mdss_dp_get_phy_hw_version(struct dss_io_data *phy_io); +void mdss_dp_ctrl_reset(struct dss_io_data *ctrl_io); void mdss_dp_aux_reset(struct dss_io_data *ctrl_io); void mdss_dp_mainlink_reset(struct dss_io_data *ctrl_io); void mdss_dp_phy_reset(struct dss_io_data *ctrl_io); void mdss_dp_switch_usb3_phy_to_dp_mode(struct dss_io_data *tcsr_reg_io); void mdss_dp_assert_phy_reset(struct dss_io_data *ctrl_io, bool assert); -void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io); +void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate, + u8 ln_cnt, u32 res); void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io); void mdss_dp_hpd_configure(struct dss_io_data *ctrl_io, bool enable); void mdss_dp_aux_ctrl(struct dss_io_data *ctrl_io, bool enable); @@ -231,6 +288,8 @@ void mdss_dp_usbpd_ext_dp_status(struct usbpd_dp_status *dp_status); u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp); void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io, struct lane_mapping l_map); +void mdss_dp_phy_share_lane_config(struct dss_io_data *phy_io, + u8 orientation, u8 ln_cnt); void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io, char link_rate); void mdss_dp_audio_setup_sdps(struct dss_io_data *ctrl_io); diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c index c145f72c3c70..66cd99720afa 100644 --- a/drivers/video/fbdev/msm/mdss_dsi.c +++ b/drivers/video/fbdev/msm/mdss_dsi.c @@ -357,7 +357,7 @@ static int mdss_dsi_panel_power_lp(struct mdss_panel_data *pdata, int enable) static int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, int power_state) { - int ret; + int ret = 0; struct mdss_panel_info *pinfo; if (pdata == NULL) { @@ -383,7 +383,11 @@ static int mdss_dsi_panel_power_ctrl(struct mdss_panel_data *pdata, switch (power_state) { case MDSS_PANEL_POWER_OFF: - ret = mdss_dsi_panel_power_off(pdata); + case MDSS_PANEL_POWER_LCD_DISABLED: + /* if LCD has not been disabled, then disable it now */ + if ((pinfo->panel_power_state != MDSS_PANEL_POWER_LCD_DISABLED) + && (pinfo->panel_power_state != MDSS_PANEL_POWER_OFF)) + ret = mdss_dsi_panel_power_off(pdata); break; case MDSS_PANEL_POWER_ON: if (mdss_dsi_is_panel_on_lp(pdata)) @@ -2469,6 +2473,7 @@ static int mdss_dsi_event_handler(struct mdss_panel_data *pdata, int power_state; u32 mode; struct mdss_panel_info *pinfo; + int ret; if (pdata == NULL) { pr_err("%s: Invalid input data\n", __func__); @@ -2529,6 +2534,20 @@ static int mdss_dsi_event_handler(struct mdss_panel_data *pdata, rc = mdss_dsi_blank(pdata, power_state); rc = mdss_dsi_off(pdata, power_state); break; + case MDSS_EVENT_DISABLE_PANEL: + /* disable esd thread */ + disable_esd_thread(); + + /* disable backlight */ + ctrl_pdata->panel_data.set_backlight(pdata, 0); + + /* send the off commands */ + ctrl_pdata->off(pdata); + + /* disable panel power */ + ret = mdss_dsi_panel_power_ctrl(pdata, + MDSS_PANEL_POWER_LCD_DISABLED); + break; case MDSS_EVENT_CONT_SPLASH_FINISH: if (ctrl_pdata->off_cmds.link_state == DSI_LP_MODE) 
rc = mdss_dsi_blank(pdata, MDSS_PANEL_POWER_OFF); diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h index bd1854092c6a..7091dc2f38b9 100644 --- a/drivers/video/fbdev/msm/mdss_dsi.h +++ b/drivers/video/fbdev/msm/mdss_dsi.h @@ -614,6 +614,7 @@ int mdss_dsi_wait_for_lane_idle(struct mdss_dsi_ctrl_pdata *ctrl); irqreturn_t mdss_dsi_isr(int irq, void *ptr); irqreturn_t hw_vsync_handler(int irq, void *data); +void disable_esd_thread(void); void mdss_dsi_irq_handler_config(struct mdss_dsi_ctrl_pdata *ctrl_pdata); void mdss_dsi_set_tx_power_mode(int mode, struct mdss_panel_data *pdata); diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c index e8d68059581f..01fc01425a3a 100644 --- a/drivers/video/fbdev/msm/mdss_dsi_panel.c +++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c @@ -667,6 +667,11 @@ static void mdss_dsi_panel_bl_ctrl(struct mdss_panel_data *pdata, * for the backlight brightness. If the brightness is less * than it, the controller can malfunction. */ + pr_debug("%s: bl_level:%d\n", __func__, bl_level); + + /* do not allow backlight to change while panel is in disable mode */ + if (pdata->panel_disable_mode && (bl_level != 0)) + return; if ((bl_level < pdata->panel_info.bl_min) && (bl_level != 0)) bl_level = pdata->panel_info.bl_min; @@ -851,6 +856,48 @@ static int mdss_dsi_panel_low_power_config(struct mdss_panel_data *pdata, return 0; } +static void mdss_dsi_parse_mdp_kickoff_threshold(struct device_node *np, + struct mdss_panel_info *pinfo) +{ + int len, rc; + const u32 *src; + u32 tmp; + u32 max_delay_us; + + pinfo->mdp_koff_thshold = false; + src = of_get_property(np, "qcom,mdss-mdp-kickoff-threshold", &len); + if (!src || (len == 0)) + return; + + rc = of_property_read_u32(np, "qcom,mdss-mdp-kickoff-delay", &tmp); + if (!rc) + pinfo->mdp_koff_delay = tmp; + else + return; + + if (pinfo->mipi.frame_rate == 0) { + pr_err("cannot enable guard window, unexpected panel fps\n"); + return; + } + + pinfo->mdp_koff_thshold_low = be32_to_cpu(src[0]); + pinfo->mdp_koff_thshold_high = be32_to_cpu(src[1]); + max_delay_us = 1000000 / pinfo->mipi.frame_rate; + + /* enable the feature only if both threshold and delay are valid */ + if ((pinfo->mdp_koff_thshold_low < pinfo->mdp_koff_thshold_high) && + ((pinfo->mdp_koff_delay > 0) && + (pinfo->mdp_koff_delay < max_delay_us))) + pinfo->mdp_koff_thshold = true; + + pr_debug("panel kickoff thshold:[%d, %d] delay:%d (max:%d) enable:%d\n", + pinfo->mdp_koff_thshold_low, + pinfo->mdp_koff_thshold_high, + pinfo->mdp_koff_delay, + max_delay_us, + pinfo->mdp_koff_thshold); +} + static void mdss_dsi_parse_trigger(struct device_node *np, char *trigger, char *trigger_key) { @@ -2492,6 +2539,8 @@ static int mdss_panel_parse_dt(struct device_node *np, rc = of_property_read_u32(np, "qcom,mdss-mdp-transfer-time-us", &tmp); pinfo->mdp_transfer_time_us = (!rc ? tmp : DEFAULT_MDP_TRANSFER_TIME); + mdss_dsi_parse_mdp_kickoff_threshold(np, pinfo); + pinfo->mipi.lp11_init = of_property_read_bool(np, "qcom,mdss-dsi-lp11-init"); rc = of_property_read_u32(np, "qcom,mdss-dsi-init-delay-us", &tmp); diff --git a/drivers/video/fbdev/msm/mdss_dsi_status.c b/drivers/video/fbdev/msm/mdss_dsi_status.c index bf545ae311f2..4208c2c43efb 100644 --- a/drivers/video/fbdev/msm/mdss_dsi_status.c +++ b/drivers/video/fbdev/msm/mdss_dsi_status.c @@ -101,6 +101,20 @@ irqreturn_t hw_vsync_handler(int irq, void *data) } /* + * disable_esd_thread() - Cancels work item for the esd check. 
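+ *
+ * Called from the MDSS_EVENT_DISABLE_PANEL path before the panel off
+ * commands are sent, so that the ESD status worker cannot touch the
+ * link while the panel is being powered down.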
+ */ +void disable_esd_thread(void) +{ + if (pstatus_data && + cancel_delayed_work(&pstatus_data->check_status)) + pr_debug("esd thread killed\n"); +} + +/* * fb_event_callback() - Call back function for the fb_register_client() * notifying events * @self : notifier block diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c index 50c7015c6731..fc8d3898351e 100644 --- a/drivers/video/fbdev/msm/mdss_fb.c +++ b/drivers/video/fbdev/msm/mdss_fb.c @@ -1950,6 +1950,9 @@ static int mdss_fb_blank(int blank_mode, struct fb_info *info) pdata->panel_info.is_lpm_mode = false; } + if (pdata->panel_disable_mode) + mdss_mdp_enable_panel_disable_mode(mfd, false); + return mdss_fb_blank_sub(blank_mode, info, mfd->op_enable); } diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c index 9ed909e9a387..b3d929b15b44 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_util.c +++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c @@ -560,7 +560,7 @@ int msm_hdmi_get_timing_info( int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info, struct hdmi_util_ds_data *ds_data, u32 mode) { - int ret; + int ret, i = 0; if (!info) return -EINVAL; @@ -570,9 +570,23 @@ int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info, ret = msm_hdmi_get_timing_info(info, mode); - if (!ret && ds_data && ds_data->ds_registered && ds_data->ds_max_clk) { - if (info->pixel_freq > ds_data->ds_max_clk) - info->supported = false; + if (!ret && ds_data && ds_data->ds_registered) { + if (ds_data->ds_max_clk) { + if (info->pixel_freq > ds_data->ds_max_clk) + info->supported = false; + } + + if (ds_data->modes_num) { + u32 *modes = ds_data->modes; + + for (i = 0; i < ds_data->modes_num; i++) { + if (info->video_format == *modes++) + break; + } + + if (i == ds_data->modes_num) + info->supported = false; + } } return ret; diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.h b/drivers/video/fbdev/msm/mdss_hdmi_util.h index e65cf915fe92..8a7e4d1ebafc 100644 --- a/drivers/video/fbdev/msm/mdss_hdmi_util.h +++ b/drivers/video/fbdev/msm/mdss_hdmi_util.h @@ -459,6 +459,8 @@ struct hdmi_tx_ddc_ctrl { struct hdmi_util_ds_data { bool ds_registered; u32 ds_max_clk; + u32 modes_num; + u32 *modes; }; static inline int hdmi_tx_get_v_total(const struct msm_hdmi_mode_timing_info *t) diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c index 81e3438befca..6845b386807b 100644 --- a/drivers/video/fbdev/msm/mdss_mdp.c +++ b/drivers/video/fbdev/msm/mdss_mdp.c @@ -1992,6 +1992,8 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata) set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map); set_bit(MDSS_QOS_REMAPPER, mdata->mdss_qos_map); + set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map); + set_bit(MDSS_QOS_WB_QOS, mdata->mdss_qos_map); set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map); set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map); /* cdp supported */ mdata->enable_cdp = false; /* disable cdp */ diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h index 921391dc4bde..8ac63aaaefce 100644 --- a/drivers/video/fbdev/msm/mdss_mdp.h +++ b/drivers/video/fbdev/msm/mdss_mdp.h @@ -122,6 +122,11 @@ */ #define MDSS_MDP_DS_OVERFETCH_SIZE 5 +#define QOS_LUT_NRT_READ 0x0 +#define QOS_LUT_CWB_READ 0xe4000000 +#define PANIC_LUT_NRT_READ 0x0 +#define ROBUST_LUT_NRT_READ 0xFFFF + /* hw cursor can only be setup in highest mixer stage */ #define HW_CURSOR_STAGE(mdata) \ (((mdata)->max_target_zorder + MDSS_MDP_STAGE_0) - 1) @@ -394,6 
+399,8 @@ struct mdss_mdp_ctl_intfs_ops { enum dynamic_switch_modes mode, bool pre); /* called before do any register programming from commit thread */ void (*pre_programming)(struct mdss_mdp_ctl *ctl); + /* called to do any interface programming for the panel disable mode */ + void (*panel_disable_cfg)(struct mdss_mdp_ctl *ctl, bool disable); /* to update lineptr, [1..yres] - enable, 0 - disable */ int (*update_lineptr)(struct mdss_mdp_ctl *ctl, bool enable); @@ -405,7 +412,7 @@ struct mdss_mdp_cwb { struct list_head data_queue; int valid; u32 wb_idx; - struct mdp_output_layer *layer; + struct mdp_output_layer layer; void *priv_data; struct msm_sync_pt_data cwb_sync_pt_data; struct blocking_notifier_head notifier_head; @@ -1863,6 +1870,8 @@ int mdss_mdp_cmd_set_autorefresh_mode(struct mdss_mdp_ctl *ctl, int frame_cnt); int mdss_mdp_cmd_get_autorefresh_mode(struct mdss_mdp_ctl *ctl); int mdss_mdp_ctl_cmd_set_autorefresh(struct mdss_mdp_ctl *ctl, int frame_cnt); int mdss_mdp_ctl_cmd_get_autorefresh(struct mdss_mdp_ctl *ctl); +int mdss_mdp_enable_panel_disable_mode(struct msm_fb_data_type *mfd, + bool disable_panel); int mdss_mdp_pp_get_version(struct mdp_pp_feature_version *version); int mdss_mdp_layer_pre_commit_cwb(struct msm_fb_data_type *mfd, struct mdp_layer_commit_v1 *commit); diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c index ebc7d2144eb9..eb1e0b5c47a6 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c +++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c @@ -3424,6 +3424,8 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl) mutex_lock(&cwb->queue_lock); cwb_data = list_first_entry_or_null(&cwb->data_queue, struct mdss_mdp_wb_data, next); + if (cwb_data) + __list_del_entry(&cwb_data->next); mutex_unlock(&cwb->queue_lock); if (cwb_data == NULL) { pr_err("no output buffer for cwb\n"); @@ -3453,14 +3454,14 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl) sctl->opmode |= MDSS_MDP_CTL_OP_WFD_MODE; /* Select CWB data point */ - data_point = (cwb->layer->flags & MDP_COMMIT_CWB_DSPP) ? 0x4 : 0; + data_point = (cwb->layer.flags & MDP_COMMIT_CWB_DSPP) ? 
0x4 : 0; writel_relaxed(data_point, mdata->mdp_base + mdata->ppb_ctl[2]); if (sctl) writel_relaxed(data_point + 1, mdata->mdp_base + mdata->ppb_ctl[3]); - /* Flush WB */ - ctl->flush_bits |= BIT(16); + /* Flush WB and CTL */ + ctl->flush_bits |= BIT(16) | BIT(17); opmode = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_TOP) | ctl->opmode; mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, opmode); @@ -3469,6 +3470,10 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl) sctl->opmode; mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, opmode); } + + /* Increase commit count to signal CWB release fence */ + atomic_inc(&cwb->cwb_sync_pt_data.commit_cnt); + goto cwb_setup_done; cwb_setup_fail: diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h index 76fd2d12ac95..294e05c2fbb0 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_hwio.h +++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h @@ -541,6 +541,10 @@ enum mdss_mdp_writeback_index { #define MDSS_MDP_REG_WB_N16_INIT_PHASE_Y_C12 0x06C #define MDSS_MDP_REG_WB_OUT_SIZE 0x074 #define MDSS_MDP_REG_WB_ALPHA_X_VALUE 0x078 +#define MDSS_MDP_REG_WB_DANGER_LUT 0x084 +#define MDSS_MDP_REG_WB_SAFE_LUT 0x088 +#define MDSS_MDP_REG_WB_CREQ_LUT 0x08c +#define MDSS_MDP_REG_WB_QOS_CTRL 0x090 #define MDSS_MDP_REG_WB_CSC_BASE 0x260 #define MDSS_MDP_REG_WB_DST_ADDR_SW_STATUS 0x2B0 #define MDSS_MDP_REG_WB_CDP_CTRL 0x2B4 diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c index 2c2dc6f18fd9..4eb121f01aca 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c +++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c @@ -73,6 +73,7 @@ struct mdss_mdp_cmd_ctx { struct mutex clk_mtx; spinlock_t clk_lock; spinlock_t koff_lock; + spinlock_t ctlstart_lock; struct work_struct gate_clk_work; struct delayed_work delayed_off_clk_work; struct work_struct pp_done_work; @@ -144,15 +145,11 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl) u32 init; u32 height; - mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON); - mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT); if (!mixer) { mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT); - if (!mixer) { - mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF); + if (!mixer) goto exit; - } } init = mdss_mdp_pingpong_read(mixer->pingpong_base, @@ -160,10 +157,8 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl) height = mdss_mdp_pingpong_read(mixer->pingpong_base, MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT) & 0xffff; - if (height < init) { - mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF); + if (height < init) goto exit; - } cnt = mdss_mdp_pingpong_read(mixer->pingpong_base, MDSS_MDP_REG_PP_INT_COUNT_VAL) & 0xffff; @@ -173,13 +168,21 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl) else cnt -= init; - mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF); - pr_debug("cnt=%d init=%d height=%d\n", cnt, init, height); exit: return cnt; } +static inline u32 mdss_mdp_cmd_line_count_wrapper(struct mdss_mdp_ctl *ctl) +{ + u32 ret; + + mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON); + ret = mdss_mdp_cmd_line_count(ctl); + mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF); + return ret; +} + static int mdss_mdp_tearcheck_enable(struct mdss_mdp_ctl *ctl, bool enable) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); @@ -295,9 +298,9 @@ static int mdss_mdp_cmd_tearcheck_cfg(struct mdss_mdp_mixer *mixer, __func__, pinfo->yres, vclks_line, te->sync_cfg_height, te->vsync_init_val, te->rd_ptr_irq, te->start_pos, te->wr_ptr_irq); - pr_debug("thrd_start =%d thrd_cont=%d pp_split=%d\n", + 
pr_debug("thrd_start =%d thrd_cont=%d pp_split=%d hw_vsync_mode:%d\n", te->sync_threshold_start, te->sync_threshold_continue, - ctx->pingpong_split_slave); + ctx->pingpong_split_slave, pinfo->mipi.hw_vsync_mode); pingpong_base = mixer->pingpong_base; @@ -2130,6 +2133,88 @@ static int mdss_mdp_cmd_panel_on(struct mdss_mdp_ctl *ctl, } /* + * This function will be called from the sysfs node to tear down or restore + * any dependencies of the interface to disable the panel + */ +void mdss_mdp_cmd_panel_disable_cfg(struct mdss_mdp_ctl *ctl, + bool disable) +{ + struct mdss_panel_info *pinfo, *spinfo = NULL; + struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL; + + pinfo = &ctl->panel_data->panel_info; + mutex_lock(&ctl->offlock); + + if ((pinfo->sim_panel_mode == SIM_MODE) || + ((!ctl->panel_data->panel_disable_mode) && + (pinfo->mipi.hw_vsync_mode == 0))) { + pr_err("te already in simulaiton mode\n"); + goto exit; + } + + ctx = (struct mdss_mdp_cmd_ctx *)ctl->intf_ctx[MASTER_CTX]; + if (is_pingpong_split(ctl->mfd)) { + sctx = (struct mdss_mdp_cmd_ctx *)ctl->intf_ctx[SLAVE_CTX]; + } else if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) { + struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl); + + if (sctl) { + sctx = (struct mdss_mdp_cmd_ctx *) + sctl->intf_ctx[MASTER_CTX]; + spinfo = &sctl->panel_data->panel_info; + } + } + + if (disable) { + /* cache the te params */ + memcpy(&pinfo->te_cached, &pinfo->te, + sizeof(struct mdss_mdp_pp_tear_check)); + pinfo->mipi.hw_vsync_mode = 0; + + if (spinfo) { + spinfo->mipi.hw_vsync_mode = 0; + memcpy(&spinfo->te_cached, &spinfo->te, + sizeof(struct mdss_mdp_pp_tear_check)); + } + + pr_debug("%s: update info\n", __func__); + /* update the te information to use sim mode */ + mdss_panel_override_te_params(pinfo); + if (spinfo) + mdss_panel_override_te_params(spinfo); + + pr_debug("%s: reconfig tear check\n", __func__); + /* reconfigure tear check, remove dependency to external te */ + if (mdss_mdp_cmd_tearcheck_setup(ctx, false)) { + pr_warn("%s: ctx%d tearcheck setup failed\n", __func__, + ctx->current_pp_num); + } else { + if (sctx && mdss_mdp_cmd_tearcheck_setup(sctx, false)) + pr_warn("%s: ctx%d tearcheck setup failed\n", + __func__, sctx->current_pp_num); + } + } else { + /* + * restore the information in the panel information, + * the actual programming will happen during restore + */ + pr_debug("%s: reset tear check\n", __func__); + memcpy(&pinfo->te, &pinfo->te_cached, + sizeof(struct mdss_mdp_pp_tear_check)); + pinfo->mipi.hw_vsync_mode = 1; + + if (spinfo) { + spinfo->mipi.hw_vsync_mode = 1; + memcpy(&spinfo->te, &spinfo->te_cached, + sizeof(struct mdss_mdp_pp_tear_check)); + } + } + +exit: + mutex_unlock(&ctl->offlock); +} + +/* * This function will be called from the sysfs node to enable and disable the * feature with master ctl only. 
*/ @@ -2595,12 +2680,42 @@ static int mdss_mdp_disable_autorefresh(struct mdss_mdp_ctl *ctl, return 0; } +static bool wait_for_read_ptr_if_late(struct mdss_mdp_ctl *ctl, + struct mdss_mdp_ctl *sctl, struct mdss_panel_info *pinfo) +{ + u32 line_count; + u32 sline_count = 0; + bool ret = true; + u32 low_threshold = pinfo->mdp_koff_thshold_low; + u32 high_threshold = pinfo->mdp_koff_thshold_high; + + /* read the line count */ + line_count = mdss_mdp_cmd_line_count(ctl); + if (sctl) + sline_count = mdss_mdp_cmd_line_count(sctl); + + /* if the line count is within the range, return to trigger the transfer */ + if (((line_count > low_threshold) && (line_count < high_threshold)) && + (!sctl || ((sline_count > low_threshold) && + (sline_count < high_threshold)))) + ret = false; + + pr_debug("threshold:[%d, %d]\n", low_threshold, high_threshold); + pr_debug("line:%d sline:%d ret:%d\n", line_count, sline_count, ret); + MDSS_XLOG(line_count, sline_count, ret); + + return ret; +} static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl, - struct mdss_mdp_cmd_ctx *ctx) + struct mdss_mdp_ctl *sctl, struct mdss_mdp_cmd_ctx *ctx) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); bool is_pp_split = is_pingpong_split(ctl->mfd); + struct mdss_panel_info *pinfo = NULL; + + if (ctl->panel_data) + pinfo = &ctl->panel_data->panel_info; MDSS_XLOG(ctx->autorefresh_state); @@ -2625,9 +2740,39 @@ static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl, ctx->autorefresh_state = MDP_AUTOREFRESH_ON; } else { + + /* + * Some panels can require that mdp is within some range + * of the scanlines in order to trigger the transfer. + * If that is the case, make sure the panel scanline + * is within the limit to start. + * Acquire a spinlock for this operation to raise the + * priority of this thread and make sure the context + * is maintained, so there is as little time as possible + * between the check of the scanline and the kickoff. 
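+ *
+ * Illustrative numbers (assumed DT values, not part of this
+ * change): with qcom,mdss-mdp-kickoff-threshold = <10 1000> and
+ * qcom,mdss-mdp-kickoff-delay = <200>, a kickoff that finds the
+ * scanline outside (10, 1000) is held off for ~200us before
+ * CTL_START is written.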
+ */ + if (pinfo && pinfo->mdp_koff_thshold) { + spin_lock(&ctx->ctlstart_lock); + if (wait_for_read_ptr_if_late(ctl, sctl, pinfo)) { + spin_unlock(&ctx->ctlstart_lock); + usleep_range(pinfo->mdp_koff_delay, + pinfo->mdp_koff_delay + 10); + spin_lock(&ctx->ctlstart_lock); + } + } + /* SW Kickoff */ mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1); MDSS_XLOG(0x11, ctx->autorefresh_state); + + if (pinfo && pinfo->mdp_koff_thshold) + spin_unlock(&ctx->ctlstart_lock); } } @@ -2759,7 +2898,7 @@ static int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg) } /* Kickoff */ - __mdss_mdp_kickoff(ctl, ctx); + __mdss_mdp_kickoff(ctl, sctl, ctx); mdss_mdp_cmd_post_programming(ctl); @@ -3185,6 +3324,7 @@ static int mdss_mdp_cmd_ctx_setup(struct mdss_mdp_ctl *ctl, init_completion(&ctx->autorefresh_done); spin_lock_init(&ctx->clk_lock); spin_lock_init(&ctx->koff_lock); + spin_lock_init(&ctx->ctlstart_lock); mutex_init(&ctx->clk_mtx); mutex_init(&ctx->mdp_rdptr_lock); mutex_init(&ctx->mdp_wrptr_lock); @@ -3475,12 +3615,13 @@ int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl) ctl->ops.wait_pingpong = mdss_mdp_cmd_wait4pingpong; ctl->ops.add_vsync_handler = mdss_mdp_cmd_add_vsync_handler; ctl->ops.remove_vsync_handler = mdss_mdp_cmd_remove_vsync_handler; - ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count; + ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count_wrapper; ctl->ops.restore_fnc = mdss_mdp_cmd_restore; ctl->ops.early_wake_up_fnc = mdss_mdp_cmd_early_wake_up; ctl->ops.reconfigure = mdss_mdp_cmd_reconfigure; ctl->ops.pre_programming = mdss_mdp_cmd_pre_programming; ctl->ops.update_lineptr = mdss_mdp_cmd_update_lineptr; + ctl->ops.panel_disable_cfg = mdss_mdp_cmd_panel_disable_cfg; pr_debug("%s:-\n", __func__); return 0; diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c index 40b10e368309..e6e03e7d54b2 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c +++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c @@ -124,6 +124,30 @@ static inline void mdp_wb_write(struct mdss_mdp_writeback_ctx *ctx, writel_relaxed(val, ctx->base + reg); } +static void mdss_mdp_set_qos_wb(struct mdss_mdp_ctl *ctl, + struct mdss_mdp_writeback_ctx *ctx) +{ + u32 wb_qos_setup = QOS_LUT_NRT_READ; + struct mdss_mdp_cwb *cwb = NULL; + struct mdss_overlay_private *mdp5_data; + struct mdss_data_type *mdata = mdss_mdp_get_mdata(); + + if (false == test_bit(MDSS_QOS_WB_QOS, mdata->mdss_qos_map)) + return; + + mdp5_data = mfd_to_mdp5_data(ctl->mfd); + cwb = &mdp5_data->cwb; + + if (cwb->valid) + wb_qos_setup = QOS_LUT_CWB_READ; + else + wb_qos_setup = QOS_LUT_NRT_READ; + + mdp_wb_write(ctx, MDSS_MDP_REG_WB_DANGER_LUT, PANIC_LUT_NRT_READ); + mdp_wb_write(ctx, MDSS_MDP_REG_WB_SAFE_LUT, ROBUST_LUT_NRT_READ); + mdp_wb_write(ctx, MDSS_MDP_REG_WB_CREQ_LUT, wb_qos_setup); +} + static void mdss_mdp_set_ot_limit_wb(struct mdss_mdp_writeback_ctx *ctx, int is_wfd) { @@ -447,7 +471,7 @@ int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl, cwb = &mdp5_data->cwb; ctx = (struct mdss_mdp_writeback_ctx *)cwb->priv_data; - buffer = &cwb->layer->buffer; + buffer = &cwb->layer.buffer; ctx->opmode = 0; ctx->img_width = buffer->width; @@ -495,6 +519,8 @@ int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl, if (ctl->mdata->default_ot_wr_limit || ctl->mdata->default_ot_rd_limit) mdss_mdp_set_ot_limit_wb(ctx, false); + mdss_mdp_set_qos_wb(ctl, ctx); + return ret; } @@ -897,6 +923,8 @@ static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void 
*arg) ctl->mdata->default_ot_rd_limit) mdss_mdp_set_ot_limit_wb(ctx, true); + mdss_mdp_set_qos_wb(ctl, ctx); + wb_args = (struct mdss_mdp_writeback_arg *) arg; if (!wb_args) return -ENOENT; diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c index 91d4332700b6..0f0df2256f74 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_layer.c +++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c @@ -2285,12 +2285,12 @@ end: return ret; } -int __is_cwb_requested(uint32_t output_layer_flags) +int __is_cwb_requested(uint32_t commit_flags) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); int req = 0; - req = output_layer_flags & MDP_COMMIT_CWB_EN; + req = commit_flags & MDP_COMMIT_CWB_EN; if (req && !test_bit(MDSS_CAPS_CWB_SUPPORTED, mdata->mdss_caps_map)) { pr_err("CWB not supported"); return -ENODEV; @@ -2330,7 +2330,7 @@ int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd, return -EINVAL; if (commit->output_layer) { - ret = __is_cwb_requested(commit->output_layer->flags); + ret = __is_cwb_requested(commit->flags); if (IS_ERR_VALUE(ret)) { return ret; } else if (ret) { @@ -2493,7 +2493,7 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd, } if (commit->output_layer) { - rc = __is_cwb_requested(commit->output_layer->flags); + rc = __is_cwb_requested(commit->flags); if (IS_ERR_VALUE(rc)) { return rc; } else if (rc) { @@ -2553,7 +2553,7 @@ int mdss_mdp_layer_pre_commit_cwb(struct msm_fb_data_type *mfd, return rc; } - mdp5_data->cwb.layer = commit->output_layer; + mdp5_data->cwb.layer = *commit->output_layer; mdp5_data->cwb.wb_idx = commit->output_layer->writeback_ndx; mutex_lock(&mdp5_data->cwb.queue_lock); diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c index 9dda467e53cc..965d4a6cfb5e 100644 --- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c +++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c @@ -3243,6 +3243,110 @@ static ssize_t mdss_mdp_dyn_pu_store(struct device *dev, return count; } + +static ssize_t mdss_mdp_panel_disable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + struct fb_info *fbi = dev_get_drvdata(dev); + struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par; + struct mdss_mdp_ctl *ctl; + struct mdss_panel_data *pdata; + + if (!mfd) { + pr_err("Invalid mfd structure\n"); + return -EINVAL; + } + + ctl = mfd_to_ctl(mfd); + if (!ctl) { + pr_err("Invalid ctl structure\n"); + return -EINVAL; + } + + pdata = dev_get_platdata(&mfd->pdev->dev); + + ret = snprintf(buf, PAGE_SIZE, "%d\n", + pdata->panel_disable_mode); + + return ret; +} + +int mdss_mdp_enable_panel_disable_mode(struct msm_fb_data_type *mfd, + bool disable_panel) +{ + struct mdss_mdp_ctl *ctl; + int ret = 0; + struct mdss_panel_data *pdata; + + ctl = mfd_to_ctl(mfd); + if (!ctl) { + pr_err("Invalid ctl structure\n"); + ret = -EINVAL; + return ret; + } + + pdata = dev_get_platdata(&mfd->pdev->dev); + + pr_debug("config panel %d\n", disable_panel); + if (disable_panel) { + /* first set the flag that we enter this mode */ + pdata->panel_disable_mode = true; + + /* + * setup any interface config that needs to change before + * disabling the panel + */ + if (ctl->ops.panel_disable_cfg) + ctl->ops.panel_disable_cfg(ctl, disable_panel); + + /* disable panel */ + ret = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_DISABLE_PANEL, + NULL, CTL_INTF_EVENT_FLAG_DEFAULT); + if (ret) + pr_err("failed to disable panel! 
%d\n", ret); + } else { + /* restore any interface configuration */ + if (ctl->ops.panel_disable_cfg) + ctl->ops.panel_disable_cfg(ctl, disable_panel); + + /* + * no other action is needed when reconfiguring, since all the + * re-configuration will happen during restore + */ + pdata->panel_disable_mode = false; + } + + return ret; +} + +static ssize_t mdss_mdp_panel_disable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + int disable_panel, rc; + struct fb_info *fbi = dev_get_drvdata(dev); + struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par; + + if (!mfd) { + pr_err("Invalid mfd structure\n"); + rc = -EINVAL; + return rc; + } + + rc = kstrtoint(buf, 10, &disable_panel); + if (rc) { + pr_err("kstrtoint failed. rc=%d\n", rc); + return rc; + } + + pr_debug("disable panel: %d ++\n", disable_panel); + /* we only support disabling the panel from sysfs */ + if (disable_panel) + mdss_mdp_enable_panel_disable_mode(mfd, true); + + return len; +} + static ssize_t mdss_mdp_cmd_autorefresh_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -3433,6 +3537,8 @@ static DEVICE_ATTR(msm_misr_en, S_IRUGO | S_IWUSR, mdss_mdp_misr_show, mdss_mdp_misr_store); static DEVICE_ATTR(msm_cmd_autorefresh_en, S_IRUGO | S_IWUSR, mdss_mdp_cmd_autorefresh_show, mdss_mdp_cmd_autorefresh_store); +static DEVICE_ATTR(msm_disable_panel, S_IRUGO | S_IWUSR, + mdss_mdp_panel_disable_show, mdss_mdp_panel_disable_store); static DEVICE_ATTR(vsync_event, S_IRUGO, mdss_mdp_vsync_show_event, NULL); static DEVICE_ATTR(lineptr_event, S_IRUGO, mdss_mdp_lineptr_show_event, NULL); static DEVICE_ATTR(lineptr_value, S_IRUGO | S_IWUSR | S_IWGRP, @@ -3454,6 +3560,7 @@ static struct attribute *mdp_overlay_sysfs_attrs[] = { &dev_attr_dyn_pu.attr, &dev_attr_msm_misr_en.attr, &dev_attr_msm_cmd_autorefresh_en.attr, + &dev_attr_msm_disable_panel.attr, &dev_attr_hist_event.attr, &dev_attr_bl_event.attr, &dev_attr_ad_event.attr, diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h index b2b647dcc017..463d26643dde 100644 --- a/drivers/video/fbdev/msm/mdss_panel.h +++ b/drivers/video/fbdev/msm/mdss_panel.h @@ -104,6 +104,7 @@ enum { MDSS_PANEL_POWER_ON, MDSS_PANEL_POWER_LP1, MDSS_PANEL_POWER_LP2, + MDSS_PANEL_POWER_LCD_DISABLED, }; enum { @@ -265,6 +266,7 @@ enum mdss_intf_events { MDSS_EVENT_DSI_RESET_WRITE_PTR, MDSS_EVENT_PANEL_TIMING_SWITCH, MDSS_EVENT_DEEP_COLOR, + MDSS_EVENT_DISABLE_PANEL, MDSS_EVENT_MAX, }; @@ -633,6 +635,10 @@ struct mdss_panel_info { u32 saved_fporch; /* current fps, once is programmed in hw */ int current_fps; + u32 mdp_koff_thshold_low; + u32 mdp_koff_thshold_high; + bool mdp_koff_thshold; + u32 mdp_koff_delay; int panel_max_fps; int panel_max_vtotal; @@ -692,6 +698,7 @@ struct mdss_panel_info { char panel_name[MDSS_MAX_PANEL_LEN]; struct mdss_mdp_pp_tear_check te; + struct mdss_mdp_pp_tear_check te_cached; /* * Value of 2 only when single DSI is configured with 2 DSC @@ -789,6 +796,12 @@ struct mdss_panel_data { /* To store dsc cfg name passed by bootloader */ char dsc_cfg_np_name[MDSS_MAX_PANEL_LEN]; struct mdss_panel_data *next; + + /* + * Set when the power of the panel is disabled while dsi/mdp + * are still on; panel will recover after unblank + */ + bool panel_disable_mode; }; struct mdss_panel_debugfs_info { diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c index b5da4ad1a86b..eab7bcaaa156 100644 --- a/drivers/video/fbdev/msm/mdss_smmu.c +++ 
b/drivers/video/fbdev/msm/mdss_smmu.c @@ -573,7 +573,6 @@ int mdss_smmu_probe(struct platform_device *pdev) struct mdss_smmu_domain smmu_domain; const struct of_device_id *match; struct dss_module_power *mp; - int disable_htw = 1; char name[MAX_CLIENT_NAME_LEN]; const __be32 *address = NULL, *size = NULL; @@ -667,13 +666,6 @@ int mdss_smmu_probe(struct platform_device *pdev) goto disable_power; } - rc = iommu_domain_set_attr(mdss_smmu->mmu_mapping->domain, - DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw); - if (rc) { - pr_err("couldn't disable coherent HTW\n"); - goto release_mapping; - } - if (smmu_domain.domain == MDSS_IOMMU_DOMAIN_SECURE || smmu_domain.domain == MDSS_IOMMU_DOMAIN_ROT_SECURE) { int secure_vmid = VMID_CP_PIXEL; diff --git a/drivers/video/fbdev/msm/msm_ext_display.c b/drivers/video/fbdev/msm/msm_ext_display.c index e229f52057d4..4899231787f2 100644 --- a/drivers/video/fbdev/msm/msm_ext_display.c +++ b/drivers/video/fbdev/msm/msm_ext_display.c @@ -365,6 +365,7 @@ static int msm_ext_disp_hpd(struct platform_device *pdev, ext_disp->ops->get_audio_edid_blk = NULL; ext_disp->ops->cable_status = NULL; ext_disp->ops->get_intf_id = NULL; + ext_disp->ops->teardown_done = NULL; } ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX; @@ -463,6 +464,20 @@ end: return ret; } +static void msm_ext_disp_teardown_done(struct platform_device *pdev) +{ + int ret = 0; + struct msm_ext_disp_init_data *data = NULL; + + ret = msm_ext_disp_get_intf_data_helper(pdev, &data); + if (ret || !data) { + pr_err("invalid input"); + return; + } + + data->codec_ops.teardown_done(data->pdev); +} + static int msm_ext_disp_get_intf_id(struct platform_device *pdev) { int ret = 0; @@ -545,6 +560,8 @@ static int msm_ext_disp_notify(struct platform_device *pdev, msm_ext_disp_cable_status; ext_disp->ops->get_intf_id = msm_ext_disp_get_intf_id; + ext_disp->ops->teardown_done = + msm_ext_disp_teardown_done; } switch_set_state(&ext_disp->audio_sdev, (int)new_state); @@ -614,6 +631,7 @@ static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack) ext_disp->ops->get_audio_edid_blk = NULL; ext_disp->ops->cable_status = NULL; ext_disp->ops->get_intf_id = NULL; + ext_disp->ops->teardown_done = NULL; } ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index b15e6edb8f2c..933f1866b811 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3602,6 +3602,7 @@ int ext4_can_truncate(struct inode *inode) int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) { +#if 0 struct super_block *sb = inode->i_sb; ext4_lblk_t first_block, stop_block; struct address_space *mapping = inode->i_mapping; @@ -3725,6 +3726,12 @@ out_dio: out_mutex: mutex_unlock(&inode->i_mutex); return ret; +#else + /* + * Disabled as per b/28760453 + */ + return -EOPNOTSUPP; +#endif } int ext4_inode_attach_jinode(struct inode *inode) diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c index e8671942c2a0..785af63acabd 100644 --- a/fs/fuse/passthrough.c +++ b/fs/fuse/passthrough.c @@ -71,10 +71,12 @@ static ssize_t fuse_passthrough_read_write_iter(struct kiocb *iocb, struct fuse_file *ff; struct file *fuse_file, *passthrough_filp; struct inode *fuse_inode, *passthrough_inode; + struct fuse_conn *fc; ff = iocb->ki_filp->private_data; fuse_file = iocb->ki_filp; passthrough_filp = ff->passthrough_filp; + fc = ff->fc; /* lock passthrough file to prevent it from being released */ get_file(passthrough_filp); @@ -88,7 +90,9 @@ static ssize_t fuse_passthrough_read_write_iter(struct kiocb *iocb, 
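/*
 * Note on the fc->lock taken just below (an inference, not stated in the
 * patch itself): i_size is a loff_t, and on 32-bit SMP kernels
 * i_size_write()/i_size_read() are not atomic, so fsstack_copy_inode_size()
 * must be serialized against other updaters of the fuse inode size;
 * fc->lock is the lock fuse already uses around its own i_size updates.
 */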
ret_val = passthrough_filp->f_op->write_iter(iocb, iter); if (ret_val >= 0 || ret_val == -EIOCBQUEUED) { + spin_lock(&fc->lock); fsstack_copy_inode_size(fuse_inode, passthrough_inode); + spin_unlock(&fc->lock); fsstack_copy_attr_times(fuse_inode, passthrough_inode); } } else { diff --git a/include/dt-bindings/clock/qcom,gcc-msmfalcon.h b/include/dt-bindings/clock/qcom,gcc-msmfalcon.h index 0bbcbd28af33..609a20422ed1 100644 --- a/include/dt-bindings/clock/qcom,gcc-msmfalcon.h +++ b/include/dt-bindings/clock/qcom,gcc-msmfalcon.h @@ -207,52 +207,4 @@ #define GCC_USB_30_BCR 7 #define GCC_USB_PHY_CFG_AHB2PHY_BCR 8 -/* RPM controlled clocks */ -#define RPM_CE1_CLK 1 -#define RPM_CE1_A_CLK 2 -#define RPM_CXO_CLK_SRC 3 -#define RPM_BIMC_CLK 4 -#define RPM_BIMC_A_CLK 5 -#define RPM_CNOC_CLK 6 -#define RPM_CNOC_A_CLK 7 -#define RPM_SNOC_CLK 8 -#define RPM_SNOC_A_CLK 9 -#define RPM_CNOC_PERIPH_CLK 10 -#define RPM_CNOC_PERIPH_A_CLK 11 -#define RPM_CNOC_PERIPH_KEEPALIVE_A_CLK 12 -#define RPM_LN_BB_CLK1 13 -#define RPM_LN_BB_CLK1_AO 14 -#define RPM_LN_BB_CLK1_PIN 15 -#define RPM_LN_BB_CLK1_PIN_AO 16 -#define RPM_BIMC_MSMBUS_CLK 17 -#define RPM_BIMC_MSMBUS_A_CLK 18 -#define RPM_CNOC_MSMBUS_CLK 19 -#define RPM_CNOC_MSMBUS_A_CLK 20 -#define RPM_CXO_CLK_SRC_AO 21 -#define RPM_CXO_DWC3_CLK 22 -#define RPM_CXO_LPM_CLK 23 -#define RPM_CXO_OTG_CLK 24 -#define RPM_CXO_PIL_LPASS_CLK 25 -#define RPM_CXO_PIL_SSC_CLK 26 -#define RPM_CXO_PIL_SPSS_CLK 27 -#define RPM_DIV_CLK1 28 -#define RPM_DIV_CLK1_AO 29 -#define RPM_IPA_CLK 30 -#define RPM_IPA_A_CLK 31 -#define RPM_MCD_CE1_CLK 32 -#define RPM_MMSSNOC_AXI_CLK 33 -#define RPM_MMSSNOC_AXI_A_CLK 34 -#define RPM_QCEDEV_CE1_CLK 35 -#define RPM_QCRYPTO_CE1_CLK 36 -#define RPM_QDSS_CLK 37 -#define RPM_QDSS_A_CLK 38 -#define RPM_QSEECOM_CE1_CLK 39 -#define RPM_RF_CLK2 40 -#define RPM_RF_CLK2_AO 41 -#define RPM_SCM_CE1_CLK 42 -#define RPM_SNOC_MSMBUS_CLK 43 -#define RPM_SNOC_MSMBUS_A_CLK 44 -#define RPM_AGGRE2_NOC_CLK 45 -#define RPM_AGGRE2_NOC_A_CLK 46 - #endif diff --git a/include/dt-bindings/clock/qcom,gpu-msmfalcon.h b/include/dt-bindings/clock/qcom,gpu-msmfalcon.h index 427c6aae05d3..2ef1e34db3a1 100644 --- a/include/dt-bindings/clock/qcom,gpu-msmfalcon.h +++ b/include/dt-bindings/clock/qcom,gpu-msmfalcon.h @@ -14,27 +14,32 @@ #ifndef _DT_BINDINGS_CLK_MSM_GPU_FALCON_H #define _DT_BINDINGS_CLK_MSM_GPU_FALCON_H -#define GFX3D_CLK_SRC 0 -#define GPU_PLL0_PLL 1 -#define GPU_PLL0_PLL_OUT_AUX 2 -#define GPU_PLL0_PLL_OUT_AUX2 3 -#define GPU_PLL0_PLL_OUT_EARLY 4 -#define GPU_PLL0_PLL_OUT_MAIN 5 -#define GPU_PLL0_PLL_OUT_TEST 6 -#define GPU_PLL1_PLL 7 -#define GPU_PLL1_PLL_OUT_AUX 8 -#define GPU_PLL1_PLL_OUT_AUX2 9 -#define GPU_PLL1_PLL_OUT_EARLY 10 -#define GPU_PLL1_PLL_OUT_MAIN 11 -#define GPU_PLL1_PLL_OUT_TEST 12 -#define GPUCC_CXO_CLK 13 -#define GPUCC_GFX3D_CLK 14 -#define GPUCC_RBBMTIMER_CLK 15 -#define GPUCC_RBCPR_CLK 16 -#define RBBMTIMER_CLK_SRC 18 -#define RBCPR_CLK_SRC 19 +#define GFX3D_CLK_SRC 0 +#define GPU_PLL0_PLL 1 +#define GPU_PLL0_PLL_OUT_AUX 2 +#define GPU_PLL0_PLL_OUT_AUX2 3 +#define GPU_PLL0_PLL_OUT_EARLY 4 +#define GPU_PLL0_PLL_OUT_MAIN 5 +#define GPU_PLL0_PLL_OUT_TEST 6 +#define GPU_PLL1_PLL 7 +#define GPU_PLL1_PLL_OUT_AUX 8 +#define GPU_PLL1_PLL_OUT_AUX2 9 +#define GPU_PLL1_PLL_OUT_EARLY 10 +#define GPU_PLL1_PLL_OUT_MAIN 11 +#define GPU_PLL1_PLL_OUT_TEST 12 +#define GPUCC_CXO_CLK 13 +#define GPUCC_GFX3D_CLK 14 +#define GPUCC_RBBMTIMER_CLK 15 +#define GPUCC_RBCPR_CLK 16 +#define RBBMTIMER_CLK_SRC 17 +#define RBCPR_CLK_SRC 18 -#define GPU_CX_GDSC 0 -#define 
GPU_GX_GDSC 1 +#define GPU_CX_GDSC 0 +#define GPU_GX_GDSC 1 + +#define GPUCC_GPU_CX_BCR 0 +#define GPUCC_GPU_GX_BCR 1 +#define GPUCC_RBCPR_BCR 2 +#define GPUCC_SPDM_BCR 3 #endif diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 1a96fdaa33d5..e133705d794a 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -26,6 +26,10 @@ SUBSYS(cpu) SUBSYS(cpuacct) #endif +#if IS_ENABLED(CONFIG_CGROUP_SCHEDTUNE) +SUBSYS(schedtune) +#endif + #if IS_ENABLED(CONFIG_BLK_CGROUP) SUBSYS(io) #endif diff --git a/include/linux/dma-mapping-fast.h b/include/linux/dma-mapping-fast.h index aa9fcfe73162..ddd126c0fd85 100644 --- a/include/linux/dma-mapping-fast.h +++ b/include/linux/dma-mapping-fast.h @@ -16,6 +16,8 @@ #include <linux/iommu.h> #include <linux/io-pgtable-fast.h> +struct dma_iommu_mapping; + struct dma_fast_smmu_mapping { struct device *dev; struct iommu_domain *domain; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 56855724271c..c34a68ce901a 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -121,7 +121,6 @@ enum iommu_attr { DOMAIN_ATTR_FSL_PAMU_ENABLE, DOMAIN_ATTR_FSL_PAMUV1, DOMAIN_ATTR_NESTING, /* two stages of translation */ - DOMAIN_ATTR_COHERENT_HTW_DISABLE, DOMAIN_ATTR_PT_BASE_ADDR, DOMAIN_ATTR_SECURE_VMID, DOMAIN_ATTR_ATOMIC, @@ -650,8 +649,8 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link) { } -static int iommu_dma_supported(struct iommu_domain *domain, struct device *dev, - u64 mask) +static inline int iommu_dma_supported(struct iommu_domain *domain, + struct device *dev, u64 mask) { return -EINVAL; } diff --git a/include/linux/msm_dma_iommu_mapping.h b/include/linux/msm_dma_iommu_mapping.h index 76451faa2073..73e69383b9b6 100644 --- a/include/linux/msm_dma_iommu_mapping.h +++ b/include/linux/msm_dma_iommu_mapping.h @@ -90,7 +90,7 @@ static inline void msm_dma_unmap_sg(struct device *dev, { } -int msm_dma_unmap_all_for_dev(struct device *dev) +static inline int msm_dma_unmap_all_for_dev(struct device *dev) { return 0; } diff --git a/include/linux/msm_ext_display.h b/include/linux/msm_ext_display.h index 873a778d5370..59ba776b5f9b 100644 --- a/include/linux/msm_ext_display.h +++ b/include/linux/msm_ext_display.h @@ -108,6 +108,7 @@ struct msm_ext_disp_audio_codec_ops { struct msm_ext_disp_audio_edid_blk *blk); int (*cable_status)(struct platform_device *pdev, u32 vote); int (*get_intf_id)(struct platform_device *pdev); + void (*teardown_done)(struct platform_device *pdev); }; /* diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h index c95a529b029b..fb2607dd365b 100644 --- a/include/linux/msm_gsi.h +++ b/include/linux/msm_gsi.h @@ -13,6 +13,14 @@ #define MSM_GSI_H #include <linux/types.h> +enum gsi_ver { + GSI_VER_ERR = 0, + GSI_VER_1_0 = 1, + GSI_VER_1_2 = 2, + GSI_VER_1_3 = 3, + GSI_VER_MAX, +}; + enum gsi_status { GSI_STATUS_SUCCESS = 0, GSI_STATUS_ERROR = 1, @@ -65,6 +73,7 @@ enum gsi_intr_type { /** * gsi_per_props - Peripheral related properties * + * @gsi: GSI core version * @ee: EE where this driver and peripheral driver runs * @intr: control interrupt type * @intvec: write data for MSI write @@ -87,6 +96,7 @@ enum gsi_intr_type { * */ struct gsi_per_props { + enum gsi_ver ver; unsigned int ee; enum gsi_intr_type intr; uint32_t intvec; diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h index b13ebe50c3d6..7c12823894df 100644 --- a/include/linux/qpnp/qpnp-revid.h +++ b/include/linux/qpnp/qpnp-revid.h @@ -212,6 +212,7 @@ 
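/*
 * Aside on the two header fixes above (iommu.h and msm_dma_iommu_mapping.h),
 * illustrative and not part of this patch: a fallback stub defined in a
 * header must be 'static inline', otherwise every translation unit that
 * includes the header emits its own external definition and the link fails
 * with multiple-definition errors. Hypothetical pattern:
 *
 *	// foo.h
 *	#ifdef CONFIG_FOO
 *	int foo_do_thing(struct device *dev);
 *	#else
 *	static inline int foo_do_thing(struct device *dev) { return 0; }
 *	#endif
 */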
struct pmic_revid_data { u8 pmic_type; u8 pmic_subtype; const char *pmic_name; + int fab_id; }; #ifdef CONFIG_QPNP_REVID diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 861f715a673d..9fe71c774543 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -121,6 +121,22 @@ extern int sysctl_sched_rt_runtime; extern unsigned int sysctl_sched_cfs_bandwidth_slice; #endif +#ifdef CONFIG_SCHED_TUNE +extern unsigned int sysctl_sched_cfs_boost; +int sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, + loff_t *ppos); +static inline unsigned int get_sysctl_sched_cfs_boost(void) +{ + return sysctl_sched_cfs_boost; +} +#else +static inline unsigned int get_sysctl_sched_cfs_boost(void) +{ + return 0; +} +#endif + #ifdef CONFIG_SCHED_AUTOGROUP extern unsigned int sysctl_sched_autogroup_enabled; #endif diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 3740366d9fc5..cef429cf3dce 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -502,7 +502,7 @@ extern void usb_hc_died(struct usb_hcd *hcd); extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd); extern void usb_wakeup_notification(struct usb_device *hdev, unsigned int portnum); - +extern void usb_flush_hub_wq(void); extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum); extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum); diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h index 8704b2e7cfbc..7e2f32883aa4 100644 --- a/include/soc/qcom/icnss.h +++ b/include/soc/qcom/icnss.h @@ -24,8 +24,6 @@ struct icnss_driver_ops { void (*shutdown)(struct device *dev); int (*reinit)(struct device *dev); void (*crash_shutdown)(void *pdev); - int (*suspend)(struct device *dev, pm_message_t state); - int (*resume)(struct device *dev); int (*pm_suspend)(struct device *dev); int (*pm_resume)(struct device *dev); int (*suspend_noirq)(struct device *dev); @@ -125,5 +123,6 @@ extern int icnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 *ch_count, u16 buf_len); extern int icnss_wlan_set_dfs_nol(const void *info, u16 info_len); extern int icnss_wlan_get_dfs_nol(void *info, u16 info_len); +extern bool icnss_is_qmi_disable(void); #endif /* _ICNSS_WLAN_H_ */ diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h index 1a58a146c3b0..06b72b262395 100644 --- a/include/sound/apr_audio-v2.h +++ b/include/sound/apr_audio-v2.h @@ -3678,6 +3678,8 @@ struct asm_softvolume_params { #define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3 0x00010DDC +#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V4 0x0001320C + #define ASM_MEDIA_FMT_EVRCB_FS 0x00010BEF #define ASM_MEDIA_FMT_EVRCWB_FS 0x00010BF0 @@ -3780,6 +3782,56 @@ struct asm_multi_channel_pcm_fmt_blk_v3 { */ } __packed; +struct asm_multi_channel_pcm_fmt_blk_v4 { + uint16_t num_channels; +/* + * Number of channels + * Supported values: 1 to 8 + */ + + uint16_t bits_per_sample; +/* + * Number of bits per sample per channel + * Supported values: 16, 24, 32 + */ + + uint32_t sample_rate; +/* + * Number of samples per second + * Supported values: 2000 to 48000, 96000, 192000 Hz + */ + + uint16_t is_signed; +/* Flag that indicates that PCM samples are signed (1) */ + + uint16_t sample_word_size; +/* + * Size in bits of the word that holds a sample of a channel. + * Supported values: 16, 24, 32 + */ + + uint8_t channel_mapping[8]; +/* + * Each element, i, in the array describes channel i inside the buffer where + * 0 <= i < num_channels. 
Unused channels are set to 0. + */ + uint16_t endianness; +/* + * Flag to indicate the endianness of the pcm sample + * Supported values: 0 - Little endian (all other formats) + * 1 - Big endian (AIFF) + */ + uint16_t mode; +/* + * Mode to provide additional info about the pcm input data. + * Supported values: 0 - Default QFs (Q15 for 16b, Q23 for packed 24b, + * Q31 for unpacked 24b or 32b) + * 15 - for 16 bit + * 23 - for 24b packed or 8.24 format + * 31 - for 24b unpacked or 32bit + */ +} __packed; + /* * Payload of the multichannel PCM configuration parameters in * the ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3 media format. @@ -3790,6 +3842,16 @@ struct asm_multi_channel_pcm_fmt_blk_param_v3 { struct asm_multi_channel_pcm_fmt_blk_v3 param; } __packed; +/* + * Payload of the multichannel PCM configuration parameters in + * the ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V4 media format. + */ +struct asm_multi_channel_pcm_fmt_blk_param_v4 { + struct apr_hdr hdr; + struct asm_data_cmd_media_fmt_update_v2 fmt_blk; + struct asm_multi_channel_pcm_fmt_blk_v4 param; +} __packed; + struct asm_stream_cmd_set_encdec_param { u32 param_id; /* ID of the parameter. */ @@ -3825,6 +3887,79 @@ struct asm_dec_ddp_endp_param_v2 { int endp_param_value; } __packed; +/* + * Payload of the multichannel PCM encoder configuration parameters in + * the ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V4 media format. + */ + +struct asm_multi_channel_pcm_enc_cfg_v4 { + struct apr_hdr hdr; + struct asm_stream_cmd_set_encdec_param encdec; + struct asm_enc_cfg_blk_param_v2 encblk; + uint16_t num_channels; + /* + * Number of PCM channels. + * @values + * - 0 -- Native mode + * - 1 -- 8 channels + * Native mode indicates that encoding must be performed with the number + * of channels at the input. + */ + uint16_t bits_per_sample; + /* + * Number of bits per sample per channel. + * @values 16, 24 + */ + uint32_t sample_rate; + /* + * Number of samples per second. + * @values 0, 8000 to 48000 Hz + * A value of 0 indicates the native sampling rate. Encoding is + * performed at the input sampling rate. + */ + uint16_t is_signed; + /* + * Flag that indicates the PCM samples are signed (1). Currently, only + * signed PCM samples are supported. + */ + uint16_t sample_word_size; + /* + * The size in bits of the word that holds a sample of a channel. + * @values 16, 24, 32 + * 16-bit samples are always placed in 16-bit words: + * sample_word_size = 1. + * 24-bit samples can be placed in 32-bit words or in consecutive + * 24-bit words. + * - If sample_word_size = 32, 24-bit samples are placed in the + * most significant 24 bits of a 32-bit word. + * - If sample_word_size = 24, 24-bit samples are placed in + * 24-bit words. @tablebulletend + */ + uint8_t channel_mapping[8]; + /* + * Channel mapping array expected at the encoder output. + * Channel[i] mapping describes channel i inside the buffer, where + * 0 @le i < num_channels. All valid used channels must be present at + * the beginning of the array. + * If Native mode is set for the channels, this field is ignored. + * @values See Section @xref{dox:PcmChannelDefs} + */ + uint16_t endianness; + /* + * Flag to indicate the endianness of the pcm sample + * Supported values: 0 - Little endian (all other formats) + * 1 - Big endian (AIFF) + */ + uint16_t mode; + /* + * Mode to provide additional info about the pcm input data. 
+ * Supported values: 0 - Default QFs (Q15 for 16b, Q23 for packed 24b, + * Q31 for unpacked 24b or 32b) + * 15 - for 16 bit + * 23 - for 24b packed or 8.24 format + * 31 - for 24b unpacked or 32bit + */ +} __packed; /* * Payload of the multichannel PCM encoder configuration parameters in diff --git a/include/sound/q6afe-v2.h b/include/sound/q6afe-v2.h index 9ed6510cd0e1..31f7c02b54b3 100644 --- a/include/sound/q6afe-v2.h +++ b/include/sound/q6afe-v2.h @@ -281,7 +281,7 @@ void afe_set_cal_mode(u16 port_id, enum afe_cal_mode afe_cal_mode); int afe_port_start(u16 port_id, union afe_port_config *afe_config, u32 rate); int afe_port_start_v2(u16 port_id, union afe_port_config *afe_config, - u32 rate, u16 afe_in_channels, + u32 rate, u16 afe_in_channels, u16 afe_in_bit_width, struct afe_enc_config *enc_config); int afe_spk_prot_feed_back_cfg(int src_port, int dst_port, int l_ch, int r_ch, u32 enable); diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h index 00129eb08888..f08bd73edb59 100644 --- a/include/sound/q6asm-v2.h +++ b/include/sound/q6asm-v2.h @@ -97,6 +97,24 @@ #define ASM_SHIFT_GAPLESS_MODE_FLAG 31 #define ASM_SHIFT_LAST_BUFFER_FLAG 30 +#define ASM_LITTLE_ENDIAN 0 +#define ASM_BIG_ENDIAN 1 + +/* PCM_MEDIA_FORMAT_Version */ +enum { + PCM_MEDIA_FORMAT_V2 = 0, + PCM_MEDIA_FORMAT_V3, + PCM_MEDIA_FORMAT_V4, +}; + +/* PCM format modes in DSP */ +enum { + DEFAULT_QF = 0, + Q15 = 15, + Q23 = 23, + Q31 = 31, +}; + /* payload structure bytes */ #define READDONE_IDX_STATUS 0 #define READDONE_IDX_BUFADD_LSW 1 @@ -245,6 +263,9 @@ int q6asm_open_read_v2(struct audio_client *ac, uint32_t format, int q6asm_open_read_v3(struct audio_client *ac, uint32_t format, uint16_t bits_per_sample); +int q6asm_open_read_v4(struct audio_client *ac, uint32_t format, + uint16_t bits_per_sample); + int q6asm_open_write(struct audio_client *ac, uint32_t format /*, uint16_t bits_per_sample*/); @@ -257,6 +278,9 @@ int q6asm_open_shared_io(struct audio_client *ac, int q6asm_open_write_v3(struct audio_client *ac, uint32_t format, uint16_t bits_per_sample); +int q6asm_open_write_v4(struct audio_client *ac, uint32_t format, + uint16_t bits_per_sample); + int q6asm_stream_open_write_v2(struct audio_client *ac, uint32_t format, uint16_t bits_per_sample, int32_t stream_id, bool is_gapless_mode); @@ -265,6 +289,10 @@ int q6asm_stream_open_write_v3(struct audio_client *ac, uint32_t format, uint16_t bits_per_sample, int32_t stream_id, bool is_gapless_mode); +int q6asm_stream_open_write_v4(struct audio_client *ac, uint32_t format, + uint16_t bits_per_sample, int32_t stream_id, + bool is_gapless_mode); + int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format, uint32_t passthrough_flag); @@ -369,6 +397,13 @@ int q6asm_enc_cfg_blk_pcm_v3(struct audio_client *ac, bool use_back_flavor, u8 *channel_map, uint16_t sample_word_size); +int q6asm_enc_cfg_blk_pcm_v4(struct audio_client *ac, + uint32_t rate, uint32_t channels, + uint16_t bits_per_sample, bool use_default_chmap, + bool use_back_flavor, u8 *channel_map, + uint16_t sample_word_size, uint16_t endianness, + uint16_t mode); + int q6asm_enc_cfg_blk_pcm_format_support(struct audio_client *ac, uint32_t rate, uint32_t channels, uint16_t bits_per_sample); @@ -378,6 +413,13 @@ int q6asm_enc_cfg_blk_pcm_format_support_v3(struct audio_client *ac, uint16_t bits_per_sample, uint16_t sample_word_size); +int q6asm_enc_cfg_blk_pcm_format_support_v4(struct audio_client *ac, + uint32_t rate, uint32_t channels, + uint16_t bits_per_sample, + uint16_t sample_word_size, 
+ uint16_t endianness, + uint16_t mode); + int q6asm_set_encdec_chan_map(struct audio_client *ac, uint32_t num_channels); @@ -427,6 +469,17 @@ int q6asm_media_format_block_pcm_format_support_v3(struct audio_client *ac, char *channel_map, uint16_t sample_word_size); +int q6asm_media_format_block_pcm_format_support_v4(struct audio_client *ac, + uint32_t rate, + uint32_t channels, + uint16_t bits_per_sample, + int stream_id, + bool use_default_chmap, + char *channel_map, + uint16_t sample_word_size, + uint16_t endianness, + uint16_t mode); + int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac, uint32_t rate, uint32_t channels, bool use_default_chmap, char *channel_map); @@ -444,6 +497,15 @@ int q6asm_media_format_block_multi_ch_pcm_v3(struct audio_client *ac, uint16_t bits_per_sample, uint16_t sample_word_size); +int q6asm_media_format_block_multi_ch_pcm_v4(struct audio_client *ac, + uint32_t rate, uint32_t channels, + bool use_default_chmap, + char *channel_map, + uint16_t bits_per_sample, + uint16_t sample_word_size, + uint16_t endianness, + uint16_t mode); + int q6asm_media_format_block_aac(struct audio_client *ac, struct asm_aac_cfg *cfg); diff --git a/include/sound/wcd-dsp-mgr.h b/include/sound/wcd-dsp-mgr.h index 5adcbcf660ba..aa3b363e95e1 100644 --- a/include/sound/wcd-dsp-mgr.h +++ b/include/sound/wcd-dsp-mgr.h @@ -36,6 +36,9 @@ enum wdsp_cmpnt_type { }; enum wdsp_event_type { + /* Initialization related */ + WDSP_EVENT_POST_INIT, + /* Image download related */ WDSP_EVENT_PRE_DLOAD_CODE, WDSP_EVENT_DLOAD_SECTION, @@ -44,6 +47,8 @@ enum wdsp_event_type { WDSP_EVENT_POST_DLOAD_DATA, WDSP_EVENT_DLOAD_FAILED, + WDSP_EVENT_READ_SECTION, + /* DSP boot related */ WDSP_EVENT_PRE_BOOTUP, WDSP_EVENT_DO_BOOT, @@ -62,6 +67,7 @@ enum wdsp_event_type { enum wdsp_intr { WDSP_IPC1_INTR, + WDSP_ERR_INTR, }; /* @@ -86,6 +92,12 @@ struct wdsp_img_section { u8 *data; }; +struct wdsp_err_intr_arg { + bool mem_dumps_enabled; + u32 remote_start_addr; + size_t dump_size; +}; + /* * wdsp_ops: ops/function callbacks for manager driver * @register_cmpnt_ops: components will use this to register @@ -109,7 +121,7 @@ struct wdsp_mgr_ops { struct device *(*get_dev_for_cmpnt)(struct device *wdsp_dev, enum wdsp_cmpnt_type type); int (*intr_handler)(struct device *wdsp_dev, - enum wdsp_intr intr); + enum wdsp_intr intr, void *arg); int (*vote_for_dsp)(struct device *wdsp_dev, bool vote); int (*suspend)(struct device *wdsp_dev); int (*resume)(struct device *wdsp_dev); diff --git a/include/trace/events/trace_msm_low_power.h b/include/trace/events/trace_msm_low_power.h index 691df1b2689b..97eefc665130 100644 --- a/include/trace/events/trace_msm_low_power.h +++ b/include/trace/events/trace_msm_low_power.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012, 2014-2015, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012, 2014-2016, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -43,6 +43,54 @@ TRACE_EVENT(cpu_power_select, __entry->next_event_us) ); +TRACE_EVENT(cpu_pred_select, + + TP_PROTO(u32 predtype, u64 predicted, u32 tmr_time), + + TP_ARGS(predtype, predicted, tmr_time), + + TP_STRUCT__entry( + __field(u32, predtype) + __field(u64, predicted) + __field(u32, tmr_time) + ), + + TP_fast_assign( + __entry->predtype = predtype; + __entry->predicted = predicted; + __entry->tmr_time = tmr_time; + ), + + TP_printk("pred:%u time:%lu tmr_time:%u", + __entry->predtype, (unsigned long)__entry->predicted, + __entry->tmr_time) +); + +TRACE_EVENT(cpu_pred_hist, + + TP_PROTO(int idx, u32 resi, u32 sample, u32 tmr), + + TP_ARGS(idx, resi, sample, tmr), + + TP_STRUCT__entry( + __field(int, idx) + __field(u32, resi) + __field(u32, sample) + __field(u32, tmr) + ), + + TP_fast_assign( + __entry->idx = idx; + __entry->resi = resi; + __entry->sample = sample; + __entry->tmr = tmr; + ), + + TP_printk("idx:%d resi:%u sample:%u tmr:%u", + __entry->idx, __entry->resi, + __entry->sample, __entry->tmr) +); + TRACE_EVENT(cpu_idle_enter, TP_PROTO(int index), @@ -144,6 +192,64 @@ TRACE_EVENT(cluster_exit, __entry->from_idle) ); +TRACE_EVENT(cluster_pred_select, + + TP_PROTO(const char *name, int index, u32 sleep_us, + u32 latency, int pred, u32 pred_us), + + TP_ARGS(name, index, sleep_us, latency, pred, pred_us), + + TP_STRUCT__entry( + __field(const char *, name) + __field(int, index) + __field(u32, sleep_us) + __field(u32, latency) + __field(int, pred) + __field(u32, pred_us) + ), + + TP_fast_assign( + __entry->name = name; + __entry->index = index; + __entry->sleep_us = sleep_us; + __entry->latency = latency; + __entry->pred = pred; + __entry->pred_us = pred_us; + ), + + TP_printk("name:%s idx:%d sleep_time:%u latency:%u pred:%d pred_us:%u", + __entry->name, __entry->index, __entry->sleep_us, + __entry->latency, __entry->pred, __entry->pred_us) +); + +TRACE_EVENT(cluster_pred_hist, + + TP_PROTO(const char *name, int idx, u32 resi, + u32 sample, u32 tmr), + + TP_ARGS(name, idx, resi, sample, tmr), + + TP_STRUCT__entry( + __field(const char *, name) + __field(int, idx) + __field(u32, resi) + __field(u32, sample) + __field(u32, tmr) + ), + + TP_fast_assign( + __entry->name = name; + __entry->idx = idx; + __entry->resi = resi; + __entry->sample = sample; + __entry->tmr = tmr; + ), + + TP_printk("name:%s idx:%d resi:%u sample:%u tmr:%u", + __entry->name, __entry->idx, __entry->resi, + __entry->sample, __entry->tmr) +); + TRACE_EVENT(pre_pc_cb, TP_PROTO(int tzflag), diff --git a/include/uapi/linux/msm_vidc_dec.h b/include/uapi/linux/msm_vidc_dec.h index f502c81665a4..48ce8e929fbf 100644 --- a/include/uapi/linux/msm_vidc_dec.h +++ b/include/uapi/linux/msm_vidc_dec.h @@ -486,10 +486,14 @@ enum vdec_interlaced_format { VDEC_InterlaceInterleaveFrameBottomFieldFirst = 0x4 }; +#define VDEC_YUV_FORMAT_NV12_TP10_UBWC \ + VDEC_YUV_FORMAT_NV12_TP10_UBWC + enum vdec_output_fromat { VDEC_YUV_FORMAT_NV12 = 0x1, VDEC_YUV_FORMAT_TILE_4x2 = 0x2, - VDEC_YUV_FORMAT_NV12_UBWC = 0x3 + VDEC_YUV_FORMAT_NV12_UBWC = 0x3, + VDEC_YUV_FORMAT_NV12_TP10_UBWC = 0x4 }; enum vdec_output_order { diff --git a/include/uapi/sound/wcd-dsp-glink.h b/include/uapi/sound/wcd-dsp-glink.h index db92e6b41340..39d128d370a0 100644 --- a/include/uapi/sound/wcd-dsp-glink.h +++ b/include/uapi/sound/wcd-dsp-glink.h @@ -8,7 +8,9 @@ enum { WDSP_REG_PKT = 1, WDSP_CMD_PKT, + WDSP_READY_PKT, }; 
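The enum just above adds a new WDSP_READY_PKT packet type to the wcd-dsp-glink UAPI, and the self-referential #define on the next line is the common UAPI idiom that lets userspace feature-detect the addition at compile time. A minimal sketch of such a compile-time check follows; it is illustrative only and not part of the patch:

#include <stdio.h>
#include <sound/wcd-dsp-glink.h>	/* UAPI header patched above */

int main(void)
{
#ifdef WDSP_READY_PKT
	/* Newer kernels: a ready packet exists, wait for it before
	 * sending command packets. */
	printf("glink UAPI supports ready packets (type %d)\n",
	       WDSP_READY_PKT);
#else
	/* Older UAPI headers: no ready notification is available */
	printf("glink UAPI predates WDSP_READY_PKT\n");
#endif
	return 0;
}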
+#define WDSP_READY_PKT WDSP_READY_PKT /* * struct wdsp_reg_pkt - Glink channel information structure format diff --git a/init/Kconfig b/init/Kconfig index 6020a351c57b..eb9e1a0aa688 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1002,6 +1002,23 @@ config CGROUP_CPUACCT config PAGE_COUNTER bool +config CGROUP_SCHEDTUNE + bool "CFS tasks boosting cgroup subsystem (EXPERIMENTAL)" + depends on SCHED_TUNE + help + This option provides the "schedtune" controller which improves the + flexibility of the task boosting mechanism by introducing the support + to define "per task" boost values. + + This new controller: + 1. allows only a two-layer hierarchy, where the root defines the + system-wide boost value and its direct children each define a + different "class of tasks" to be boosted with a different value + 2. supports up to 16 different task classes, each of which can be + configured with a different boost value + + Say N if unsure. + config MEMCG bool "Memory Resource Controller for Control Groups" select PAGE_COUNTER @@ -1264,6 +1281,32 @@ config SCHED_AUTOGROUP desktop applications. Task group autogeneration is currently based upon task session. +config SCHED_TUNE + bool "Boosting for CFS tasks (EXPERIMENTAL)" + help + This option enables the system-wide support for task boosting. + When this support is enabled a new sysctl interface is exposed to + userspace via: + /proc/sys/kernel/sched_cfs_boost + which allows setting a system-wide boost value in the range [0..100]. + + The current boosting strategy is implemented in such a way that: + - a 0% boost value requires operating in "standard" mode, + scheduling all tasks at the minimum capacities required by their + workload demand + - a 100% boost value requires pushing task performance to the + maximum, "regardless" of the incurred energy consumption + + A boost value in between these two boundaries is used to bias the + power/performance trade-off; the higher the boost value, the more the + scheduler is biased toward performance boosting instead of energy + efficiency. + + Since this support exposes a single system-wide knob, the specified + boost value is applied to all (CFS) tasks in the system. + + If unsure, say N. 
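For illustration, a minimal userspace sketch of how the two knobs described in the help texts above fit together. The sysctl path and the [0..100] range come from the help text; the schedtune cgroup mount point and group name are assumptions, and per the sysctl table added later in this patch the sysctl becomes read-only (mode 0444) once CGROUP_SCHEDTUNE is enabled:

/* Illustrative only: exercises the SCHED_TUNE / CGROUP_SCHEDTUNE
 * interfaces. All paths other than the sysctl are assumptions. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* System-wide boost, [0..100]; mode is 0444 when
	 * CGROUP_SCHEDTUNE is set, so this write only succeeds in the
	 * sysctl-only configuration. */
	if (write_str("/proc/sys/kernel/sched_cfs_boost", "10"))
		perror("sched_cfs_boost");

	/* With CGROUP_SCHEDTUNE, per-class boost goes through the
	 * "boost" cftype instead (mount point and group name are
	 * assumptions for illustration): */
	if (write_str("/sys/fs/cgroup/schedtune/top-app/schedtune.boost",
		      "50"))
		perror("schedtune.boost");
	return 0;
}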
+ config SYSFS_DEPRECATED bool "Enable deprecated sysfs features to support old userspace tools" depends on SYSFS diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c index 104432f3d311..dac3724e4c1e 100644 --- a/kernel/irq/cpuhotplug.c +++ b/kernel/irq/cpuhotplug.c @@ -78,6 +78,9 @@ void irq_migrate_all_off_this_cpu(void) bool affinity_broken; desc = irq_to_desc(irq); + if (!desc) + continue; + raw_spin_lock(&desc->lock); affinity_broken = migrate_one_irq(desc); raw_spin_unlock(&desc->lock); diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 508b65690288..7d0d34c53e08 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -19,5 +19,6 @@ obj-$(CONFIG_SCHED_HMP) += hmp.o obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o obj-$(CONFIG_SCHEDSTATS) += stats.o obj-$(CONFIG_SCHED_DEBUG) += debug.o +obj-$(CONFIG_SCHED_TUNE) += tune.o obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 4489bec5d68a..df23b0365527 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7220,6 +7220,10 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds) .avg_load = 0UL, .sum_nr_running = 0, .group_type = group_other, +#ifdef CONFIG_SCHED_HMP + .sum_nr_big_tasks = 0UL, + .group_cpu_load = 0ULL, +#endif }, }; } diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c new file mode 100644 index 000000000000..3c964d6d3856 --- /dev/null +++ b/kernel/sched/tune.c @@ -0,0 +1,241 @@ +#include <linux/cgroup.h> +#include <linux/err.h> +#include <linux/percpu.h> +#include <linux/printk.h> +#include <linux/slab.h> + +#include "sched.h" + +unsigned int sysctl_sched_cfs_boost __read_mostly; + +#ifdef CONFIG_CGROUP_SCHEDTUNE + +/* + * EAS scheduler tunables for task groups. + */ + +/* SchedTune tunables for a group of tasks */ +struct schedtune { + /* SchedTune CGroup subsystem */ + struct cgroup_subsys_state css; + + /* Boost group allocated ID */ + int idx; + + /* Boost value for tasks on that SchedTune CGroup */ + int boost; + +}; + +static inline struct schedtune *css_st(struct cgroup_subsys_state *css) +{ + return css ? container_of(css, struct schedtune, css) : NULL; +} + +static inline struct schedtune *task_schedtune(struct task_struct *tsk) +{ + return css_st(task_css(tsk, schedtune_cgrp_id)); +} + +static inline struct schedtune *parent_st(struct schedtune *st) +{ + return css_st(st->css.parent); +} + +/* + * SchedTune root control group + * The root control group is used to define a system-wide boosting tuning, + * which is applied to all tasks in the system. + * Task-specific boost tuning could be specified by creating and + * configuring a child control group under the root one. + * By default, system-wide boosting is disabled, i.e. no boosting is applied + * to tasks which are not in a child control group. + */ +static struct schedtune +root_schedtune = { + .boost = 0, +}; + +/* + * Maximum number of boost groups to support + * When per-task boosting is used we still allow only a limited number of + * boost groups for two main reasons: + * 1. on a real system we usually have only a few classes of workloads which + * make sense to boost with different values (e.g. background vs foreground + * tasks, interactive vs low-priority tasks) + * 2. 
a limited number allows for a simpler and more memory/time-efficient + * implementation, especially for the computation of the per-CPU boost + * value + */ +#define BOOSTGROUPS_COUNT 5 + +/* Array of configured boostgroups */ +static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = { + &root_schedtune, + NULL, +}; + +/* SchedTune boost groups + * Keep track of all the boost groups which impact a CPU, for example when a + * CPU has two RUNNABLE tasks belonging to two different boost groups and thus + * likely with different boost values. + * Since on each system we expect only a limited number of boost groups, here + * we use a simple array to keep track of the metrics required to compute the + * maximum per-CPU boosting value. + */ +struct boost_groups { + /* Maximum boost value for all RUNNABLE tasks on a CPU */ + unsigned boost_max; + struct { + /* The boost for tasks on that boost group */ + unsigned boost; + /* Count of RUNNABLE tasks on that boost group */ + unsigned tasks; + } group[BOOSTGROUPS_COUNT]; +}; + +/* Boost groups affecting each CPU in the system */ +DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups); + +static u64 +boost_read(struct cgroup_subsys_state *css, struct cftype *cft) +{ + struct schedtune *st = css_st(css); + + return st->boost; +} + +static int +boost_write(struct cgroup_subsys_state *css, struct cftype *cft, + u64 boost) +{ + struct schedtune *st = css_st(css); + + if (boost < 0 || boost > 100) + return -EINVAL; + + st->boost = boost; + if (css == &root_schedtune.css) + sysctl_sched_cfs_boost = boost; + + return 0; +} + +static struct cftype files[] = { + { + .name = "boost", + .read_u64 = boost_read, + .write_u64 = boost_write, + }, + { } /* terminate */ +}; + +static int +schedtune_boostgroup_init(struct schedtune *st) +{ + /* Keep track of allocated boost groups */ + allocated_group[st->idx] = st; + + return 0; +} + +static int +schedtune_init(void) +{ + struct boost_groups *bg; + int cpu; + + /* Initialize the per CPU boost groups */ + for_each_possible_cpu(cpu) { + bg = &per_cpu(cpu_boost_groups, cpu); + memset(bg, 0, sizeof(struct boost_groups)); + } + + pr_info(" schedtune configured to support %d boost groups\n", + BOOSTGROUPS_COUNT); + return 0; +} + +static struct cgroup_subsys_state * +schedtune_css_alloc(struct cgroup_subsys_state *parent_css) +{ + struct schedtune *st; + int idx; + + if (!parent_css) { + schedtune_init(); + return &root_schedtune.css; + } + + /* Allow only single-level hierarchies */ + if (parent_css != &root_schedtune.css) { + pr_err("Nested SchedTune boosting groups not allowed\n"); + return ERR_PTR(-ENOMEM); + } + + /* Allow only a limited number of boosting groups */ + for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx) + if (!allocated_group[idx]) + break; + if (idx == BOOSTGROUPS_COUNT) { + pr_err("Trying to create more than %d SchedTune boosting groups\n", + BOOSTGROUPS_COUNT); + return ERR_PTR(-ENOSPC); + } + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto out; + + /* Initialize per-CPU boost group support */ + st->idx = idx; + if (schedtune_boostgroup_init(st)) + goto release; + + return &st->css; + +release: + kfree(st); +out: + return ERR_PTR(-ENOMEM); +} + +static void +schedtune_boostgroup_release(struct schedtune *st) +{ + /* Keep track of allocated boost groups */ + allocated_group[st->idx] = NULL; +} + +static void +schedtune_css_free(struct cgroup_subsys_state *css) +{ + struct schedtune *st = css_st(css); + + schedtune_boostgroup_release(st); + kfree(st); +} + +struct cgroup_subsys 
schedtune_cgrp_subsys = { + .css_alloc = schedtune_css_alloc, + .css_free = schedtune_css_free, + .legacy_cftypes = files, + .early_init = 1, + .allow_attach = subsys_cgroup_allow_attach, +}; + +#endif /* CONFIG_CGROUP_SCHEDTUNE */ + +int +sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + + if (ret || !write) + return ret; + + return 0; +} + diff --git a/kernel/sysctl.c b/kernel/sysctl.c index cdce7d0f5a0e..587dbe09c47d 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -631,6 +631,21 @@ static struct ctl_table kern_table[] = { .extra1 = &one, }, #endif +#ifdef CONFIG_SCHED_TUNE + { + .procname = "sched_cfs_boost", + .data = &sysctl_sched_cfs_boost, + .maxlen = sizeof(sysctl_sched_cfs_boost), +#ifdef CONFIG_CGROUP_SCHEDTUNE + .mode = 0444, +#else + .mode = 0644, +#endif + .proc_handler = &sysctl_sched_cfs_boost_handler, + .extra1 = &zero, + .extra2 = &one_hundred, + }, +#endif #ifdef CONFIG_PROVE_LOCKING { .procname = "prove_locking", diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 36ea0d54e05b..902657d4cac5 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1020,7 +1020,7 @@ choice config DEBUG_SPINLOCK_BITE_ON_BUG bool "Cause a Watchdog Bite on Spinlock bug" - depends on MSM_WATCHDOG_V2 + depends on QCOM_WATCHDOG_V2 help On a spinlock bug, cause a watchdog bite so that we can get the precise state of the system captured at the time of spin dump. This is mutually diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c index 2b3f46c049d4..554522934c44 100644 --- a/lib/asn1_decoder.c +++ b/lib/asn1_decoder.c @@ -74,7 +74,7 @@ next_tag: /* Extract a tag from the data */ tag = data[dp++]; - if (tag == 0) { + if (tag == ASN1_EOC) { /* It appears to be an EOC. */ if (data[dp++] != 0) goto invalid_eoc; @@ -96,10 +96,8 @@ next_tag: /* Extract the length */ len = data[dp++]; - if (len <= 0x7f) { - dp += len; - goto next_tag; - } + if (len <= 0x7f) + goto check_length; if (unlikely(len == ASN1_INDEFINITE_LENGTH)) { /* Indefinite length */ @@ -110,14 +108,18 @@ next_tag: } n = len - 0x80; - if (unlikely(n > sizeof(size_t) - 1)) + if (unlikely(n > sizeof(len) - 1)) goto length_too_long; if (unlikely(n > datalen - dp)) goto data_overrun_error; - for (len = 0; n > 0; n--) { + len = 0; + for (; n > 0; n--) { len <<= 8; len |= data[dp++]; } +check_length: + if (len > datalen - dp) + goto data_overrun_error; dp += len; goto next_tag; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b8f7e621e16e..32027efa5033 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -89,7 +89,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1; EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); /* rfc5961 challenge ack rate limiting */ -int sysctl_tcp_challenge_ack_limit = 100; +int sysctl_tcp_challenge_ack_limit = 1000; int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; @@ -3428,7 +3428,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) static u32 challenge_timestamp; static unsigned int challenge_count; struct tcp_sock *tp = tcp_sk(sk); - u32 now; + u32 count, now; /* First check our per-socket dupack rate limit. */ if (tcp_oow_rate_limited(sock_net(sk), skb, @@ -3436,13 +3436,18 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb) &tp->last_oow_ack_time)) return; - /* Then check the check host-wide RFC 5961 rate limit. */ + /* Then check host-wide RFC 5961 rate limit. 
*/ now = jiffies / HZ; if (now != challenge_timestamp) { + u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1; + challenge_timestamp = now; - challenge_count = 0; + WRITE_ONCE(challenge_count, half + + prandom_u32_max(sysctl_tcp_challenge_ack_limit)); } - if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { + count = READ_ONCE(challenge_count); + if (count > 0) { + WRITE_ONCE(challenge_count, count - 1); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); tcp_send_ack(sk); } diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c index 69246ac9cc87..ee8b27dbec64 100644 --- a/sound/soc/codecs/wcd-dsp-mgr.c +++ b/sound/soc/codecs/wcd-dsp-mgr.c @@ -16,6 +16,8 @@ #include <linux/stringify.h> #include <linux/of.h> #include <linux/component.h> +#include <linux/dma-mapping.h> +#include <soc/qcom/ramdump.h> #include <sound/wcd-dsp-mgr.h> #include "wcd-dsp-utils.h" @@ -75,6 +77,32 @@ static char *wdsp_get_cmpnt_type_string(enum wdsp_cmpnt_type); #define WDSP_STATUS_IS_SET(wdsp, state) (wdsp->status & state) +/* SSR related status macros */ +#define WDSP_SSR_STATUS_WDSP_READY BIT(0) +#define WDSP_SSR_STATUS_CDC_READY BIT(1) +#define WDSP_SSR_STATUS_READY \ + (WDSP_SSR_STATUS_WDSP_READY | WDSP_SSR_STATUS_CDC_READY) +#define WDSP_SSR_READY_WAIT_TIMEOUT (10 * HZ) + +enum wdsp_ssr_type { + + /* Init value, indicates there is no SSR in progress */ + WDSP_SSR_TYPE_NO_SSR = 0, + + /* + * Indicates WDSP crashed. The manager driver internally + * decides when to perform WDSP restart based on the + * users of wdsp. Hence there is no explicit WDSP_UP. + */ + WDSP_SSR_TYPE_WDSP_DOWN, + + /* Indicates codec hardware is down */ + WDSP_SSR_TYPE_CDC_DOWN, + + /* Indicates codec hardware is up, trigger to restart WDSP */ + WDSP_SSR_TYPE_CDC_UP, +}; + struct wdsp_cmpnt { /* OF node of the phandle */ @@ -96,6 +124,21 @@ struct wdsp_cmpnt { struct wdsp_cmpnt_ops *ops; }; +struct wdsp_ramdump_data { + + /* Ramdump device */ + void *rd_dev; + + /* DMA address of the dump */ + dma_addr_t rd_addr; + + /* Virtual address of the dump */ + void *rd_v_addr; + + /* Data provided through error interrupt */ + struct wdsp_err_intr_arg err_data; +}; + struct wdsp_mgr_priv { /* Manager driver's struct device pointer */ @@ -130,8 +173,35 @@ struct wdsp_mgr_priv { /* Lock for serializing ops called by components */ struct mutex api_mutex; + + struct wdsp_ramdump_data dump_data; + + /* SSR related */ + enum wdsp_ssr_type ssr_type; + struct mutex ssr_mutex; + struct work_struct ssr_work; + u16 ready_status; + struct completion ready_compl; }; +static char *wdsp_get_ssr_type_string(enum wdsp_ssr_type type) +{ + switch (type) { + case WDSP_SSR_TYPE_NO_SSR: + return "NO_SSR"; + case WDSP_SSR_TYPE_WDSP_DOWN: + return "WDSP_DOWN"; + case WDSP_SSR_TYPE_CDC_DOWN: + return "CDC_DOWN"; + case WDSP_SSR_TYPE_CDC_UP: + return "CDC_UP"; + default: + pr_err("%s: Invalid ssr_type %d\n", + __func__, type); + return "Invalid"; + } +} + static char *wdsp_get_cmpnt_type_string(enum wdsp_cmpnt_type type) { switch (type) { @@ -148,6 +218,26 @@ static char *wdsp_get_cmpnt_type_string(enum wdsp_cmpnt_type type) } } +static void __wdsp_clr_ready_locked(struct wdsp_mgr_priv *wdsp, + u16 value) +{ + wdsp->ready_status &= ~(value); + WDSP_DBG(wdsp, "ready_status = 0x%x", wdsp->ready_status); +} + +static void __wdsp_set_ready_locked(struct wdsp_mgr_priv *wdsp, + u16 value, bool mark_complete) +{ + wdsp->ready_status |= value; + WDSP_DBG(wdsp, "ready_status = 0x%x", wdsp->ready_status); + + if (mark_complete && 
wdsp->ready_status == WDSP_SSR_STATUS_READY) { + WDSP_DBG(wdsp, "marking ready completion"); + complete(&wdsp->ready_compl); + } +} + static void wdsp_broadcast_event_upseq(struct wdsp_mgr_priv *wdsp, enum wdsp_event_type event, void *data) @@ -199,6 +289,18 @@ static int wdsp_unicast_event(struct wdsp_mgr_priv *wdsp, return ret; } +static void wdsp_deinit_components(struct wdsp_mgr_priv *wdsp) +{ + struct wdsp_cmpnt *cmpnt; + int i; + + for (i = WDSP_CMPNT_TYPE_MAX - 1; i >= 0; i--) { + cmpnt = WDSP_GET_COMPONENT(wdsp, i); + if (cmpnt && cmpnt->ops && cmpnt->ops->deinit) + cmpnt->ops->deinit(cmpnt->cdev, cmpnt->priv_data); + } +} + static int wdsp_init_components(struct wdsp_mgr_priv *wdsp) { struct wdsp_cmpnt *cmpnt; @@ -230,6 +332,8 @@ static int wdsp_init_components(struct wdsp_mgr_priv *wdsp) cmpnt->ops->deinit(cmpnt->cdev, cmpnt->priv_data); } + } else { + wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_INIT, NULL); } return ret; @@ -272,6 +376,7 @@ static int wdsp_download_segments(struct wdsp_mgr_priv *wdsp, struct wdsp_cmpnt *ctl; struct wdsp_img_segment *seg = NULL; enum wdsp_event_type pre, post; + long status; int ret; ctl = WDSP_GET_COMPONENT(wdsp, WDSP_CMPNT_CONTROL); @@ -279,9 +384,11 @@ static int wdsp_download_segments(struct wdsp_mgr_priv *wdsp, if (type == WDSP_ELF_FLAG_RE) { pre = WDSP_EVENT_PRE_DLOAD_CODE; post = WDSP_EVENT_POST_DLOAD_CODE; + status = WDSP_STATUS_CODE_DLOADED; } else if (type == WDSP_ELF_FLAG_WRITE) { pre = WDSP_EVENT_PRE_DLOAD_DATA; post = WDSP_EVENT_POST_DLOAD_DATA; + status = WDSP_STATUS_DATA_DLOADED; } else { WDSP_ERR(wdsp, "Invalid type %u", type); return -EINVAL; @@ -312,6 +419,8 @@ static int wdsp_download_segments(struct wdsp_mgr_priv *wdsp, } } + WDSP_SET_STATUS(wdsp, status); + /* Notify all components that image is downloaded */ wdsp_broadcast_event_downseq(wdsp, post, NULL); @@ -321,42 +430,47 @@ done: return ret; } -static void wdsp_load_fw_image(struct work_struct *work) +static int wdsp_init_and_dload_code_sections(struct wdsp_mgr_priv *wdsp) { - struct wdsp_mgr_priv *wdsp; - struct wdsp_cmpnt *cmpnt; - int ret, idx; - - wdsp = container_of(work, struct wdsp_mgr_priv, load_fw_work); - if (!wdsp) { - pr_err("%s: Invalid private_data\n", __func__); - goto done; - } + int ret; + bool is_initialized; - /* Initialize the components first */ - ret = wdsp_init_components(wdsp); - if (IS_ERR_VALUE(ret)) - goto done; + is_initialized = WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_INITIALIZED); - /* Set init done status */ - WDSP_SET_STATUS(wdsp, WDSP_STATUS_INITIALIZED); + if (!is_initialized) { + /* Components are not initialized yet, initialize them */ + ret = wdsp_init_components(wdsp); + if (IS_ERR_VALUE(ret)) { + WDSP_ERR(wdsp, "INIT failed, err = %d", ret); + goto done; + } + WDSP_SET_STATUS(wdsp, WDSP_STATUS_INITIALIZED); + } /* Download the read-execute sections of image */ ret = wdsp_download_segments(wdsp, WDSP_ELF_FLAG_RE); if (IS_ERR_VALUE(ret)) { WDSP_ERR(wdsp, "Error %d to download code sections", ret); - for (idx = 0; idx < WDSP_CMPNT_TYPE_MAX; idx++) { - cmpnt = WDSP_GET_COMPONENT(wdsp, idx); - if (cmpnt->ops && cmpnt->ops->deinit) - cmpnt->ops->deinit(cmpnt->cdev, - cmpnt->priv_data); - } - WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_INITIALIZED); + goto done; } - - WDSP_SET_STATUS(wdsp, WDSP_STATUS_CODE_DLOADED); done: - return; + return ret; +} + +static void wdsp_load_fw_image(struct work_struct *work) +{ + struct wdsp_mgr_priv *wdsp; + int ret; + + wdsp = container_of(work, struct wdsp_mgr_priv, load_fw_work); + if (!wdsp) { + pr_err("%s: 
Invalid private_data\n", __func__); + return; + } + + ret = wdsp_init_and_dload_code_sections(wdsp); + if (IS_ERR_VALUE(ret)) + WDSP_ERR(wdsp, "dload code sections failed, err = %d", ret); } static int wdsp_enable_dsp(struct wdsp_mgr_priv *wdsp) @@ -377,8 +491,6 @@ static int wdsp_enable_dsp(struct wdsp_mgr_priv *wdsp) goto done; } - WDSP_SET_STATUS(wdsp, WDSP_STATUS_DATA_DLOADED); - wdsp_broadcast_event_upseq(wdsp, WDSP_EVENT_PRE_BOOTUP, NULL); ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_CONTROL, @@ -399,6 +511,21 @@ static int wdsp_disable_dsp(struct wdsp_mgr_priv *wdsp) { int ret; + WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex); + + /* + * If Disable happened while SSR is in progress, then set the SSR + * ready status indicating WDSP is now ready. Ignore the disable + * event here and let the SSR handler go through shutdown. + */ + if (wdsp->ssr_type != WDSP_SSR_TYPE_NO_SSR) { + __wdsp_set_ready_locked(wdsp, WDSP_SSR_STATUS_WDSP_READY, true); + WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex); + return 0; + } + + WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex); + /* Make sure wdsp is in good state */ if (!WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_BOOTED)) { WDSP_ERR(wdsp, "wdsp in invalid state 0x%x", wdsp->status); @@ -478,8 +605,190 @@ static struct device *wdsp_get_dev_for_cmpnt(struct device *wdsp_dev, return cmpnt->cdev; } +static void wdsp_collect_ramdumps(struct wdsp_mgr_priv *wdsp) +{ + struct wdsp_img_section img_section; + struct wdsp_err_intr_arg *data = &wdsp->dump_data.err_data; + struct ramdump_segment rd_seg; + int ret = 0; + + if (wdsp->ssr_type != WDSP_SSR_TYPE_WDSP_DOWN || + !data->mem_dumps_enabled) { + WDSP_DBG(wdsp, "cannot dump memory, ssr_type %s, dumps %s", + wdsp_get_ssr_type_string(wdsp->ssr_type), + !(data->mem_dumps_enabled) ? "disabled" : "enabled"); + goto done; + } + + if (data->dump_size == 0 || + data->remote_start_addr < wdsp->base_addr) { + WDSP_ERR(wdsp, "Invalid start addr 0x%x or dump_size 0x%zx", + data->remote_start_addr, data->dump_size); + goto done; + } + + if (!wdsp->dump_data.rd_dev) { + WDSP_ERR(wdsp, "Ramdump device is not setup"); + goto done; + } + + WDSP_DBG(wdsp, "base_addr 0x%x, dump_start_addr 0x%x, dump_size 0x%zx", + wdsp->base_addr, data->remote_start_addr, data->dump_size); + + /* Allocate memory for dumps */ + wdsp->dump_data.rd_v_addr = dma_alloc_coherent(wdsp->mdev, + data->dump_size, + &wdsp->dump_data.rd_addr, + GFP_KERNEL); + if (!wdsp->dump_data.rd_v_addr) + goto done; + + img_section.addr = data->remote_start_addr - wdsp->base_addr; + img_section.size = data->dump_size; + img_section.data = wdsp->dump_data.rd_v_addr; + + ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_TRANSPORT, + WDSP_EVENT_READ_SECTION, + &img_section); + if (IS_ERR_VALUE(ret)) { + WDSP_ERR(wdsp, "Failed to read dumps, size 0x%zx at addr 0x%x", + img_section.size, img_section.addr); + goto err_read_dumps; + } + + rd_seg.address = (unsigned long) wdsp->dump_data.rd_v_addr; + rd_seg.size = img_section.size; + rd_seg.v_address = wdsp->dump_data.rd_v_addr; + + ret = do_ramdump(wdsp->dump_data.rd_dev, &rd_seg, 1); + if (IS_ERR_VALUE(ret)) + WDSP_ERR(wdsp, "do_ramdump failed with error %d", ret); + +err_read_dumps: + dma_free_coherent(wdsp->mdev, data->dump_size, + wdsp->dump_data.rd_v_addr, wdsp->dump_data.rd_addr); +done: + return; +} + +static void wdsp_ssr_work_fn(struct work_struct *work) +{ + struct wdsp_mgr_priv *wdsp; + int ret; + + wdsp = container_of(work, struct wdsp_mgr_priv, ssr_work); + if (!wdsp) { + pr_err("%s: Invalid private_data\n", __func__); + return; + } + + 
WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex); + + wdsp_collect_ramdumps(wdsp); + + /* In case of CDC_DOWN event, the DSP is already shut down */ + if (wdsp->ssr_type != WDSP_SSR_TYPE_CDC_DOWN) { + ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_CONTROL, + WDSP_EVENT_DO_SHUTDOWN, NULL); + if (IS_ERR_VALUE(ret)) + WDSP_ERR(wdsp, "Failed WDSP shutdown, err = %d", ret); + } + wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_SHUTDOWN, NULL); + WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_BOOTED); + + WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex); + ret = wait_for_completion_timeout(&wdsp->ready_compl, + WDSP_SSR_READY_WAIT_TIMEOUT); + WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex); + if (ret == 0) { + WDSP_ERR(wdsp, "wait_for_ready timed out, status = 0x%x", + wdsp->ready_status); + goto done; + } + + /* Data sections are to be downloaded per WDSP boot */ + WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_DATA_DLOADED); + + /* + * Even though code section could possibly be retained on DSP + * crash, go ahead and still re-download just to avoid any + * memory corruption from previous crash. + */ + WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_CODE_DLOADED); + + /* If codec went down, then all components must be re-initialized */ + if (wdsp->ssr_type == WDSP_SSR_TYPE_CDC_DOWN) { + wdsp_deinit_components(wdsp); + WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_INITIALIZED); + } + + ret = wdsp_init_and_dload_code_sections(wdsp); + if (IS_ERR_VALUE(ret)) { + WDSP_ERR(wdsp, "Failed to dload code sections err = %d", + ret); + goto done; + } + + /* SSR handling is finished, mark SSR type as NO_SSR */ + wdsp->ssr_type = WDSP_SSR_TYPE_NO_SSR; +done: + WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex); +} + +static int wdsp_ssr_handler(struct wdsp_mgr_priv *wdsp, void *arg, + enum wdsp_ssr_type ssr_type) +{ + enum wdsp_ssr_type current_ssr_type; + struct wdsp_err_intr_arg *err_data; + + WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex); + + current_ssr_type = wdsp->ssr_type; + WDSP_DBG(wdsp, "Current ssr_type %s, handling ssr_type %s", + wdsp_get_ssr_type_string(current_ssr_type), + wdsp_get_ssr_type_string(ssr_type)); + wdsp->ssr_type = ssr_type; + + if (arg) { + err_data = (struct wdsp_err_intr_arg *) arg; + memcpy(&wdsp->dump_data.err_data, err_data, + sizeof(*err_data)); + } else { + memset(&wdsp->dump_data.err_data, 0, + sizeof(wdsp->dump_data.err_data)); + } + + switch (ssr_type) { + + case WDSP_SSR_TYPE_WDSP_DOWN: + case WDSP_SSR_TYPE_CDC_DOWN: + __wdsp_clr_ready_locked(wdsp, WDSP_SSR_STATUS_WDSP_READY); + if (ssr_type == WDSP_SSR_TYPE_CDC_DOWN) + __wdsp_clr_ready_locked(wdsp, + WDSP_SSR_STATUS_CDC_READY); + wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_PRE_SHUTDOWN, + NULL); + schedule_work(&wdsp->ssr_work); + break; + + case WDSP_SSR_TYPE_CDC_UP: + __wdsp_set_ready_locked(wdsp, WDSP_SSR_STATUS_CDC_READY, true); + break; + + default: + WDSP_ERR(wdsp, "undefined ssr_type %d\n", ssr_type); + /* Revert the ssr_type for undefined events */ + wdsp->ssr_type = current_ssr_type; + break; + } + + WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex); + + return 0; +} + static int wdsp_intr_handler(struct device *wdsp_dev, - enum wdsp_intr intr) + enum wdsp_intr intr, void *arg) { struct wdsp_mgr_priv *wdsp; int ret; @@ -495,6 +804,9 @@ static int wdsp_intr_handler(struct device *wdsp_dev, ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_IPC, WDSP_EVENT_IPC1_INTR, NULL); break; + case WDSP_ERR_INTR: + ret = wdsp_ssr_handler(wdsp, arg, WDSP_SSR_TYPE_WDSP_DOWN); + break; default: ret = -EINVAL; break; @@ -585,6 +897,11 @@ static int wdsp_mgr_bind(struct device *dev) wdsp->ops = &wdsp_ops; 
+ /* Setup ramdump device */ + wdsp->dump_data.rd_dev = create_ramdump_device("wdsp", dev); + if (!wdsp->dump_data.rd_dev) + dev_info(dev, "%s: create_ramdump_device failed\n", __func__); + ret = component_bind_all(dev, wdsp->ops); if (IS_ERR_VALUE(ret)) WDSP_ERR(wdsp, "component_bind_all failed %d\n", ret); @@ -616,6 +933,11 @@ static void wdsp_mgr_unbind(struct device *dev) component_unbind_all(dev, wdsp->ops); + if (wdsp->dump_data.rd_dev) { + destroy_ramdump_device(wdsp->dump_data.rd_dev); + wdsp->dump_data.rd_dev = NULL; + } + /* Clear all status bits */ wdsp->status = 0x00; @@ -746,6 +1068,12 @@ static int wdsp_mgr_probe(struct platform_device *pdev) INIT_WORK(&wdsp->load_fw_work, wdsp_load_fw_image); INIT_LIST_HEAD(wdsp->seg_list); mutex_init(&wdsp->api_mutex); + mutex_init(&wdsp->ssr_mutex); + wdsp->ssr_type = WDSP_SSR_TYPE_NO_SSR; + wdsp->ready_status = WDSP_SSR_STATUS_READY; + INIT_WORK(&wdsp->ssr_work, wdsp_ssr_work_fn); + init_completion(&wdsp->ready_compl); + arch_setup_dma_ops(wdsp->mdev, 0, 0, NULL, 0); dev_set_drvdata(mdev, wdsp); ret = component_master_add_with_match(mdev, &wdsp_master_ops, @@ -759,6 +1087,7 @@ static int wdsp_mgr_probe(struct platform_device *pdev) err_master_add: mutex_destroy(&wdsp->api_mutex); + mutex_destroy(&wdsp->ssr_mutex); err_dt_parse: devm_kfree(mdev, wdsp->seg_list); devm_kfree(mdev, wdsp); @@ -775,6 +1104,7 @@ static int wdsp_mgr_remove(struct platform_device *pdev) component_master_del(mdev, &wdsp_master_ops); mutex_destroy(&wdsp->api_mutex); + mutex_destroy(&wdsp->ssr_mutex); devm_kfree(mdev, wdsp->seg_list); devm_kfree(mdev, wdsp); dev_set_drvdata(mdev, NULL); diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c index d207d9ccda34..3cbc1e7821cf 100644 --- a/sound/soc/codecs/wcd-mbhc-v2.c +++ b/sound/soc/codecs/wcd-mbhc-v2.c @@ -2318,7 +2318,7 @@ int wcd_mbhc_start(struct wcd_mbhc *mbhc, schedule_delayed_work(&mbhc->mbhc_firmware_dwork, usecs_to_jiffies(FW_READ_TIMEOUT)); else - pr_err("%s: Skipping to read mbhc fw, 0x%p %p\n", + pr_err("%s: Skipping to read mbhc fw, 0x%pK %pK\n", __func__, mbhc->mbhc_fw, mbhc->mbhc_cal); } pr_debug("%s: leave %d\n", __func__, rc); diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c index 3049d87c6c05..60efcb174740 100644 --- a/sound/soc/codecs/wcd-spi.c +++ b/sound/soc/codecs/wcd-spi.c @@ -639,12 +639,10 @@ static int wcd_spi_init(struct spi_device *spi) WCD_SPI_SLAVE_TRNS_LEN, 0xFFFF0000, (WCD_SPI_RW_MULTI_MAX_LEN / 4) << 16); -done: - return ret; - err_wr_en: wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE, WCD_SPI_CLK_FLAG_IMMEDIATE); +done: return ret; } @@ -813,6 +811,27 @@ static int wdsp_spi_dload_section(struct spi_device *spi, return ret; } +static int wdsp_spi_read_section(struct spi_device *spi, void *data) +{ + struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi); + struct wdsp_img_section *sec = data; + struct wcd_spi_msg msg; + int ret; + + msg.remote_addr = sec->addr + wcd_spi->mem_base_addr; + msg.data = sec->data; + msg.len = sec->size; + + dev_dbg(&spi->dev, "%s: addr = 0x%x, size = 0x%zx\n", + __func__, msg.remote_addr, msg.len); + + ret = wcd_spi_data_xfer(spi, &msg, WCD_SPI_XFER_READ); + if (IS_ERR_VALUE(ret)) + dev_err(&spi->dev, "%s: fail addr (0x%x) size (0x%zx)\n", + __func__, msg.remote_addr, msg.len); + return ret; +} + static int wdsp_spi_event_handler(struct device *dev, void *priv_data, enum wdsp_event_type event, void *data) @@ -824,6 +843,7 @@ static int wdsp_spi_event_handler(struct device *dev, void *priv_data, __func__, event); switch 
(event) { + case WDSP_EVENT_PRE_DLOAD_CODE: case WDSP_EVENT_PRE_DLOAD_DATA: ret = wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_ENABLE, WCD_SPI_CLK_FLAG_IMMEDIATE); @@ -846,6 +866,11 @@ static int wdsp_spi_event_handler(struct device *dev, void *priv_data, case WDSP_EVENT_DLOAD_SECTION: ret = wdsp_spi_dload_section(spi, data); break; + + case WDSP_EVENT_READ_SECTION: + ret = wdsp_spi_read_section(spi, data); + break; + default: dev_dbg(&spi->dev, "%s: Unhandled event %d\n", __func__, event); diff --git a/sound/soc/codecs/wcd9330.c b/sound/soc/codecs/wcd9330.c index a8d6e0fa4732..fa396aa55ac9 100644 --- a/sound/soc/codecs/wcd9330.c +++ b/sound/soc/codecs/wcd9330.c @@ -5474,7 +5474,7 @@ static int tomtom_set_channel_map(struct snd_soc_dai *dai, struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(dai->codec); struct wcd9xxx *core = dev_get_drvdata(dai->codec->dev->parent); if (!tx_slot || !rx_slot) { - pr_err("%s: Invalid tx_slot=%p, rx_slot=%p\n", + pr_err("%s: Invalid tx_slot=%pK, rx_slot=%pK\n", __func__, tx_slot, rx_slot); return -EINVAL; } @@ -5519,7 +5519,7 @@ static int tomtom_get_channel_map(struct snd_soc_dai *dai, case AIF2_PB: case AIF3_PB: if (!rx_slot || !rx_num) { - pr_err("%s: Invalid rx_slot %p or rx_num %p\n", + pr_err("%s: Invalid rx_slot %pK or rx_num %pK\n", __func__, rx_slot, rx_num); return -EINVAL; } @@ -5538,7 +5538,7 @@ static int tomtom_get_channel_map(struct snd_soc_dai *dai, case AIF4_VIFEED: case AIF4_MAD_TX: if (!tx_slot || !tx_num) { - pr_err("%s: Invalid tx_slot %p or tx_num %p\n", + pr_err("%s: Invalid tx_slot %pK or tx_num %pK\n", __func__, tx_slot, tx_num); return -EINVAL; } @@ -8228,7 +8228,7 @@ static void tomtom_compute_impedance(struct wcd9xxx_mbhc *mbhc, s16 *l, s16 *r, struct tomtom_priv *tomtom; if (!mbhc) { - pr_err("%s: Invalid parameters mbhc = %p\n", + pr_err("%s: Invalid parameters mbhc = %pK\n", __func__, mbhc); return; } @@ -8287,7 +8287,7 @@ static void tomtom_zdet_error_approx(struct wcd9xxx_mbhc *mbhc, uint32_t *zl, const int shift = TOMTOM_ZDET_ERROR_APPROX_SHIFT; if (!zl || !zr || !mbhc) { - pr_err("%s: Invalid parameters zl = %p zr = %p, mbhc = %p\n", + pr_err("%s: Invalid parameters zl = %pK zr = %pK, mbhc = %pK\n", __func__, zl, zr, mbhc); return; } @@ -8602,7 +8602,7 @@ static int tomtom_codec_fll_enable(struct snd_soc_codec *codec, struct wcd9xxx *wcd9xxx; if (!codec || !codec->control_data) { - pr_err("%s: Invalid codec handle, %p\n", + pr_err("%s: Invalid codec handle, %pK\n", __func__, codec); return -EINVAL; } diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c index 2cc895cf1d32..46b8e7f72eb8 100644 --- a/sound/soc/codecs/wcd9335.c +++ b/sound/soc/codecs/wcd9335.c @@ -11080,7 +11080,7 @@ static int tasha_get_channel_map(struct snd_soc_dai *dai, case AIF4_PB: case AIF_MIX1_PB: if (!rx_slot || !rx_num) { - pr_err("%s: Invalid rx_slot %p or rx_num %p\n", + pr_err("%s: Invalid rx_slot %pK or rx_num %pK\n", __func__, rx_slot, rx_num); return -EINVAL; } @@ -11099,7 +11099,7 @@ static int tasha_get_channel_map(struct snd_soc_dai *dai, case AIF4_MAD_TX: case AIF4_VIFEED: if (!tx_slot || !tx_num) { - pr_err("%s: Invalid tx_slot %p or tx_num %p\n", + pr_err("%s: Invalid tx_slot %pK or tx_num %pK\n", __func__, tx_slot, tx_num); return -EINVAL; } @@ -11137,7 +11137,7 @@ static int tasha_set_channel_map(struct snd_soc_dai *dai, core = dev_get_drvdata(dai->codec->dev->parent); if (!tx_slot || !rx_slot) { - pr_err("%s: Invalid tx_slot=%p, rx_slot=%p\n", + pr_err("%s: Invalid tx_slot=%pK, rx_slot=%pK\n", __func__, tx_slot, rx_slot); 
return -EINVAL; } diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c index 225b3a755f66..e649770297f1 100644 --- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c +++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c @@ -28,19 +28,22 @@ #define WCD_MEM_ENABLE_MAX_RETRIES 20 #define WCD_DSP_BOOT_TIMEOUT_MS 3000 #define WCD_SYSFS_ENTRY_MAX_LEN 8 - -#define WCD_CNTL_MUTEX_LOCK(codec, lock) \ -{ \ - dev_dbg(codec->dev, "mutex_lock(%s)\n", \ - __func__); \ - mutex_lock(&lock); \ +#define WCD_PROCFS_ENTRY_MAX_LEN 16 +#define WCD_934X_RAMDUMP_START_ADDR 0x20100000 +#define WCD_934X_RAMDUMP_SIZE ((1024 * 1024) - 128) + +#define WCD_CNTL_MUTEX_LOCK(codec, lock) \ +{ \ + dev_dbg(codec->dev, "%s: mutex_lock(%s)\n", \ + __func__, __stringify_1(lock)); \ + mutex_lock(&lock); \ } -#define WCD_CNTL_MUTEX_UNLOCK(codec, lock) \ -{ \ - dev_dbg(codec->dev, "mutex_unlock(%s)\n",\ - __func__); \ - mutex_unlock(&lock); \ +#define WCD_CNTL_MUTEX_UNLOCK(codec, lock) \ +{ \ + dev_dbg(codec->dev, "%s: mutex_unlock(%s)\n", \ + __func__, __stringify_1(lock)); \ + mutex_unlock(&lock); \ } struct wcd_cntl_attribute { @@ -147,6 +150,97 @@ static struct kobj_type wcd_cntl_ktype = { .sysfs_ops = &wcd_cntl_sysfs_ops, }; +static void wcd_cntl_change_online_state(struct wcd_dsp_cntl *cntl, + u8 online) +{ + struct wdsp_ssr_entry *ssr_entry = &cntl->ssr_entry; + unsigned long ret; + + WCD_CNTL_MUTEX_LOCK(cntl->codec, cntl->ssr_mutex); + ssr_entry->offline = !online; + /* Make sure the write is complete */ + wmb(); + ret = xchg(&ssr_entry->offline_change, 1); + wake_up_interruptible(&ssr_entry->offline_poll_wait); + dev_dbg(cntl->codec->dev, + "%s: requested %u, offline %u offline_change %u, ret = %ld\n", + __func__, online, ssr_entry->offline, + ssr_entry->offline_change, ret); + WCD_CNTL_MUTEX_UNLOCK(cntl->codec, cntl->ssr_mutex); +} + +static ssize_t wdsp_ssr_entry_read(struct snd_info_entry *entry, + void *file_priv_data, struct file *file, + char __user *buf, size_t count, loff_t pos) +{ + int len = 0; + char buffer[WCD_PROCFS_ENTRY_MAX_LEN]; + struct wcd_dsp_cntl *cntl; + struct wdsp_ssr_entry *ssr_entry; + ssize_t ret; + u8 offline; + + cntl = (struct wcd_dsp_cntl *) entry->private_data; + if (!cntl) { + pr_err("%s: Invalid private data for SSR procfs entry\n", + __func__); + return -EINVAL; + } + + ssr_entry = &cntl->ssr_entry; + + WCD_CNTL_MUTEX_LOCK(cntl->codec, cntl->ssr_mutex); + offline = ssr_entry->offline; + /* Make sure the read is complete */ + rmb(); + dev_dbg(cntl->codec->dev, "%s: offline = %s\n", __func__, + offline ? "true" : "false"); + len = snprintf(buffer, sizeof(buffer), "%s\n", + offline ? "OFFLINE" : "ONLINE"); + ret = simple_read_from_buffer(buf, count, &pos, buffer, len); + WCD_CNTL_MUTEX_UNLOCK(cntl->codec, cntl->ssr_mutex); + + return ret; +} + +static unsigned int wdsp_ssr_entry_poll(struct snd_info_entry *entry, + void *private_data, struct file *file, + poll_table *wait) +{ + struct wcd_dsp_cntl *cntl; + struct wdsp_ssr_entry *ssr_entry; + unsigned int ret = 0; + + if (!entry || !entry->private_data) { + pr_err("%s: %s is NULL\n", __func__, + (!entry) ? 
"entry" : "private_data"); + return -EINVAL; + } + + cntl = (struct wcd_dsp_cntl *) entry->private_data; + ssr_entry = &cntl->ssr_entry; + + dev_dbg(cntl->codec->dev, "%s: Poll wait, offline = %u\n", + __func__, ssr_entry->offline); + poll_wait(file, &ssr_entry->offline_poll_wait, wait); + dev_dbg(cntl->codec->dev, "%s: Woken up Poll wait, offline = %u\n", + __func__, ssr_entry->offline); + + WCD_CNTL_MUTEX_LOCK(cntl->codec, cntl->ssr_mutex); + if (xchg(&ssr_entry->offline_change, 0)) + ret = POLLIN | POLLPRI | POLLRDNORM; + dev_dbg(cntl->codec->dev, "%s: ret (%d) from poll_wait\n", + __func__, ret); + WCD_CNTL_MUTEX_UNLOCK(cntl->codec, cntl->ssr_mutex); + + return ret; +} + +static struct snd_info_entry_ops wdsp_ssr_entry_ops = { + .read = wdsp_ssr_entry_read, + .poll = wdsp_ssr_entry_poll, +}; + static int wcd_cntl_cpe_fll_calibrate(struct wcd_dsp_cntl *cntl) { struct snd_soc_codec *codec = cntl->codec; @@ -553,7 +647,8 @@ static irqreturn_t wcd_cntl_ipc_irq(int irq, void *data) if (cntl->m_dev && cntl->m_ops && cntl->m_ops->intr_handler) - ret = cntl->m_ops->intr_handler(cntl->m_dev, WDSP_IPC1_INTR); + ret = cntl->m_ops->intr_handler(cntl->m_dev, WDSP_IPC1_INTR, + NULL); else ret = -EINVAL; @@ -568,8 +663,10 @@ static irqreturn_t wcd_cntl_err_irq(int irq, void *data) { struct wcd_dsp_cntl *cntl = data; struct snd_soc_codec *codec = cntl->codec; + struct wdsp_err_intr_arg arg; u16 status = 0; u8 reg_val; + int ret = 0; reg_val = snd_soc_read(codec, WCD934X_CPE_SS_SS_ERROR_INT_STATUS_0A); status = status | reg_val; @@ -580,6 +677,23 @@ static irqreturn_t wcd_cntl_err_irq(int irq, void *data) dev_info(codec->dev, "%s: error interrupt status = 0x%x\n", __func__, status); + if ((status & cntl->irqs.fatal_irqs) && + (cntl->m_dev && cntl->m_ops && cntl->m_ops->intr_handler)) { + arg.mem_dumps_enabled = cntl->ramdump_enable; + arg.remote_start_addr = WCD_934X_RAMDUMP_START_ADDR; + arg.dump_size = WCD_934X_RAMDUMP_SIZE; + ret = cntl->m_ops->intr_handler(cntl->m_dev, WDSP_ERR_INTR, + &arg); + if (IS_ERR_VALUE(ret)) + dev_err(cntl->codec->dev, + "%s: Failed to handle fatal irq 0x%x\n", + __func__, status & cntl->irqs.fatal_irqs); + wcd_cntl_change_online_state(cntl, 0); + } else { + dev_err(cntl->codec->dev, "%s: Invalid intr_handler\n", + __func__); + } + return IRQ_HANDLED; } @@ -591,10 +705,15 @@ static int wcd_control_handler(struct device *dev, void *priv_data, int ret = 0; switch (event) { + case WDSP_EVENT_POST_INIT: case WDSP_EVENT_POST_DLOAD_CODE: case WDSP_EVENT_DLOAD_FAILED: case WDSP_EVENT_POST_SHUTDOWN: + if (event == WDSP_EVENT_POST_DLOAD_CODE) + /* Mark DSP online since code download is complete */ + wcd_cntl_change_online_state(cntl, 1); + /* Disable CPAR */ wcd_cntl_cpar_ctrl(cntl, false); /* Disable all the clocks */ @@ -605,12 +724,8 @@ static int wcd_control_handler(struct device *dev, void *priv_data, __func__, ret); break; - case WDSP_EVENT_PRE_DLOAD_CODE: - - wcd_cntl_enable_memory(cntl); - break; - case WDSP_EVENT_PRE_DLOAD_DATA: + case WDSP_EVENT_PRE_DLOAD_CODE: /* Enable all the clocks */ ret = wcd_cntl_clocks_enable(cntl); @@ -623,6 +738,9 @@ static int wcd_control_handler(struct device *dev, void *priv_data, /* Enable CPAR */ wcd_cntl_cpar_ctrl(cntl, true); + + if (event == WDSP_EVENT_PRE_DLOAD_CODE) + wcd_cntl_enable_memory(cntl); break; case WDSP_EVENT_DO_BOOT: @@ -697,6 +815,8 @@ static void wcd_cntl_debugfs_init(char *dir, struct wcd_dsp_cntl *cntl) debugfs_create_u32("debug_mode", S_IRUGO | S_IWUSR, cntl->entry, &cntl->debug_mode); + 
debugfs_create_bool("ramdump_enable", S_IRUGO | S_IWUSR, + cntl->entry, &cntl->ramdump_enable); done: return; } @@ -827,6 +947,10 @@ static int wcd_ctrl_component_bind(struct device *dev, void *data) { struct wcd_dsp_cntl *cntl; + struct snd_soc_codec *codec; + struct snd_card *card; + struct snd_info_entry *entry; + char proc_name[WCD_PROCFS_ENTRY_MAX_LEN]; int ret = 0; if (!dev || !master || !data) { @@ -844,13 +968,47 @@ static int wcd_ctrl_component_bind(struct device *dev, cntl->m_dev = master; cntl->m_ops = data; - if (cntl->m_ops->register_cmpnt_ops) - ret = cntl->m_ops->register_cmpnt_ops(master, dev, cntl, - &control_ops); + if (!cntl->m_ops->register_cmpnt_ops) { + dev_err(dev, "%s: invalid master callback register_cmpnt_ops\n", + __func__); + ret = -EINVAL; + goto done; + } - if (ret) + ret = cntl->m_ops->register_cmpnt_ops(master, dev, cntl, &control_ops); + if (ret) { dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n", __func__, ret); + goto done; + } + + codec = cntl->codec; + card = codec->component.card->snd_card; + snprintf(proc_name, WCD_PROCFS_ENTRY_MAX_LEN, "%s%d%s", "cpe", + cntl->dsp_instance, "_state"); + entry = snd_info_create_card_entry(card, proc_name, card->proc_root); + if (!entry) { + /* Do not treat this as Fatal error */ + dev_err(dev, "%s: Failed to create procfs entry %s\n", + __func__, proc_name); + goto done; + } + + cntl->ssr_entry.entry = entry; + cntl->ssr_entry.offline = 1; + entry->size = WCD_PROCFS_ENTRY_MAX_LEN; + entry->content = SNDRV_INFO_CONTENT_DATA; + entry->c.ops = &wdsp_ssr_entry_ops; + entry->private_data = cntl; + ret = snd_info_register(entry); + if (IS_ERR_VALUE(ret)) { + dev_err(dev, "%s: Failed to register entry %s, err = %d\n", + __func__, proc_name, ret); + snd_info_free_entry(entry); + /* Let bind still happen even if creating the entry failed */ + ret = 0; + } +done: return ret; } @@ -929,6 +1087,8 @@ void wcd_dsp_cntl_init(struct snd_soc_codec *codec, memcpy(&control->irqs, ¶ms->irqs, sizeof(control->irqs)); init_completion(&control->boot_complete); mutex_init(&control->clk_mutex); + mutex_init(&control->ssr_mutex); + init_waitqueue_head(&control->ssr_entry.offline_poll_wait); /* * The default state of WDSP is in SVS mode. 
@@ -981,6 +1141,7 @@ void wcd_dsp_cntl_deinit(struct wcd_dsp_cntl **cntl) component_del(codec->dev, &wcd_ctrl_component_ops); mutex_destroy(&control->clk_mutex); + mutex_destroy(&control->ssr_mutex); kfree(*cntl); *cntl = NULL; } diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h index 3d6db776a0b5..cd6697b3d641 100644 --- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h +++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h @@ -54,6 +54,13 @@ struct wcd_dsp_params { u32 dsp_instance; }; +struct wdsp_ssr_entry { + u8 offline; + u8 offline_change; + wait_queue_head_t offline_poll_wait; + struct snd_info_entry *entry; +}; + struct wcd_dsp_cntl { /* Handle to codec */ struct snd_soc_codec *codec; @@ -77,6 +84,7 @@ struct wcd_dsp_cntl { /* Debugfs related */ struct dentry *entry; u32 debug_mode; + bool ramdump_enable; /* WDSP manager drivers data */ struct device *m_dev; @@ -88,6 +96,10 @@ struct wcd_dsp_cntl { /* Keep track of WDSP boot status */ bool is_wdsp_booted; + + /* SSR related */ + struct wdsp_ssr_entry ssr_entry; + struct mutex ssr_mutex; }; void wcd_dsp_cntl_init(struct snd_soc_codec *codec, diff --git a/sound/soc/codecs/wcd934x/wcd934x-routing.h b/sound/soc/codecs/wcd934x/wcd934x-routing.h index ac3031ffe615..940fdf89d361 100644 --- a/sound/soc/codecs/wcd934x/wcd934x-routing.h +++ b/sound/soc/codecs/wcd934x/wcd934x-routing.h @@ -21,6 +21,7 @@ const struct snd_soc_dapm_route tavil_slim_audio_map[] = { {"AIF1 CAP", NULL, "AIF1_CAP Mixer"}, {"AIF2 CAP", NULL, "AIF2_CAP Mixer"}, {"AIF3 CAP", NULL, "AIF3_CAP Mixer"}, + {"AIF4 MAD", NULL, "AIF4_MAD Mixer"}, /* Virtual input widget Mixer */ {"AIF1_CAP Mixer", "SLIM TX0", "SLIM TX0"}, @@ -65,6 +66,8 @@ const struct snd_soc_dapm_route tavil_slim_audio_map[] = { {"AIF3_CAP Mixer", "SLIM TX11", "SLIM TX11"}, {"AIF3_CAP Mixer", "SLIM TX13", "SLIM TX13"}, + {"AIF4_MAD Mixer", "SLIM TX13", "SLIM TX13"}, + {"SLIM RX0 MUX", "AIF1_PB", "AIF1 PB"}, {"SLIM RX1 MUX", "AIF1_PB", "AIF1 PB"}, {"SLIM RX2 MUX", "AIF1_PB", "AIF1 PB"}, @@ -121,6 +124,7 @@ const struct snd_soc_dapm_route tavil_audio_map[] = { {"MAD_INP MUX", "MAD", "MAD_SEL MUX"}, {"MAD_INP MUX", "DEC1", "ADC MUX1"}, + {"MAD_BROADCAST", "Switch", "MAD_INP MUX"}, {"MAD_CPE1", "Switch", "MAD_INP MUX"}, {"MAD_CPE2", "Switch", "MAD_INP MUX"}, diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c index b4a73d17c322..9e18c17d6f1c 100644 --- a/sound/soc/codecs/wcd934x/wcd934x.c +++ b/sound/soc/codecs/wcd934x/wcd934x.c @@ -186,6 +186,7 @@ enum { AIF3_CAP, AIF4_PB, AIF4_VIFEED, + AIF4_MAD_TX, NUM_CODEC_DAIS, }; @@ -301,13 +302,13 @@ static const struct wcd9xxx_ch tavil_tx_chs[WCD934X_TX_MAX] = { }; static const u32 vport_slim_check_table[NUM_CODEC_DAIS] = { - 0, /* AIF1_PB */ - BIT(AIF2_CAP) | BIT(AIF3_CAP), /* AIF1_CAP */ - 0, /* AIF2_PB */ - BIT(AIF1_CAP) | BIT(AIF3_CAP), /* AIF2_CAP */ - 0, /* AIF3_PB */ - BIT(AIF1_CAP) | BIT(AIF2_CAP), /* AIF3_CAP */ - 0, /* AIF4_PB */ + 0, /* AIF1_PB */ + BIT(AIF2_CAP) | BIT(AIF3_CAP) | BIT(AIF4_MAD_TX), /* AIF1_CAP */ + 0, /* AIF2_PB */ + BIT(AIF1_CAP) | BIT(AIF3_CAP) | BIT(AIF4_MAD_TX), /* AIF2_CAP */ + 0, /* AIF3_PB */ + BIT(AIF1_CAP) | BIT(AIF2_CAP) | BIT(AIF4_MAD_TX), /* AIF3_CAP */ + 0, /* AIF4_PB */ }; /* Codec supports 2 IIR filters */ @@ -1271,6 +1272,8 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kcontrol, return 0; } break; + case AIF4_MAD_TX: + break; default: dev_err(codec->dev, "Unknown AIF %d\n", dai_id); mutex_unlock(&tavil_p->codec_mutex); @@ -2474,7 +2477,7 @@ 
done: return ret; } -static int tavil_codec_enable_mad(struct snd_soc_codec *codec, bool enable) +static int __tavil_codec_enable_mad(struct snd_soc_codec *codec, bool enable) { int rc = 0; @@ -2519,6 +2522,29 @@ done: return rc; } +static int tavil_codec_ape_enable_mad(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, + int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec); + int rc = 0; + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + snd_soc_update_bits(codec, WCD934X_CPE_SS_SVA_CFG, 0x40, 0x40); + rc = __tavil_codec_enable_mad(codec, true); + break; + case SND_SOC_DAPM_PRE_PMD: + snd_soc_update_bits(codec, WCD934X_CPE_SS_SVA_CFG, 0x40, 0x00); + __tavil_codec_enable_mad(codec, false); + break; + } + + dev_dbg(tavil->dev, "%s: event = %d\n", __func__, event); + return rc; +} + static int tavil_codec_cpe_mad_ctl(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { @@ -2533,7 +2559,7 @@ static int tavil_codec_cpe_mad_ctl(struct snd_soc_dapm_widget *w, goto done; snd_soc_update_bits(codec, WCD934X_CPE_SS_SVA_CFG, 0x20, 0x20); - rc = tavil_codec_enable_mad(codec, true); + rc = __tavil_codec_enable_mad(codec, true); if (IS_ERR_VALUE(rc)) { tavil->mad_switch_cnt--; goto done; @@ -2546,7 +2572,7 @@ static int tavil_codec_cpe_mad_ctl(struct snd_soc_dapm_widget *w, goto done; snd_soc_update_bits(codec, WCD934X_CPE_SS_SVA_CFG, 0x20, 0x00); - tavil_codec_enable_mad(codec, false); + __tavil_codec_enable_mad(codec, false); break; } done: @@ -5789,6 +5815,11 @@ static const struct snd_kcontrol_new aif3_cap_mixer[] = { slim_tx_mixer_get, slim_tx_mixer_put), }; +static const struct snd_kcontrol_new aif4_mad_mixer[] = { + SOC_SINGLE_EXT("SLIM TX13", SND_SOC_NOPM, WCD934X_TX13, 1, 0, + slim_tx_mixer_get, slim_tx_mixer_put), +}; + WCD_DAPM_ENUM_EXT(slim_rx0, SND_SOC_NOPM, 0, slim_rx_mux_text, slim_rx_mux_get, slim_rx_mux_put); WCD_DAPM_ENUM_EXT(slim_rx1, SND_SOC_NOPM, 0, slim_rx_mux_text, @@ -6110,6 +6141,9 @@ static const struct snd_kcontrol_new mad_cpe1_switch = static const struct snd_kcontrol_new mad_cpe2_switch = SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0); +static const struct snd_kcontrol_new mad_brdcst_switch = + SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0); + static const struct snd_kcontrol_new adc_us_mux0_switch = SOC_DAPM_SINGLE("US_Switch", SND_SOC_NOPM, 0, 1, 0); @@ -6532,10 +6566,16 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = { aif2_cap_mixer, ARRAY_SIZE(aif2_cap_mixer)), SND_SOC_DAPM_MIXER("AIF3_CAP Mixer", SND_SOC_NOPM, AIF3_CAP, 0, aif3_cap_mixer, ARRAY_SIZE(aif3_cap_mixer)), + SND_SOC_DAPM_MIXER("AIF4_MAD Mixer", SND_SOC_NOPM, AIF4_MAD_TX, 0, + aif4_mad_mixer, ARRAY_SIZE(aif4_mad_mixer)), SND_SOC_DAPM_AIF_OUT_E("AIF4 VI", "VIfeed", 0, SND_SOC_NOPM, AIF4_VIFEED, 0, tavil_codec_enable_slimvi_feedback, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), + + SND_SOC_DAPM_AIF_OUT("AIF4 MAD", "AIF4 MAD TX", 0, + SND_SOC_NOPM, 0, 0), + SND_SOC_DAPM_MIXER("AIF4_VI Mixer", SND_SOC_NOPM, AIF4_VIFEED, 0, aif4_vi_mixer, ARRAY_SIZE(aif4_vi_mixer)), SND_SOC_DAPM_INPUT("VIINPUT"), @@ -6670,6 +6710,10 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = { WCD_DAPM_MUX("MAD_SEL MUX", 0, mad_sel), WCD_DAPM_MUX("MAD_INP MUX", 0, mad_inp_mux), + SND_SOC_DAPM_SWITCH_E("MAD_BROADCAST", SND_SOC_NOPM, 0, 0, + &mad_brdcst_switch, tavil_codec_ape_enable_mad, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), + SND_SOC_DAPM_SWITCH_E("MAD_CPE1", 
SND_SOC_NOPM, 0, 0, &mad_cpe1_switch, tavil_codec_cpe_mad_ctl, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), @@ -6826,6 +6870,7 @@ static int tavil_get_channel_map(struct snd_soc_dai *dai, case AIF1_CAP: case AIF2_CAP: case AIF3_CAP: + case AIF4_MAD_TX: case AIF4_VIFEED: if (!tx_slot || !tx_num) { dev_err(tavil->dev, "%s: Invalid tx_slot 0x%pK or tx_num 0x%pK\n", @@ -6864,6 +6909,7 @@ static int tavil_set_channel_map(struct snd_soc_dai *dai, { struct tavil_priv *tavil; struct wcd9xxx *core; + struct wcd9xxx_codec_dai_data *dai_data = NULL; tavil = snd_soc_codec_get_drvdata(dai->codec); core = dev_get_drvdata(dai->codec->dev->parent); @@ -6878,6 +6924,12 @@ static int tavil_set_channel_map(struct snd_soc_dai *dai, wcd9xxx_init_slimslave(core, core->slim->laddr, tx_num, tx_slot, rx_num, rx_slot); + /* Reserve TX13 for MAD data channel */ + dai_data = &tavil->dai[AIF4_MAD_TX]; + if (dai_data) + list_add_tail(&core->tx_chs[WCD934X_TX13].list, + &dai_data->wcd9xxx_ch_list); + return 0; } @@ -7208,7 +7260,7 @@ static int tavil_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct tavil_priv *tavil = snd_soc_codec_get_drvdata(dai->codec); - int ret; + int ret = 0; dev_dbg(tavil->dev, "%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__, dai->name, dai->id, params_rate(params), @@ -7238,7 +7290,9 @@ static int tavil_hw_params(struct snd_pcm_substream *substream, tavil->dai[dai->id].rate = params_rate(params); break; case SNDRV_PCM_STREAM_CAPTURE: - ret = tavil_set_decimator_rate(dai, params_rate(params)); + if (dai->id != AIF4_MAD_TX) + ret = tavil_set_decimator_rate(dai, + params_rate(params)); if (ret) { dev_err(tavil->dev, "%s: cannot set TX Decimator rate: %d\n", __func__, ret); @@ -7395,6 +7449,20 @@ static struct snd_soc_dai_driver tavil_dai[] = { }, .ops = &tavil_vi_dai_ops, }, + { + .name = "tavil_mad1", + .id = AIF4_MAD_TX, + .capture = { + .stream_name = "AIF4 MAD TX", + .rates = SNDRV_PCM_RATE_16000, + .formats = WCD934X_FORMATS_S16_LE, + .rate_min = 16000, + .rate_max = 16000, + .channels_min = 1, + .channels_max = 1, + }, + .ops = &tavil_dai_ops, + }, }; static void tavil_codec_power_gate_digital_core(struct tavil_priv *tavil) diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c index 52ca82fba8e9..2012e4617ee1 100644 --- a/sound/soc/codecs/wcd9xxx-mbhc.c +++ b/sound/soc/codecs/wcd9xxx-mbhc.c @@ -4675,7 +4675,7 @@ int wcd9xxx_mbhc_start(struct wcd9xxx_mbhc *mbhc, schedule_delayed_work(&mbhc->mbhc_firmware_dwork, usecs_to_jiffies(FW_READ_TIMEOUT)); else - pr_debug("%s: Skipping to read mbhc fw, 0x%p %p\n", + pr_debug("%s: Skipping to read mbhc fw, 0x%pK %pK\n", __func__, mbhc->mbhc_fw, mbhc->mbhc_cal); } @@ -5073,7 +5073,7 @@ static int wcd9xxx_remeasure_z_values(struct wcd9xxx_mbhc *mbhc, right = !!(r); dev_dbg(codec->dev, "%s: Remeasuring impedance values\n", __func__); - dev_dbg(codec->dev, "%s: l: %p, r: %p, left=%d, right=%d\n", __func__, + dev_dbg(codec->dev, "%s: l: %pK, r: %pK, left=%d, right=%d\n", __func__, l, r, left, right); /* Remeasure V2 values */ diff --git a/sound/soc/codecs/wcd_cpe_core.c b/sound/soc/codecs/wcd_cpe_core.c index e9f167fa643b..3aa9ac8d40b6 100644 --- a/sound/soc/codecs/wcd_cpe_core.c +++ b/sound/soc/codecs/wcd_cpe_core.c @@ -473,7 +473,7 @@ static int wcd_cpe_load_fw(struct wcd_cpe_core *core, bool load_segment; if (!core || !core->cpe_handle) { - pr_err("%s: Error CPE core %p\n", __func__, + pr_err("%s: Error CPE core %pK\n", __func__, core); return -EINVAL; } diff --git 
a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c index 28fd8930adb2..d7f4044b71ee 100644 --- a/sound/soc/codecs/wsa881x.c +++ b/sound/soc/codecs/wsa881x.c @@ -917,7 +917,7 @@ int wsa881x_set_channel_map(struct snd_soc_codec *codec, u8 *port, u8 num_port, if (!port || !ch_mask || !ch_rate || (num_port > WSA881X_MAX_SWR_PORTS)) { dev_err(codec->dev, - "%s: Invalid port=%p, ch_mask=%p, ch_rate=%p\n", + "%s: Invalid port=%pK, ch_mask=%pK, ch_rate=%pK\n", __func__, port, ch_mask, ch_rate); return -EINVAL; } diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c index 4cb62a6b3e7d..ee9dcacdd5c9 100644 --- a/sound/soc/msm/msm-dai-fe.c +++ b/sound/soc/msm/msm-dai-fe.c @@ -96,7 +96,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -108,8 +109,9 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { .rates = (SNDRV_PCM_RATE_8000_384000| SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | - SNDRV_PCM_FMTBIT_S24_LE| - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 4, .rate_min = 8000, @@ -127,7 +129,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -140,7 +143,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -210,7 +214,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 6, .rate_min = 8000, @@ -222,7 +227,9 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { .rates = (SNDRV_PCM_RATE_8000_384000| SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | - SNDRV_PCM_FMTBIT_S24_LE), + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -240,7 +247,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -259,7 +267,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -271,8 +280,9 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { .rates = (SNDRV_PCM_RATE_8000_48000| SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | - SNDRV_PCM_FMTBIT_S24_LE| - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, 
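The msm-dai-fe.c hunks above and below all make the same mechanical change: SNDRV_PCM_FMTBIT_S32_LE (and S24_3LE where it was missing) is added to the frontend DAI format masks so that 32-bit PCM streams can be negotiated. A hedged alsa-lib sketch of what this enables on the application side; the device name "hw:0,0" and the parameter values are assumptions for illustration only:

#include <alsa/asoundlib.h>

/* Probe whether a playback stream can be opened in the newly
 * advertised S32_LE format. */
int probe_s32_playback(void)
{
	snd_pcm_t *pcm;
	snd_pcm_hw_params_t *hw;
	int err;

	err = snd_pcm_open(&pcm, "hw:0,0", SND_PCM_STREAM_PLAYBACK, 0);
	if (err < 0)
		return err;

	snd_pcm_hw_params_alloca(&hw);
	snd_pcm_hw_params_any(pcm, hw);
	/* Refused unless the FE DAI advertises SNDRV_PCM_FMTBIT_S32_LE */
	err = snd_pcm_hw_params_set_format(pcm, hw, SND_PCM_FORMAT_S32_LE);
	if (err >= 0)
		err = snd_pcm_hw_params_set_rate(pcm, hw, 48000, 0);
	if (err >= 0)
		err = snd_pcm_hw_params_set_channels(pcm, hw, 2);
	if (err >= 0)
		err = snd_pcm_hw_params(pcm, hw);
	snd_pcm_close(pcm);
	return err;
}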
@@ -290,7 +300,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -303,7 +314,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -321,7 +333,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -340,7 +353,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -353,7 +367,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -2220,7 +2235,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -2239,7 +2255,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -2258,7 +2275,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -2277,7 +2295,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -2296,7 +2315,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -2315,7 +2335,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), .channels_min = 1, .channels_max = 8, .rate_min = 8000, @@ -2334,7 +2355,8 @@ static struct snd_soc_dai_driver msm_fe_dais[] = { SNDRV_PCM_RATE_KNOT), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), 
.channels_min = 1, .channels_max = 8, .rate_min = 8000, diff --git a/sound/soc/msm/msmcobalt.c b/sound/soc/msm/msmcobalt.c index d7b0e151f0bf..a2bd3be62175 100644 --- a/sound/soc/msm/msmcobalt.c +++ b/sound/soc/msm/msmcobalt.c @@ -71,6 +71,9 @@ #define WCN_CDC_SLIM_RX_CH_MAX 2 #define WCN_CDC_SLIM_TX_CH_MAX 3 +#define TDM_CHANNEL_MAX 8 +#define TDM_SLOT_OFFSET_MAX 8 + enum { SLIM_RX_0 = 0, SLIM_RX_1, @@ -170,6 +173,131 @@ struct msm_asoc_wcd93xx_codec { void (*mbhc_hs_detect_exit)(struct snd_soc_codec *codec); }; +enum { + TDM_0 = 0, + TDM_1, + TDM_2, + TDM_3, + TDM_4, + TDM_5, + TDM_6, + TDM_7, + TDM_PORT_MAX, +}; + +enum { + TDM_PRI = 0, + TDM_SEC, + TDM_TERT, + TDM_QUAT, + TDM_INTERFACE_MAX, +}; + +struct tdm_port { + u32 mode; + u32 channel; +}; + +/* TDM default config */ +static struct dev_config tdm_rx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = { + { /* PRI TDM */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_0 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_1 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_2 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_3 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_4 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_5 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_6 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_7 */ + }, + { /* SEC TDM */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_0 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_1 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_2 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_3 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_4 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_5 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_6 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_7 */ + }, + { /* TERT TDM */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_0 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_1 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_2 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_3 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_4 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_5 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_6 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_7 */ + }, + { /* QUAT TDM */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_0 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_1 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_2 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_3 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_4 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_5 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_6 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* RX_7 */ + } +}; + +/* TDM default config */ +static struct dev_config tdm_tx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = { + { /* PRI TDM */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_0 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_1 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_2 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_3 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_4 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_5 */ + {SAMPLING_RATE_48KHZ, 
SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_6 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_7 */ + }, + { /* SEC TDM */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_0 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_1 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_2 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_3 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_4 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_5 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_6 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_7 */ + }, + { /* TERT TDM */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_0 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_1 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_2 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_3 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_4 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_5 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_6 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_7 */ + }, + { /* QUAT TDM */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_0 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_1 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_2 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_3 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_4 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_5 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_6 */ + {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, /* TX_7 */ + } +}; + +/*TDM default offset currently only supporting TDM_RX_0 and TDM_TX_0 */ +static unsigned int tdm_slot_offset[TDM_PORT_MAX][TDM_SLOT_OFFSET_MAX] = { + {0, 4, 8, 12, 16, 20, 24, 28},/* TX_0 | RX_0 */ + {AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_1 | RX_1 */ + {AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_2 | RX_2 */ + {AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_3 | RX_3 */ + {AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_4 | RX_4 */ + {AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_5 | RX_5 */ + {AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_6 | RX_6 */ + {AFE_SLOT_MAPPING_OFFSET_INVALID},/* TX_7 | RX_7 */ +}; + /* Default configuration of slimbus channels */ static struct dev_config slim_rx_cfg[] = { [SLIM_RX_0] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1}, @@ -254,7 +382,8 @@ static const char *const slim_tx_ch_text[] = {"One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight"}; static const char *const vi_feed_ch_text[] = {"One", "Two"}; -static char const *bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE"}; +static char const *bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE", + "S32_LE"}; static char const *ext_disp_bit_format_text[] = {"S16_LE", "S24_LE"}; static char const *slim_sample_rate_text[] = {"KHZ_8", "KHZ_16", "KHZ_32", "KHZ_44P1", "KHZ_48", @@ -269,9 +398,15 @@ static char const *ch_text[] = {"Two", "Three", "Four", "Five", static char const *usb_sample_rate_text[] = {"KHZ_8", "KHZ_11P025", "KHZ_16", "KHZ_22P05", "KHZ_32", "KHZ_44P1", "KHZ_48", - "KHZ_96", "KHZ_192"}; + "KHZ_96", "KHZ_192", "KHZ_384"}; static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96", "KHZ_192"}; +static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four", + "Five", "Six", "Seven", "Eight"}; +static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"}; +static char const *tdm_sample_rate_text[] = 
{"KHZ_8", "KHZ_16", "KHZ_32", + "KHZ_44P1", "KHZ_48", "KHZ_96", + "KHZ_192", "KHZ_352P8", "KHZ_384"}; static const char *const auxpcm_rate_text[] = {"KHZ_8", "KHZ_16"}; static char const *mi2s_rate_text[] = {"KHZ_8", "KHZ_16", "KHZ_32", "KHZ_44P1", "KHZ_48", @@ -308,6 +443,12 @@ static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_sample_rate, usb_sample_rate_text); static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_sample_rate, usb_sample_rate_text); static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_sample_rate, ext_disp_sample_rate_text); +static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_chs, tdm_ch_text); +static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_format, tdm_bit_format_text); +static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_sample_rate, tdm_sample_rate_text); +static SOC_ENUM_SINGLE_EXT_DECL(tdm_rx_chs, tdm_ch_text); +static SOC_ENUM_SINGLE_EXT_DECL(tdm_rx_format, tdm_bit_format_text); +static SOC_ENUM_SINGLE_EXT_DECL(tdm_rx_sample_rate, tdm_sample_rate_text); static SOC_ENUM_SINGLE_EXT_DECL(prim_aux_pcm_rx_sample_rate, auxpcm_rate_text); static SOC_ENUM_SINGLE_EXT_DECL(sec_aux_pcm_rx_sample_rate, auxpcm_rate_text); static SOC_ENUM_SINGLE_EXT_DECL(tert_aux_pcm_rx_sample_rate, auxpcm_rate_text); @@ -511,6 +652,9 @@ static int slim_get_bit_format_val(int bit_format) int val = 0; switch (bit_format) { + case SNDRV_PCM_FORMAT_S32_LE: + val = 3; + break; case SNDRV_PCM_FORMAT_S24_3LE: val = 2; break; @@ -539,6 +683,9 @@ static int slim_get_bit_format(int val) case 2: bit_fmt = SNDRV_PCM_FORMAT_S24_3LE; break; + case 3: + bit_fmt = SNDRV_PCM_FORMAT_S32_LE; + break; default: bit_fmt = SNDRV_PCM_FORMAT_S16_LE; break; @@ -876,6 +1023,9 @@ static int usb_audio_rx_sample_rate_get(struct snd_kcontrol *kcontrol, int sample_rate_val; switch (usb_rx_cfg.sample_rate) { + case SAMPLING_RATE_384KHZ: + sample_rate_val = 9; + break; case SAMPLING_RATE_192KHZ: sample_rate_val = 8; break; @@ -916,6 +1066,9 @@ static int usb_audio_rx_sample_rate_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { switch (ucontrol->value.integer.value[0]) { + case 9: + usb_rx_cfg.sample_rate = SAMPLING_RATE_384KHZ; + break; case 8: usb_rx_cfg.sample_rate = SAMPLING_RATE_192KHZ; break; @@ -958,6 +1111,9 @@ static int usb_audio_rx_format_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { switch (usb_rx_cfg.bit_format) { + case SNDRV_PCM_FORMAT_S32_LE: + ucontrol->value.integer.value[0] = 3; + break; case SNDRV_PCM_FORMAT_S24_3LE: ucontrol->value.integer.value[0] = 2; break; @@ -982,6 +1138,9 @@ static int usb_audio_rx_format_put(struct snd_kcontrol *kcontrol, int rc = 0; switch (ucontrol->value.integer.value[0]) { + case 3: + usb_rx_cfg.bit_format = SNDRV_PCM_FORMAT_S32_LE; + break; case 2: usb_rx_cfg.bit_format = SNDRV_PCM_FORMAT_S24_3LE; break; @@ -1024,6 +1183,9 @@ static int usb_audio_tx_sample_rate_get(struct snd_kcontrol *kcontrol, int sample_rate_val; switch (usb_tx_cfg.sample_rate) { + case SAMPLING_RATE_384KHZ: + sample_rate_val = 9; + break; case SAMPLING_RATE_192KHZ: sample_rate_val = 8; break; @@ -1066,6 +1228,9 @@ static int usb_audio_tx_sample_rate_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { switch (ucontrol->value.integer.value[0]) { + case 9: + usb_tx_cfg.sample_rate = SAMPLING_RATE_384KHZ; + break; case 8: usb_tx_cfg.sample_rate = SAMPLING_RATE_192KHZ; break; @@ -1108,6 +1273,9 @@ static int usb_audio_tx_format_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { switch (usb_tx_cfg.bit_format) { + case SNDRV_PCM_FORMAT_S32_LE: + ucontrol->value.integer.value[0] = 3; + 
break; case SNDRV_PCM_FORMAT_S24_3LE: ucontrol->value.integer.value[0] = 2; break; @@ -1132,6 +1300,9 @@ static int usb_audio_tx_format_put(struct snd_kcontrol *kcontrol, int rc = 0; switch (ucontrol->value.integer.value[0]) { + case 3: + usb_tx_cfg.bit_format = SNDRV_PCM_FORMAT_S32_LE; + break; case 2: usb_tx_cfg.bit_format = SNDRV_PCM_FORMAT_S24_3LE; break; @@ -1328,6 +1499,45 @@ static int proxy_rx_ch_put(struct snd_kcontrol *kcontrol, return 1; } +static int tdm_get_sample_rate(int value) +{ + int sample_rate = 0; + + switch (value) { + case 0: + sample_rate = SAMPLING_RATE_8KHZ; + break; + case 1: + sample_rate = SAMPLING_RATE_16KHZ; + break; + case 2: + sample_rate = SAMPLING_RATE_32KHZ; + break; + case 3: + sample_rate = SAMPLING_RATE_44P1KHZ; + break; + case 4: + sample_rate = SAMPLING_RATE_48KHZ; + break; + case 5: + sample_rate = SAMPLING_RATE_96KHZ; + break; + case 6: + sample_rate = SAMPLING_RATE_192KHZ; + break; + case 7: + sample_rate = SAMPLING_RATE_352P8KHZ; + break; + case 8: + sample_rate = SAMPLING_RATE_384KHZ; + break; + default: + sample_rate = SAMPLING_RATE_48KHZ; + break; + } + return sample_rate; +} + static int aux_pcm_get_sample_rate(int value) { int sample_rate; @@ -1344,6 +1554,45 @@ static int aux_pcm_get_sample_rate(int value) return sample_rate; } +static int tdm_get_sample_rate_val(int sample_rate) +{ + int sample_rate_val = 0; + + switch (sample_rate) { + case SAMPLING_RATE_8KHZ: + sample_rate_val = 0; + break; + case SAMPLING_RATE_16KHZ: + sample_rate_val = 1; + break; + case SAMPLING_RATE_32KHZ: + sample_rate_val = 2; + break; + case SAMPLING_RATE_44P1KHZ: + sample_rate_val = 3; + break; + case SAMPLING_RATE_48KHZ: + sample_rate_val = 4; + break; + case SAMPLING_RATE_96KHZ: + sample_rate_val = 5; + break; + case SAMPLING_RATE_192KHZ: + sample_rate_val = 6; + break; + case SAMPLING_RATE_352P8KHZ: + sample_rate_val = 7; + break; + case SAMPLING_RATE_384KHZ: + sample_rate_val = 8; + break; + default: + sample_rate_val = 4; + break; + } + return sample_rate_val; +} + static int aux_pcm_get_sample_rate_val(int sample_rate) { int sample_rate_val; @@ -1360,6 +1609,361 @@ static int aux_pcm_get_sample_rate_val(int sample_rate) return sample_rate_val; } +static int tdm_get_port_idx(struct snd_kcontrol *kcontrol, + struct tdm_port *port) +{ + if (port) { + if (strnstr(kcontrol->id.name, "PRI", + sizeof(kcontrol->id.name))) { + port->mode = TDM_PRI; + } else if (strnstr(kcontrol->id.name, "SEC", + sizeof(kcontrol->id.name))) { + port->mode = TDM_SEC; + } else if (strnstr(kcontrol->id.name, "TERT", + sizeof(kcontrol->id.name))) { + port->mode = TDM_TERT; + } else if (strnstr(kcontrol->id.name, "QUAT", + sizeof(kcontrol->id.name))) { + port->mode = TDM_QUAT; + } else { + pr_err("%s: unsupported mode in: %s", + __func__, kcontrol->id.name); + return -EINVAL; + } + + if (strnstr(kcontrol->id.name, "RX_0", + sizeof(kcontrol->id.name)) || + strnstr(kcontrol->id.name, "TX_0", + sizeof(kcontrol->id.name))) { + port->channel = TDM_0; + } else if (strnstr(kcontrol->id.name, "RX_1", + sizeof(kcontrol->id.name)) || + strnstr(kcontrol->id.name, "TX_1", + sizeof(kcontrol->id.name))) { + port->channel = TDM_1; + } else if (strnstr(kcontrol->id.name, "RX_2", + sizeof(kcontrol->id.name)) || + strnstr(kcontrol->id.name, "TX_2", + sizeof(kcontrol->id.name))) { + port->channel = TDM_2; + } else if (strnstr(kcontrol->id.name, "RX_3", + sizeof(kcontrol->id.name)) || + strnstr(kcontrol->id.name, "TX_3", + sizeof(kcontrol->id.name))) { + port->channel = TDM_3; + } else if 
(strnstr(kcontrol->id.name, "RX_4", + sizeof(kcontrol->id.name)) || + strnstr(kcontrol->id.name, "TX_4", + sizeof(kcontrol->id.name))) { + port->channel = TDM_4; + } else if (strnstr(kcontrol->id.name, "RX_5", + sizeof(kcontrol->id.name)) || + strnstr(kcontrol->id.name, "TX_5", + sizeof(kcontrol->id.name))) { + port->channel = TDM_5; + } else if (strnstr(kcontrol->id.name, "RX_6", + sizeof(kcontrol->id.name)) || + strnstr(kcontrol->id.name, "TX_6", + sizeof(kcontrol->id.name))) { + port->channel = TDM_6; + } else if (strnstr(kcontrol->id.name, "RX_7", + sizeof(kcontrol->id.name)) || + strnstr(kcontrol->id.name, "TX_7", + sizeof(kcontrol->id.name))) { + port->channel = TDM_7; + } else { + pr_err("%s: unsupported channel in: %s", + __func__, kcontrol->id.name); + return -EINVAL; + } + } else + return -EINVAL; + return 0; +} + +static int tdm_rx_sample_rate_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct tdm_port port; + int ret = tdm_get_port_idx(kcontrol, &port); + + if (ret) { + pr_err("%s: unsupported control: %s", + __func__, kcontrol->id.name); + } else { + ucontrol->value.enumerated.item[0] = tdm_get_sample_rate_val( + tdm_rx_cfg[port.mode][port.channel].sample_rate); + + pr_debug("%s: tdm_rx_sample_rate = %d, item = %d\n", __func__, + tdm_rx_cfg[port.mode][port.channel].sample_rate, + ucontrol->value.enumerated.item[0]); + } + return ret; +} + +static int tdm_rx_sample_rate_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct tdm_port port; + int ret = tdm_get_port_idx(kcontrol, &port); + + if (ret) { + pr_err("%s: unsupported control: %s", + __func__, kcontrol->id.name); + } else { + tdm_rx_cfg[port.mode][port.channel].sample_rate = + tdm_get_sample_rate(ucontrol->value.enumerated.item[0]); + + pr_debug("%s: tdm_rx_sample_rate = %d, item = %d\n", __func__, + tdm_rx_cfg[port.mode][port.channel].sample_rate, + ucontrol->value.enumerated.item[0]); + } + return ret; +} + +static int tdm_tx_sample_rate_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct tdm_port port; + int ret = tdm_get_port_idx(kcontrol, &port); + + if (ret) { + pr_err("%s: unsupported control: %s", + __func__, kcontrol->id.name); + } else { + ucontrol->value.enumerated.item[0] = tdm_get_sample_rate_val( + tdm_tx_cfg[port.mode][port.channel].sample_rate); + + pr_debug("%s: tdm_tx_sample_rate = %d, item = %d\n", __func__, + tdm_tx_cfg[port.mode][port.channel].sample_rate, + ucontrol->value.enumerated.item[0]); + } + return ret; +} + +static int tdm_tx_sample_rate_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct tdm_port port; + int ret = tdm_get_port_idx(kcontrol, &port); + + if (ret) { + pr_err("%s: unsupported control: %s", + __func__, kcontrol->id.name); + } else { + tdm_tx_cfg[port.mode][port.channel].sample_rate = + tdm_get_sample_rate(ucontrol->value.enumerated.item[0]); + + pr_debug("%s: tdm_tx_sample_rate = %d, item = %d\n", __func__, + tdm_tx_cfg[port.mode][port.channel].sample_rate, + ucontrol->value.enumerated.item[0]); + } + return ret; +} + +static int tdm_get_format(int value) +{ + int format = 0; + + switch (value) { + case 0: + format = SNDRV_PCM_FORMAT_S16_LE; + break; + case 1: + format = SNDRV_PCM_FORMAT_S24_LE; + break; + case 2: + format = SNDRV_PCM_FORMAT_S32_LE; + break; + default: + format = SNDRV_PCM_FORMAT_S16_LE; + break; + } + return format; +} + +static int tdm_get_format_val(int format) +{ + int value = 0; + + switch (format) { + case 
SNDRV_PCM_FORMAT_S16_LE: + value = 0; + break; + case SNDRV_PCM_FORMAT_S24_LE: + value = 1; + break; + case SNDRV_PCM_FORMAT_S32_LE: + value = 2; + break; + default: + value = 0; + break; + } + return value; +} + +static int tdm_rx_format_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct tdm_port port; + int ret = tdm_get_port_idx(kcontrol, &port); + + if (ret) { + pr_err("%s: unsupported control: %s", + __func__, kcontrol->id.name); + } else { + ucontrol->value.enumerated.item[0] = tdm_get_format_val( + tdm_rx_cfg[port.mode][port.channel].bit_format); + + pr_debug("%s: tdm_rx_bit_format = %d, item = %d\n", __func__, + tdm_rx_cfg[port.mode][port.channel].bit_format, + ucontrol->value.enumerated.item[0]); + } + return ret; +} + +static int tdm_rx_format_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct tdm_port port; + int ret = tdm_get_port_idx(kcontrol, &port); + + if (ret) { + pr_err("%s: unsupported control: %s", + __func__, kcontrol->id.name); + } else { + tdm_rx_cfg[port.mode][port.channel].bit_format = + tdm_get_format(ucontrol->value.enumerated.item[0]); + + pr_debug("%s: tdm_rx_bit_format = %d, item = %d\n", __func__, + tdm_rx_cfg[port.mode][port.channel].bit_format, + ucontrol->value.enumerated.item[0]); + } + return ret; +} + +static int tdm_tx_format_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct tdm_port port; + int ret = tdm_get_port_idx(kcontrol, &port); + + if (ret) { + pr_err("%s: unsupported control: %s", + __func__, kcontrol->id.name); + } else { + ucontrol->value.enumerated.item[0] = tdm_get_format_val( + tdm_tx_cfg[port.mode][port.channel].bit_format); + + pr_debug("%s: tdm_tx_bit_format = %d, item = %d\n", __func__, + tdm_tx_cfg[port.mode][port.channel].bit_format, + ucontrol->value.enumerated.item[0]); + } + return ret; +} + +static int tdm_tx_format_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct tdm_port port; + int ret = tdm_get_port_idx(kcontrol, &port); + + if (ret) { + pr_err("%s: unsupported control: %s", + __func__, kcontrol->id.name); + } else { + tdm_tx_cfg[port.mode][port.channel].bit_format = + tdm_get_format(ucontrol->value.enumerated.item[0]); + + pr_debug("%s: tdm_tx_bit_format = %d, item = %d\n", __func__, + tdm_tx_cfg[port.mode][port.channel].bit_format, + ucontrol->value.enumerated.item[0]); + } + return ret; +} + +static int tdm_rx_ch_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct tdm_port port; + int ret = tdm_get_port_idx(kcontrol, &port); + + if (ret) { + pr_err("%s: unsupported control: %s", + __func__, kcontrol->id.name); + } else { + + ucontrol->value.enumerated.item[0] = + tdm_rx_cfg[port.mode][port.channel].channels - 1; + + pr_debug("%s: tdm_rx_ch = %d, item = %d\n", __func__, + tdm_rx_cfg[port.mode][port.channel].channels - 1, + ucontrol->value.enumerated.item[0]); + } + return ret; +} + +static int tdm_rx_ch_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct tdm_port port; + int ret = tdm_get_port_idx(kcontrol, &port); + + if (ret) { + pr_err("%s: unsupported control: %s", + __func__, kcontrol->id.name); + } else { + tdm_rx_cfg[port.mode][port.channel].channels = + ucontrol->value.enumerated.item[0] + 1; + + pr_debug("%s: tdm_rx_ch = %d, item = %d\n", __func__, + tdm_rx_cfg[port.mode][port.channel].channels, + ucontrol->value.enumerated.item[0] + 1); + } + return ret; +} + +static int tdm_tx_ch_get(struct snd_kcontrol 
*kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct tdm_port port; + int ret = tdm_get_port_idx(kcontrol, &port); + + if (ret) { + pr_err("%s: unsupported control: %s", + __func__, kcontrol->id.name); + } else { + ucontrol->value.enumerated.item[0] = + tdm_tx_cfg[port.mode][port.channel].channels - 1; + + pr_debug("%s: tdm_tx_ch = %d, item = %d\n", __func__, + tdm_tx_cfg[port.mode][port.channel].channels - 1, + ucontrol->value.enumerated.item[0]); + } + return ret; +} + +static int tdm_tx_ch_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct tdm_port port; + int ret = tdm_get_port_idx(kcontrol, &port); + + if (ret) { + pr_err("%s: unsupported control: %s", + __func__, kcontrol->id.name); + } else { + tdm_tx_cfg[port.mode][port.channel].channels = + ucontrol->value.enumerated.item[0] + 1; + + pr_debug("%s: tdm_tx_ch = %d, item = %d\n", __func__, + tdm_tx_cfg[port.mode][port.channel].channels, + ucontrol->value.enumerated.item[0] + 1); + } + return ret; +} + static int aux_pcm_get_port_idx(struct snd_kcontrol *kcontrol) { int idx; @@ -1758,6 +2362,24 @@ static const struct snd_kcontrol_new msm_snd_controls[] = { SOC_ENUM_EXT("Display Port RX SampleRate", ext_disp_rx_sample_rate, ext_disp_rx_sample_rate_get, ext_disp_rx_sample_rate_put), + SOC_ENUM_EXT("TERT_TDM_RX_0 SampleRate", tdm_rx_sample_rate, + tdm_rx_sample_rate_get, + tdm_rx_sample_rate_put), + SOC_ENUM_EXT("TERT_TDM_TX_0 SampleRate", tdm_tx_sample_rate, + tdm_tx_sample_rate_get, + tdm_tx_sample_rate_put), + SOC_ENUM_EXT("TERT_TDM_RX_0 Format", tdm_rx_format, + tdm_rx_format_get, + tdm_rx_format_put), + SOC_ENUM_EXT("TERT_TDM_TX_0 Format", tdm_tx_format, + tdm_tx_format_get, + tdm_tx_format_put), + SOC_ENUM_EXT("TERT_TDM_RX_0 Channels", tdm_rx_chs, + tdm_rx_ch_get, + tdm_rx_ch_put), + SOC_ENUM_EXT("TERT_TDM_TX_0 Channels", tdm_tx_chs, + tdm_tx_ch_get, + tdm_tx_ch_put), SOC_ENUM_EXT("PRIM_AUX_PCM_RX SampleRate", prim_aux_pcm_rx_sample_rate, aux_pcm_rx_sample_rate_get, aux_pcm_rx_sample_rate_put), @@ -2095,6 +2717,22 @@ static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, rate->min = rate->max = SAMPLING_RATE_48KHZ; break; + case MSM_BACKEND_DAI_TERT_TDM_RX_0: + channels->min = channels->max = + tdm_rx_cfg[TDM_TERT][TDM_0].channels; + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + tdm_rx_cfg[TDM_TERT][TDM_0].bit_format); + rate->min = rate->max = tdm_rx_cfg[TDM_TERT][TDM_0].sample_rate; + break; + + case MSM_BACKEND_DAI_TERT_TDM_TX_0: + channels->min = channels->max = + tdm_tx_cfg[TDM_TERT][TDM_0].channels; + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + tdm_tx_cfg[TDM_TERT][TDM_0].bit_format); + rate->min = rate->max = tdm_tx_cfg[TDM_TERT][TDM_0].sample_rate; + break; + case MSM_BACKEND_DAI_AUXPCM_RX: rate->min = rate->max = aux_pcm_rx_cfg[PRIM_AUX_PCM].sample_rate; @@ -2612,7 +3250,7 @@ static void *def_tasha_mbhc_cal(void) return NULL; #define S(X, Y) ((WCD_MBHC_CAL_PLUG_TYPE_PTR(tasha_wcd_cal)->X) = (Y)) - S(v_hs_max, 1500); + S(v_hs_max, 1600); #undef S #define S(X, Y) ((WCD_MBHC_CAL_BTN_DET_PTR(tasha_wcd_cal)->X) = (Y)) S(num_btn, WCD_MBHC_DEF_BUTTONS); @@ -3176,6 +3814,157 @@ static struct snd_soc_ops msm_aux_pcm_be_ops = { .shutdown = msm_aux_pcm_snd_shutdown, }; +static unsigned int tdm_param_set_slot_mask(u16 port_id, int slot_width, + int slots) +{ + unsigned int slot_mask = 0; + int i, j; + unsigned int *slot_offset; + + for (i = TDM_0; i < TDM_PORT_MAX; i++) { + slot_offset = tdm_slot_offset[i]; + + for (j = 0; j < TDM_SLOT_OFFSET_MAX; j++) { + if 
(slot_offset[j] != AFE_SLOT_MAPPING_OFFSET_INVALID) + slot_mask |= + (1 << ((slot_offset[j] * 8) / slot_width)); + else + break; + } + } + + return slot_mask; +} + +static int msm_tdm_snd_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + struct snd_soc_pcm_runtime *rtd = substream->private_data; + struct snd_soc_dai *cpu_dai = rtd->cpu_dai; + int ret = 0; + int channels, slot_width, slots; + unsigned int slot_mask; + unsigned int *slot_offset; + int offset_channels = 0; + int i; + + pr_debug("%s: dai id = 0x%x\n", __func__, cpu_dai->id); + + channels = params_channels(params); + switch (channels) { + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S32_LE: + case SNDRV_PCM_FORMAT_S24_LE: + case SNDRV_PCM_FORMAT_S16_LE: + /* + * up to 8 channels HW config should + * use 32 bit slot width for max support of + * stream bit width. (slot_width > bit_width) + */ + slot_width = 32; + break; + default: + pr_err("%s: invalid param format 0x%x\n", + __func__, params_format(params)); + return -EINVAL; + } + slots = 8; + slot_mask = tdm_param_set_slot_mask(cpu_dai->id, + slot_width, + slots); + if (!slot_mask) { + pr_err("%s: invalid slot_mask 0x%x\n", + __func__, slot_mask); + return -EINVAL; + } + break; + default: + pr_err("%s: invalid param channels %d\n", + __func__, channels); + return -EINVAL; + } + /* currently only supporting TDM_RX_0 and TDM_TX_0 */ + switch (cpu_dai->id) { + case AFE_PORT_ID_PRIMARY_TDM_RX: + case AFE_PORT_ID_SECONDARY_TDM_RX: + case AFE_PORT_ID_TERTIARY_TDM_RX: + case AFE_PORT_ID_QUATERNARY_TDM_RX: + case AFE_PORT_ID_PRIMARY_TDM_TX: + case AFE_PORT_ID_SECONDARY_TDM_TX: + case AFE_PORT_ID_TERTIARY_TDM_TX: + case AFE_PORT_ID_QUATERNARY_TDM_TX: + slot_offset = tdm_slot_offset[TDM_0]; + break; + default: + pr_err("%s: dai id 0x%x not supported\n", + __func__, cpu_dai->id); + return -EINVAL; + } + + for (i = 0; i < TDM_SLOT_OFFSET_MAX; i++) { + if (slot_offset[i] != AFE_SLOT_MAPPING_OFFSET_INVALID) + offset_channels++; + else + break; + } + + if (offset_channels == 0) { + pr_err("%s: slot offset not supported, offset_channels %d\n", + __func__, offset_channels); + return -EINVAL; + } + + if (channels > offset_channels) { + pr_err("%s: channels %d exceed offset_channels %d\n", + __func__, channels, offset_channels); + return -EINVAL; + } + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + ret = snd_soc_dai_set_tdm_slot(cpu_dai, 0, slot_mask, + slots, slot_width); + if (ret < 0) { + pr_err("%s: failed to set tdm slot, err:%d\n", + __func__, ret); + goto end; + } + + ret = snd_soc_dai_set_channel_map(cpu_dai, 0, NULL, + channels, slot_offset); + if (ret < 0) { + pr_err("%s: failed to set channel map, err:%d\n", + __func__, ret); + goto end; + } + } else { + ret = snd_soc_dai_set_tdm_slot(cpu_dai, slot_mask, 0, + slots, slot_width); + if (ret < 0) { + pr_err("%s: failed to set tdm slot, err:%d\n", + __func__, ret); + goto end; + } + + ret = snd_soc_dai_set_channel_map(cpu_dai, channels, + slot_offset, 0, NULL); + if (ret < 0) { + pr_err("%s: failed to set channel map, err:%d\n", + __func__, ret); + goto end; + } + } +end: + return ret; +} + static struct snd_soc_ops msm_be_ops = { .hw_params = msm_snd_hw_params, }; @@ -3192,6 +3981,10 @@ static struct snd_soc_ops msm_wcn_ops = { .hw_params = msm_wcn_hw_params, }; +static struct snd_soc_ops msm_tdm_be_ops = { + .hw_params = msm_tdm_snd_hw_params +}; + /* Digital audio interface glue - connects codec 
<---> CPU */ static struct snd_soc_dai_link msm_common_dai_links[] = { /* FrontEnd DAI Links */ @@ -3992,6 +4785,34 @@ static struct snd_soc_dai_link msm_common_be_dai_links[] = { .be_hw_params_fixup = msm_be_hw_params_fixup, .ignore_suspend = 1, }, + { + .name = LPASS_BE_TERT_TDM_RX_0, + .stream_name = "Tertiary TDM0 Playback", + .cpu_dai_name = "msm-dai-q6-tdm.36896", + .platform_name = "msm-pcm-routing", + .codec_name = "msm-stub-codec.1", + .codec_dai_name = "msm-stub-rx", + .no_pcm = 1, + .dpcm_playback = 1, + .be_id = MSM_BACKEND_DAI_TERT_TDM_RX_0, + .be_hw_params_fixup = msm_be_hw_params_fixup, + .ops = &msm_tdm_be_ops, + .ignore_suspend = 1, + }, + { + .name = LPASS_BE_TERT_TDM_TX_0, + .stream_name = "Tertiary TDM0 Capture", + .cpu_dai_name = "msm-dai-q6-tdm.36897", + .platform_name = "msm-pcm-routing", + .codec_name = "msm-stub-codec.1", + .codec_dai_name = "msm-stub-tx", + .no_pcm = 1, + .dpcm_capture = 1, + .be_id = MSM_BACKEND_DAI_TERT_TDM_TX_0, + .be_hw_params_fixup = msm_be_hw_params_fixup, + .ops = &msm_tdm_be_ops, + .ignore_suspend = 1, + }, }; static struct snd_soc_dai_link msm_tasha_be_dai_links[] = { @@ -4290,6 +5111,21 @@ static struct snd_soc_dai_link msm_tavil_be_dai_links[] = { .ignore_pmdown_time = 1, .ignore_suspend = 1, }, + /* MAD BE */ + { + .name = LPASS_BE_SLIMBUS_5_TX, + .stream_name = "Slimbus5 Capture", + .cpu_dai_name = "msm-dai-q6-dev.16395", + .platform_name = "msm-pcm-routing", + .codec_name = "tavil_codec", + .codec_dai_name = "tavil_mad1", + .no_pcm = 1, + .dpcm_capture = 1, + .be_id = MSM_BACKEND_DAI_SLIMBUS_5_TX, + .be_hw_params_fixup = msm_be_hw_params_fixup, + .ops = &msm_be_ops, + .ignore_suspend = 1, + }, { .name = LPASS_BE_SLIMBUS_6_RX, .stream_name = "Slimbus6 Playback", diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c index 841bb5bce13f..770bd12eb501 100644 --- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c @@ -709,6 +709,10 @@ static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream, } switch (prtd->codec_param.codec.format) { + case SNDRV_PCM_FORMAT_S32_LE: + bit_width = 32; + sample_word_size = 32; + break; case SNDRV_PCM_FORMAT_S24_LE: bit_width = 24; sample_word_size = 32; @@ -723,14 +727,16 @@ static int msm_compr_send_media_format_block(struct snd_compr_stream *cstream, sample_word_size = 16; break; } - ret = q6asm_media_format_block_pcm_format_support_v3( + ret = q6asm_media_format_block_pcm_format_support_v4( prtd->audio_client, prtd->sample_rate, prtd->num_channels, bit_width, stream_id, use_default_chmap, chmap, - sample_word_size); + sample_word_size, + ASM_LITTLE_ENDIAN, + DEFAULT_QF); if (ret < 0) pr_err("%s: CMD Format block failed\n", __func__); @@ -1010,7 +1016,7 @@ static int msm_compr_configure_dsp(struct snd_compr_stream *cstream) } else { pr_debug("%s: stream_id %d bits_per_sample %d\n", __func__, ac->stream_id, bits_per_sample); - ret = q6asm_stream_open_write_v3(ac, + ret = q6asm_stream_open_write_v4(ac, prtd->codec, bits_per_sample, ac->stream_id, prtd->gapless_state.use_dsp_gapless_mode); @@ -1942,7 +1948,7 @@ static int msm_compr_trigger(struct snd_compr_stream *cstream, int cmd) pr_debug("%s: open_write stream_id %d bits_per_sample %d", __func__, stream_id, bits_per_sample); - rc = q6asm_stream_open_write_v3(prtd->audio_client, + rc = q6asm_stream_open_write_v4(prtd->audio_client, prtd->codec, bits_per_sample, stream_id, prtd->gapless_state.use_dsp_gapless_mode); diff --git 
a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c index a89d88eac41e..7eb4a10b83c7 100644 --- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c @@ -173,6 +173,7 @@ struct msm_dai_q6_dai_data { u32 bitwidth; u32 cal_mode; u32 afe_in_channels; + u16 afe_in_bitformat; struct afe_enc_config enc_config; union afe_port_config port_config; }; @@ -1417,11 +1418,20 @@ static int msm_dai_q6_prepare(struct snd_pcm_substream *substream, if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) { if (dai_data->enc_config.format != ENC_FMT_NONE) { + int bitwidth = 0; + + if (dai_data->afe_in_bitformat == + SNDRV_PCM_FORMAT_S24_LE) + bitwidth = 24; + else if (dai_data->afe_in_bitformat == + SNDRV_PCM_FORMAT_S16_LE) + bitwidth = 16; pr_debug("%s: calling AFE_PORT_START_V2 with enc_format: %d\n", __func__, dai_data->enc_config.format); rc = afe_port_start_v2(dai->id, &dai_data->port_config, dai_data->rate, dai_data->afe_in_channels, + bitwidth, &dai_data->enc_config); if (rc < 0) pr_err("%s: afe_port_start_v2 failed error: %d\n", @@ -1607,8 +1617,13 @@ static int msm_dai_q6_usb_audio_hw_params(struct snd_pcm_hw_params *params, dai_data->port_config.usb_audio.bit_width = 16; break; case SNDRV_PCM_FORMAT_S24_LE: + case SNDRV_PCM_FORMAT_S24_3LE: dai_data->port_config.usb_audio.bit_width = 24; break; + case SNDRV_PCM_FORMAT_S32_LE: + dai_data->port_config.usb_audio.bit_width = 32; + break; + default: dev_err(dai->dev, "%s: invalid format %d\n", __func__, params_format(params)); @@ -2140,6 +2155,12 @@ static const struct soc_enum afe_input_chs_enum[] = { SOC_ENUM_SINGLE_EXT(3, afe_input_chs_text), }; +static const char *const afe_input_bit_format_text[] = {"S16_LE", "S24_LE"}; + +static const struct soc_enum afe_input_bit_format_enum[] = { + SOC_ENUM_SINGLE_EXT(2, afe_input_bit_format_text), +}; + static int msm_dai_q6_afe_input_channel_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -2168,6 +2189,58 @@ static int msm_dai_q6_afe_input_channel_put(struct snd_kcontrol *kcontrol, return 0; } +static int msm_dai_q6_afe_input_bit_format_get( + struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data; + + if (!dai_data) { + pr_err("%s: Invalid dai data\n", __func__); + return -EINVAL; + } + + switch (dai_data->afe_in_bitformat) { + case SNDRV_PCM_FORMAT_S24_LE: + ucontrol->value.integer.value[0] = 1; + break; + case SNDRV_PCM_FORMAT_S16_LE: + default: + ucontrol->value.integer.value[0] = 0; + break; + } + pr_debug("%s: afe input bit format : %ld\n", + __func__, ucontrol->value.integer.value[0]); + + return 0; +} + +static int msm_dai_q6_afe_input_bit_format_put( + struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + struct msm_dai_q6_dai_data *dai_data = kcontrol->private_data; + + if (!dai_data) { + pr_err("%s: Invalid dai data\n", __func__); + return -EINVAL; + } + switch (ucontrol->value.integer.value[0]) { + case 1: + dai_data->afe_in_bitformat = SNDRV_PCM_FORMAT_S24_LE; + break; + case 0: + default: + dai_data->afe_in_bitformat = SNDRV_PCM_FORMAT_S16_LE; + break; + } + pr_debug("%s: updating afe input bit format : %d\n", + __func__, dai_data->afe_in_bitformat); + + return 0; +} + + static const struct snd_kcontrol_new afe_enc_config_controls[] = { { .access = (SNDRV_CTL_ELEM_ACCESS_READWRITE | @@ -2181,6 +2254,9 @@ static const struct snd_kcontrol_new afe_enc_config_controls[] = { SOC_ENUM_EXT("AFE Input Channels", 
afe_input_chs_enum[0], msm_dai_q6_afe_input_channel_get, msm_dai_q6_afe_input_channel_put), + SOC_ENUM_EXT("AFE Input Bit Format", afe_input_bit_format_enum[0], + msm_dai_q6_afe_input_bit_format_get, + msm_dai_q6_afe_input_bit_format_put), }; static const char * const afe_cal_mode_text[] = { @@ -2570,11 +2646,12 @@ static struct snd_soc_dai_driver msm_dai_q6_usb_rx_dai = { SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 | - SNDRV_PCM_RATE_192000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_384000, + .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE, .channels_min = 1, .channels_max = 8, - .rate_max = 192000, + .rate_max = 384000, .rate_min = 8000, }, .ops = &msm_dai_q6_ops, @@ -2591,11 +2668,12 @@ static struct snd_soc_dai_driver msm_dai_q6_usb_tx_dai = { SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 | - SNDRV_PCM_RATE_192000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE, + SNDRV_PCM_RATE_192000 | SNDRV_PCM_RATE_384000, + .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | + SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE, .channels_min = 1, .channels_max = 8, - .rate_max = 192000, + .rate_max = 384000, .rate_min = 8000, }, .ops = &msm_dai_q6_ops, @@ -5825,11 +5903,6 @@ static int msm_dai_q6_tdm_hw_params(struct snd_pcm_substream *substream, pr_debug("%s: dev_name: %s\n", __func__, dev_name(dai->dev)); - if (params_rate(params) != 48000) { - dev_err(dai->dev, "%s: invalid param rate %d\n", - __func__, params_rate(params)); - return -EINVAL; - } if ((params_channels(params) == 0) || (params_channels(params) > 8)) { dev_err(dai->dev, "%s: invalid param channels %d\n", diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c index 4e93780c4da0..c5baf0e63732 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c @@ -67,10 +67,11 @@ static struct snd_pcm_hardware msm_pcm_hardware_capture = { SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), - .rates = SNDRV_PCM_RATE_8000_48000, + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), + .rates = SNDRV_PCM_RATE_8000_384000, .rate_min = 8000, - .rate_max = 48000, + .rate_max = 384000, .channels_min = 1, .channels_max = 4, .buffer_bytes_max = CAPTURE_MAX_NUM_PERIODS * @@ -90,10 +91,11 @@ static struct snd_pcm_hardware msm_pcm_hardware_playback = { SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | - SNDRV_PCM_FMTBIT_S24_3LE), - .rates = SNDRV_PCM_RATE_8000_192000, + SNDRV_PCM_FMTBIT_S24_3LE | + SNDRV_PCM_FMTBIT_S32_LE), + .rates = SNDRV_PCM_RATE_8000_384000, .rate_min = 8000, - .rate_max = 192000, + .rate_max = 384000, .channels_min = 1, .channels_max = 8, .buffer_bytes_max = PLAYBACK_MAX_NUM_PERIODS * @@ -108,7 +110,7 @@ static struct snd_pcm_hardware msm_pcm_hardware_playback = { /* Conventional and unconventional sample rate supported */ static unsigned int supported_sample_rates[] = { 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, - 88200, 96000, 176400, 192000 + 88200, 96000, 176400, 192000, 384000 }; static struct snd_pcm_hw_constraint_list constraints_sample_rates = { @@ 
-313,6 +315,10 @@ static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream) pr_debug("%s: perf: %x\n", __func__, pdata->perf_mode); switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S32_LE: + bits_per_sample = 32; + sample_word_size = 32; + break; case SNDRV_PCM_FORMAT_S24_LE: bits_per_sample = 24; sample_word_size = 32; @@ -328,7 +334,7 @@ static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream) break; } - ret = q6asm_open_write_v3(prtd->audio_client, + ret = q6asm_open_write_v4(prtd->audio_client, FORMAT_LINEAR_PCM, bits_per_sample); if (ret < 0) { @@ -353,11 +359,12 @@ static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream) return ret; } - ret = q6asm_media_format_block_multi_ch_pcm_v3( + ret = q6asm_media_format_block_multi_ch_pcm_v4( prtd->audio_client, runtime->rate, runtime->channels, !prtd->set_channel_map, prtd->channel_map, bits_per_sample, - sample_word_size); + sample_word_size, ASM_LITTLE_ENDIAN, + DEFAULT_QF); if (ret < 0) pr_info("%s: CMD Format block failed\n", __func__); @@ -402,6 +409,8 @@ static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream) if ((params_format(params) == SNDRV_PCM_FORMAT_S24_LE) || (params_format(params) == SNDRV_PCM_FORMAT_S24_3LE)) bits_per_sample = 24; + else if (params_format(params) == SNDRV_PCM_FORMAT_S32_LE) + bits_per_sample = 32; /* ULL mode is not supported in capture path */ if (pdata->perf_mode == LEGACY_PCM_MODE) @@ -413,7 +422,7 @@ static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream) __func__, params_channels(params), prtd->audio_client->perf_mode); - ret = q6asm_open_read_v3(prtd->audio_client, FORMAT_LINEAR_PCM, + ret = q6asm_open_read_v4(prtd->audio_client, FORMAT_LINEAR_PCM, bits_per_sample); if (ret < 0) { pr_err("%s: q6asm_open_read failed\n", __func__); @@ -459,6 +468,10 @@ static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream) return 0; switch (runtime->format) { + case SNDRV_PCM_FORMAT_S32_LE: + bits_per_sample = 32; + sample_word_size = 32; + break; case SNDRV_PCM_FORMAT_S24_LE: bits_per_sample = 24; sample_word_size = 32; @@ -477,11 +490,13 @@ static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream) pr_debug("%s: Samp_rate = %d Channel = %d bit width = %d, word size = %d\n", __func__, prtd->samp_rate, prtd->channel_mode, bits_per_sample, sample_word_size); - ret = q6asm_enc_cfg_blk_pcm_format_support_v3(prtd->audio_client, + ret = q6asm_enc_cfg_blk_pcm_format_support_v4(prtd->audio_client, prtd->samp_rate, prtd->channel_mode, bits_per_sample, - sample_word_size); + sample_word_size, + ASM_LITTLE_ENDIAN, + DEFAULT_QF); if (ret < 0) pr_debug("%s: cmd cfg pcm was block failed", __func__); diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h index 72418ea56bb9..8fe31394eef0 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h +++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.h @@ -59,11 +59,11 @@ struct msm_audio_in_frame_info { #define PLAYBACK_MIN_NUM_PERIODS 2 #define PLAYBACK_MAX_NUM_PERIODS 8 -#define PLAYBACK_MAX_PERIOD_SIZE 12288 +#define PLAYBACK_MAX_PERIOD_SIZE 122880 #define PLAYBACK_MIN_PERIOD_SIZE 128 #define CAPTURE_MIN_NUM_PERIODS 2 #define CAPTURE_MAX_NUM_PERIODS 8 -#define CAPTURE_MAX_PERIOD_SIZE 61440 +#define CAPTURE_MAX_PERIOD_SIZE 122880 #define CAPTURE_MIN_PERIOD_SIZE 320 struct msm_audio { diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c index af5a99e56afc..be0a8b2e3abe 100644 --- a/sound/soc/msm/qdsp6v2/q6afe.c +++ 
b/sound/soc/msm/qdsp6v2/q6afe.c @@ -2645,8 +2645,9 @@ exit: } static int q6afe_send_enc_config(u16 port_id, - union afe_enc_config_data *cfg, u32 format, - union afe_port_config afe_config, u16 afe_in_channels) + union afe_enc_config_data *cfg, u32 format, + union afe_port_config afe_config, + u16 afe_in_channels, u16 afe_in_bit_width) { struct afe_audioif_config_command config; int index; @@ -2728,8 +2729,13 @@ static int q6afe_send_enc_config(u16 port_id, config.pdata.param_id = AFE_PARAM_ID_PORT_MEDIA_TYPE; config.port.media_type.minor_version = AFE_API_VERSION_PORT_MEDIA_TYPE; config.port.media_type.sample_rate = afe_config.slim_sch.sample_rate; - config.port.media_type.bit_width = afe_config.slim_sch.bit_width; - if (afe_in_channels != 0) + if (afe_in_bit_width) + config.port.media_type.bit_width = afe_in_bit_width; + else + config.port.media_type.bit_width = + afe_config.slim_sch.bit_width; + + if (afe_in_channels) config.port.media_type.num_channels = afe_in_channels; else config.port.media_type.num_channels = @@ -2749,8 +2755,8 @@ exit: } static int __afe_port_start(u16 port_id, union afe_port_config *afe_config, - u32 rate, u16 afe_in_channels, - union afe_enc_config_data *cfg, u32 enc_format) + u32 rate, u16 afe_in_channels, u16 afe_in_bit_width, + union afe_enc_config_data *cfg, u32 enc_format) { struct afe_audioif_config_command config; int ret = 0; @@ -2989,7 +2995,8 @@ static int __afe_port_start(u16 port_id, union afe_port_config *afe_config, pr_debug("%s: Found AFE encoder support for SLIMBUS enc_format = %d\n", __func__, enc_format); ret = q6afe_send_enc_config(port_id, cfg, enc_format, - *afe_config, afe_in_channels); + *afe_config, afe_in_channels, + afe_in_bit_width); if (ret) { pr_err("%s: AFE encoder config for port 0x%x failed %d\n", __func__, port_id, ret); @@ -3043,7 +3050,7 @@ int afe_port_start(u16 port_id, union afe_port_config *afe_config, u32 rate) { return __afe_port_start(port_id, afe_config, rate, - 0, NULL, ASM_MEDIA_FMT_NONE); + 0, 0, NULL, ASM_MEDIA_FMT_NONE); } EXPORT_SYMBOL(afe_port_start); @@ -3061,12 +3068,12 @@ EXPORT_SYMBOL(afe_port_start); * Returns 0 on success or error value on port start failure. */ int afe_port_start_v2(u16 port_id, union afe_port_config *afe_config, - u32 rate, u16 afe_in_channels, + u32 rate, u16 afe_in_channels, u16 afe_in_bit_width, struct afe_enc_config *enc_cfg) { return __afe_port_start(port_id, afe_config, rate, - afe_in_channels, &enc_cfg->data, - enc_cfg->format); + afe_in_channels, afe_in_bit_width, + &enc_cfg->data, enc_cfg->format); } EXPORT_SYMBOL(afe_port_start_v2); diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c index b4257f990aa5..88c27339b299 100644 --- a/sound/soc/msm/qdsp6v2/q6asm.c +++ b/sound/soc/msm/qdsp6v2/q6asm.c @@ -183,6 +183,25 @@ static inline void q6asm_update_token(u32 *token, u8 session_id, u8 stream_id, *token = asm_token.token; } +static inline uint32_t q6asm_get_pcm_format_id(uint32_t media_format_block_ver) +{ + uint32_t pcm_format_id; + + switch (media_format_block_ver) { + case PCM_MEDIA_FORMAT_V4: + pcm_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V4; + break; + case PCM_MEDIA_FORMAT_V3: + pcm_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3; + break; + case PCM_MEDIA_FORMAT_V2: + default: + pcm_format_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2; + break; + } + return pcm_format_id; +} + /* * q6asm_get_buf_index_from_token: * Retrieve buffer index from token. 
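
The q6asm_get_pcm_format_id() helper added above collapses the old use_v3_format boolean plumbing into an explicit media-format-block version, with any unrecognized version falling back to the V2 format ID. For reference, a minimal standalone sketch of that mapping follows; the numeric values here are illustrative placeholders, not the kernel's actual PCM_MEDIA_FORMAT_* or ASM_MEDIA_FMT_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the kernel headers define the real ones. */
enum { PCM_MEDIA_FORMAT_V2 = 2, PCM_MEDIA_FORMAT_V3 = 3, PCM_MEDIA_FORMAT_V4 = 4 };
enum {
	FMT_MULTI_CHANNEL_PCM_V2 = 0xD2, /* placeholder format IDs */
	FMT_MULTI_CHANNEL_PCM_V3 = 0xD3,
	FMT_MULTI_CHANNEL_PCM_V4 = 0xD4,
};

/* Mirrors q6asm_get_pcm_format_id(): unknown versions degrade to V2. */
static uint32_t pcm_format_id(uint32_t media_format_block_ver)
{
	switch (media_format_block_ver) {
	case PCM_MEDIA_FORMAT_V4:
		return FMT_MULTI_CHANNEL_PCM_V4;
	case PCM_MEDIA_FORMAT_V3:
		return FMT_MULTI_CHANNEL_PCM_V3;
	case PCM_MEDIA_FORMAT_V2:
	default:
		return FMT_MULTI_CHANNEL_PCM_V2;
	}
}

int main(void)
{
	for (uint32_t ver = 1; ver <= 5; ver++)
		printf("media fmt block ver %u -> format id 0x%02x\n",
		       ver, pcm_format_id(ver));
	return 0;
}

The default-to-V2 arm keeps every pre-v4 caller behaviorally unchanged: q6asm_open_read() and q6asm_open_read_v2() still select the V2 format block, and only the new _v4 entry points reach the V4 format ID.
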
@@ -2263,7 +2282,7 @@ static void q6asm_add_mmaphdr(struct audio_client *ac, struct apr_hdr *hdr, static int __q6asm_open_read(struct audio_client *ac, uint32_t format, uint16_t bits_per_sample, - bool use_v3_format) + uint32_t pcm_format_block_ver) { int rc = 0x00; struct asm_stream_cmd_open_read_v3 open; @@ -2306,10 +2325,7 @@ static int __q6asm_open_read(struct audio_client *ac, switch (format) { case FORMAT_LINEAR_PCM: open.mode_flags |= 0x00; - if (use_v3_format) - open.enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3; - else - open.enc_cfg_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2; + open.enc_cfg_id = q6asm_get_pcm_format_id(pcm_format_block_ver); break; case FORMAT_MPEG4_AAC: open.mode_flags |= BUFFER_META_ENABLE; @@ -2372,14 +2388,14 @@ int q6asm_open_read(struct audio_client *ac, uint32_t format) { return __q6asm_open_read(ac, format, 16, - false /*use_v3_format*/); + PCM_MEDIA_FORMAT_V2 /*media fmt block ver*/); } int q6asm_open_read_v2(struct audio_client *ac, uint32_t format, uint16_t bits_per_sample) { return __q6asm_open_read(ac, format, bits_per_sample, - false /*use_v3_format*/); + PCM_MEDIA_FORMAT_V2 /*media fmt block ver*/); } /* @@ -2393,10 +2409,25 @@ int q6asm_open_read_v3(struct audio_client *ac, uint32_t format, uint16_t bits_per_sample) { return __q6asm_open_read(ac, format, bits_per_sample, - true /*use_v3_format*/); + PCM_MEDIA_FORMAT_V3 /*media fmt block ver*/); } EXPORT_SYMBOL(q6asm_open_read_v3); +/* + * q6asm_open_read_v4 - Opens audio capture session + * + * @ac: Client session handle + * @format: encoder format + * @bits_per_sample: bit width of capture session + */ +int q6asm_open_read_v4(struct audio_client *ac, uint32_t format, + uint16_t bits_per_sample) +{ + return __q6asm_open_read(ac, format, bits_per_sample, + PCM_MEDIA_FORMAT_V4 /*media fmt block ver*/); +} +EXPORT_SYMBOL(q6asm_open_read_v4); + int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format, uint32_t passthrough_flag) { @@ -2488,7 +2519,8 @@ fail_cmd: static int __q6asm_open_write(struct audio_client *ac, uint32_t format, uint16_t bits_per_sample, uint32_t stream_id, - bool is_gapless_mode, bool use_v3_format) + bool is_gapless_mode, + uint32_t pcm_format_block_ver) { int rc = 0x00; struct asm_stream_cmd_open_write_v3 open; @@ -2564,11 +2596,7 @@ static int __q6asm_open_write(struct audio_client *ac, uint32_t format, } switch (format) { case FORMAT_LINEAR_PCM: - if (use_v3_format) - open.dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V3; - else - open.dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2; - + open.dec_fmt_id = q6asm_get_pcm_format_id(pcm_format_block_ver); break; case FORMAT_MPEG4_AAC: open.dec_fmt_id = ASM_MEDIA_FMT_AAC_V2; @@ -2647,7 +2675,7 @@ int q6asm_open_write(struct audio_client *ac, uint32_t format) { return __q6asm_open_write(ac, format, 16, ac->stream_id, false /*gapless*/, - false /*use_v3_format*/); + PCM_MEDIA_FORMAT_V2 /*pcm_format_block_ver*/); } int q6asm_open_write_v2(struct audio_client *ac, uint32_t format, @@ -2655,7 +2683,7 @@ int q6asm_open_write_v2(struct audio_client *ac, uint32_t format, { return __q6asm_open_write(ac, format, bits_per_sample, ac->stream_id, false /*gapless*/, - false /*use_v3_format*/); + PCM_MEDIA_FORMAT_V2 /*pcm_format_block_ver*/); } /* @@ -2670,17 +2698,33 @@ int q6asm_open_write_v3(struct audio_client *ac, uint32_t format, { return __q6asm_open_write(ac, format, bits_per_sample, ac->stream_id, false /*gapless*/, - true /*use_v3_format*/); + PCM_MEDIA_FORMAT_V3 /*pcm_format_block_ver*/); } 
EXPORT_SYMBOL(q6asm_open_write_v3); +/* + * q6asm_open_write_v4 - Opens audio playback session + * + * @ac: Client session handle + * @format: decoder format + * @bits_per_sample: bit width of playback session + */ +int q6asm_open_write_v4(struct audio_client *ac, uint32_t format, + uint16_t bits_per_sample) +{ + return __q6asm_open_write(ac, format, bits_per_sample, + ac->stream_id, false /*gapless*/, + PCM_MEDIA_FORMAT_V4 /*pcm_format_block_ver*/); +} +EXPORT_SYMBOL(q6asm_open_write_v4); + int q6asm_stream_open_write_v2(struct audio_client *ac, uint32_t format, uint16_t bits_per_sample, int32_t stream_id, bool is_gapless_mode) { return __q6asm_open_write(ac, format, bits_per_sample, stream_id, is_gapless_mode, - false /*use_v3_format*/); + PCM_MEDIA_FORMAT_V2 /*pcm_format_block_ver*/); } /* @@ -2698,10 +2742,29 @@ int q6asm_stream_open_write_v3(struct audio_client *ac, uint32_t format, { return __q6asm_open_write(ac, format, bits_per_sample, stream_id, is_gapless_mode, - true /*use_v3_format*/); + PCM_MEDIA_FORMAT_V3 /*pcm_format_block_ver*/); } EXPORT_SYMBOL(q6asm_stream_open_write_v3); +/* + * q6asm_stream_open_write_v4 - Creates audio stream for playback + * + * @ac: Client session handle + * @format: asm playback format + * @bits_per_sample: bit width of requested stream + * @stream_id: stream id of stream to be associated with this session + * @is_gapless_mode: true if gapless mode needs to be enabled + */ +int q6asm_stream_open_write_v4(struct audio_client *ac, uint32_t format, + uint16_t bits_per_sample, int32_t stream_id, + bool is_gapless_mode) +{ + return __q6asm_open_write(ac, format, bits_per_sample, + stream_id, is_gapless_mode, + PCM_MEDIA_FORMAT_V4 /*pcm_format_block_ver*/); +} +EXPORT_SYMBOL(q6asm_stream_open_write_v4); + static int __q6asm_open_read_write(struct audio_client *ac, uint32_t rd_format, uint32_t wr_format, bool is_meta_data_mode, uint32_t bits_per_sample, @@ -3525,6 +3588,108 @@ fail_cmd: } /* + * q6asm_enc_cfg_blk_pcm_v4 - sends encoder configuration parameters + * + * @ac: Client session handle + * @rate: sample rate + * @channels: number of channels + * @bits_per_sample: bit width of encoder session + * @use_default_chmap: true if default channel map to be used + * @use_back_flavor: to configure back left and right channel + * @channel_map: input channel map + * @sample_word_size: Size in bits of the word that holds a sample of a channel + * @endianness: endianness of the pcm data + * @mode: Mode to provide additional info about the pcm input data + */ +int q6asm_enc_cfg_blk_pcm_v4(struct audio_client *ac, + uint32_t rate, uint32_t channels, + uint16_t bits_per_sample, bool use_default_chmap, + bool use_back_flavor, u8 *channel_map, + uint16_t sample_word_size, uint16_t endianness, + uint16_t mode) +{ + struct asm_multi_channel_pcm_enc_cfg_v4 enc_cfg; + struct asm_enc_cfg_blk_param_v2 enc_fg_blk; + u8 *channel_mapping; + u32 frames_per_buf = 0; + int rc; + + if (!use_default_chmap && (channel_map == NULL)) { + pr_err("%s: No valid chan map and can't use default\n", + __func__); + rc = -EINVAL; + goto fail_cmd; + } + + pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]wordsize[%d]\n", __func__, + ac->session, rate, channels, + bits_per_sample, sample_word_size); + + memset(&enc_cfg, 0, sizeof(enc_cfg)); + q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE); + atomic_set(&ac->cmd_state, -1); + enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM; + enc_cfg.encdec.param_id = ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2; + enc_cfg.encdec.param_size = sizeof(enc_cfg) - 
sizeof(enc_cfg.hdr) - + sizeof(enc_cfg.encdec); + enc_cfg.encblk.frames_per_buf = frames_per_buf; + enc_cfg.encblk.enc_cfg_blk_size = enc_cfg.encdec.param_size - + sizeof(enc_fg_blk); + enc_cfg.num_channels = channels; + enc_cfg.bits_per_sample = bits_per_sample; + enc_cfg.sample_rate = rate; + enc_cfg.is_signed = 1; + enc_cfg.sample_word_size = sample_word_size; + enc_cfg.endianness = endianness; + enc_cfg.mode = mode; + channel_mapping = enc_cfg.channel_mapping; + + memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL); + + if (use_default_chmap) { + pr_debug("%s: setting default channel map for %d channels", + __func__, channels); + if (q6asm_map_channels(channel_mapping, channels, + use_back_flavor)) { + pr_err("%s: map channels failed %d\n", + __func__, channels); + rc = -EINVAL; + goto fail_cmd; + } + } else { + pr_debug("%s: Using pre-defined channel map", __func__); + memcpy(channel_mapping, channel_map, + PCM_FORMAT_MAX_NUM_CHANNEL); + } + + rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg); + if (rc < 0) { + pr_err("%s: Command open failed %d\n", __func__, rc); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) >= 0), 5*HZ); + if (!rc) { + pr_err("%s: timeout opcode[0x%x]\n", + __func__, enc_cfg.hdr.opcode); + rc = -ETIMEDOUT; + goto fail_cmd; + } + if (atomic_read(&ac->cmd_state) > 0) { + pr_err("%s: DSP returned error[%s]\n", + __func__, adsp_err_get_err_str( + atomic_read(&ac->cmd_state))); + rc = adsp_err_get_lnx_err_code( + atomic_read(&ac->cmd_state)); + goto fail_cmd; + } + return 0; +fail_cmd: + return rc; +} +EXPORT_SYMBOL(q6asm_enc_cfg_blk_pcm_v4); + +/* * q6asm_enc_cfg_blk_pcm_v3 - sends encoder configuration parameters * * @ac: Client session handle @@ -3700,6 +3865,18 @@ fail_cmd: return rc; } +static int __q6asm_enc_cfg_blk_pcm_v4(struct audio_client *ac, + uint32_t rate, uint32_t channels, + uint16_t bits_per_sample, + uint16_t sample_word_size, + uint16_t endianness, + uint16_t mode) +{ + return q6asm_enc_cfg_blk_pcm_v4(ac, rate, channels, + bits_per_sample, true, false, NULL, + sample_word_size, endianness, mode); +} + static int __q6asm_enc_cfg_blk_pcm_v3(struct audio_client *ac, uint32_t rate, uint32_t channels, uint16_t bits_per_sample, @@ -3749,6 +3926,31 @@ int q6asm_enc_cfg_blk_pcm_format_support_v3(struct audio_client *ac, } EXPORT_SYMBOL(q6asm_enc_cfg_blk_pcm_format_support_v3); +/* + * q6asm_enc_cfg_blk_pcm_format_support_v4 - sends encoder configuration + * parameters + * + * @ac: Client session handle + * @rate: sample rate + * @channels: number of channels + * @bits_per_sample: bit width of encoder session + * @sample_word_size: Size in bits of the word that holds a sample of a channel + * @endianness: endianness of the pcm data + * @mode: Mode to provide additional info about the pcm input data + */ +int q6asm_enc_cfg_blk_pcm_format_support_v4(struct audio_client *ac, + uint32_t rate, uint32_t channels, + uint16_t bits_per_sample, + uint16_t sample_word_size, + uint16_t endianness, + uint16_t mode) +{ + return __q6asm_enc_cfg_blk_pcm_v4(ac, rate, channels, + bits_per_sample, sample_word_size, + endianness, mode); +} +EXPORT_SYMBOL(q6asm_enc_cfg_blk_pcm_format_support_v4); + int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac, uint32_t rate, uint32_t channels) { @@ -4381,6 +4583,91 @@ fail_cmd: return rc; } +static int __q6asm_media_format_block_pcm_v4(struct audio_client *ac, + uint32_t rate, uint32_t channels, + uint16_t bits_per_sample, + int stream_id, + bool use_default_chmap, + char *channel_map, + 
uint16_t sample_word_size, + uint16_t endianness, + uint16_t mode) +{ + struct asm_multi_channel_pcm_fmt_blk_param_v4 fmt; + u8 *channel_mapping; + int rc; + + pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]wordsize[%d]\n", __func__, + ac->session, rate, channels, + bits_per_sample, sample_word_size); + + memset(&fmt, 0, sizeof(fmt)); + q6asm_stream_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE, stream_id); + atomic_set(&ac->cmd_state, -1); + /* + * Update the token field with stream/session for compressed playback. + * The platform driver must know the stream with which the command is + * associated + */ + if (ac->io_mode & COMPRESSED_STREAM_IO) + fmt.hdr.token = ((ac->session << 8) & 0xFFFF00) | + (stream_id & 0xFF); + + pr_debug("%s: token = 0x%x, stream_id %d, session 0x%x\n", + __func__, fmt.hdr.token, stream_id, ac->session); + + fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2; + fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) - + sizeof(fmt.fmt_blk); + fmt.param.num_channels = channels; + fmt.param.bits_per_sample = bits_per_sample; + fmt.param.sample_rate = rate; + fmt.param.is_signed = 1; + fmt.param.sample_word_size = sample_word_size; + fmt.param.endianness = endianness; + fmt.param.mode = mode; + channel_mapping = fmt.param.channel_mapping; + + memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL); + + if (use_default_chmap) { + if (q6asm_map_channels(channel_mapping, channels, false)) { + pr_err("%s: map channels failed %d\n", + __func__, channels); + rc = -EINVAL; + goto fail_cmd; + } + } else { + memcpy(channel_mapping, channel_map, + PCM_FORMAT_MAX_NUM_CHANNEL); + } + + rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); + if (rc < 0) { + pr_err("%s: Command open failed %d\n", __func__, rc); + rc = -EINVAL; + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) >= 0), 5*HZ); + if (!rc) { + pr_err("%s: timeout. 
waited for format update\n", __func__); + rc = -ETIMEDOUT; + goto fail_cmd; + } + if (atomic_read(&ac->cmd_state) > 0) { + pr_err("%s: DSP returned error[%s]\n", + __func__, adsp_err_get_err_str( + atomic_read(&ac->cmd_state))); + rc = adsp_err_get_lnx_err_code( + atomic_read(&ac->cmd_state)); + goto fail_cmd; + } + return 0; +fail_cmd: + return rc; +} + int q6asm_media_format_block_pcm(struct audio_client *ac, uint32_t rate, uint32_t channels) { @@ -4448,6 +4735,47 @@ int q6asm_media_format_block_pcm_format_support_v3(struct audio_client *ac, } EXPORT_SYMBOL(q6asm_media_format_block_pcm_format_support_v3); +/* + * q6asm_media_format_block_pcm_format_support_v4 - sends pcm decoder + * configuration parameters + * + * @ac: Client session handle + * @rate: sample rate + * @channels: number of channels + * @bits_per_sample: bit width of decoder session + * @stream_id: stream id of stream to be associated with this session + * @use_default_chmap: true if default channel map to be used + * @channel_map: input channel map + * @sample_word_size: Size in bits of the word that holds a sample of a channel + * @endianness: endianness of the pcm data + * @mode: Mode to provide additional info about the pcm input data + */ +int q6asm_media_format_block_pcm_format_support_v4(struct audio_client *ac, + uint32_t rate, + uint32_t channels, + uint16_t bits_per_sample, + int stream_id, + bool use_default_chmap, + char *channel_map, + uint16_t sample_word_size, + uint16_t endianness, + uint16_t mode) +{ + if (!use_default_chmap && (channel_map == NULL)) { + pr_err("%s: No valid chan map and can't use default\n", + __func__); + return -EINVAL; + } + return __q6asm_media_format_block_pcm_v4(ac, rate, + channels, bits_per_sample, stream_id, + use_default_chmap, channel_map, + sample_word_size, endianness, + mode); + +} +EXPORT_SYMBOL(q6asm_media_format_block_pcm_format_support_v4); + + static int __q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac, uint32_t rate, uint32_t channels, bool use_default_chmap, char *channel_map, @@ -4581,6 +4909,78 @@ fail_cmd: return rc; } +static int __q6asm_media_format_block_multi_ch_pcm_v4(struct audio_client *ac, + uint32_t rate, + uint32_t channels, + bool use_default_chmap, + char *channel_map, + uint16_t bits_per_sample, + uint16_t sample_word_size, + uint16_t endianness, + uint16_t mode) +{ + struct asm_multi_channel_pcm_fmt_blk_param_v4 fmt; + u8 *channel_mapping; + int rc; + + pr_debug("%s: session[%d]rate[%d]ch[%d]bps[%d]wordsize[%d]\n", __func__, + ac->session, rate, channels, + bits_per_sample, sample_word_size); + + memset(&fmt, 0, sizeof(fmt)); + q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE); + atomic_set(&ac->cmd_state, -1); + + fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2; + fmt.fmt_blk.fmt_blk_size = sizeof(fmt) - sizeof(fmt.hdr) - + sizeof(fmt.fmt_blk); + fmt.param.num_channels = channels; + fmt.param.bits_per_sample = bits_per_sample; + fmt.param.sample_rate = rate; + fmt.param.is_signed = 1; + fmt.param.sample_word_size = sample_word_size; + fmt.param.endianness = endianness; + fmt.param.mode = mode; + channel_mapping = fmt.param.channel_mapping; + + memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL); + + if (use_default_chmap) { + if (q6asm_map_channels(channel_mapping, channels, false)) { + pr_err("%s: map channels failed %d\n", + __func__, channels); + rc = -EINVAL; + goto fail_cmd; + } + } else { + memcpy(channel_mapping, channel_map, + PCM_FORMAT_MAX_NUM_CHANNEL); + } + + rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt); + if (rc < 0) { + 
pr_err("%s: Comamnd open failed %d\n", __func__, rc); + goto fail_cmd; + } + rc = wait_event_timeout(ac->cmd_wait, + (atomic_read(&ac->cmd_state) >= 0), 5*HZ); + if (!rc) { + pr_err("%s: timeout. waited for format update\n", __func__); + rc = -ETIMEDOUT; + goto fail_cmd; + } + if (atomic_read(&ac->cmd_state) > 0) { + pr_err("%s: DSP returned error[%s]\n", + __func__, adsp_err_get_err_str( + atomic_read(&ac->cmd_state))); + rc = adsp_err_get_lnx_err_code( + atomic_read(&ac->cmd_state)); + goto fail_cmd; + } + return 0; +fail_cmd: + return rc; +} int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac, uint32_t rate, uint32_t channels, @@ -4628,6 +5028,39 @@ int q6asm_media_format_block_multi_ch_pcm_v3(struct audio_client *ac, } EXPORT_SYMBOL(q6asm_media_format_block_multi_ch_pcm_v3); +/* + * q6asm_media_format_block_multi_ch_pcm_v4 - sends pcm decoder configuration + * parameters + * + * @ac: Client session handle + * @rate: sample rate + * @channels: number of channels + * @bits_per_sample: bit width of encoder session + * @use_default_chmap: true if default channel map to be used + * @channel_map: input channel map + * @sample_word_size: Size in bits of the word that holds a sample of a channel + * @endianness: endianness of the pcm data + * @mode: Mode to provide additional info about the pcm input data + */ +int q6asm_media_format_block_multi_ch_pcm_v4(struct audio_client *ac, + uint32_t rate, uint32_t channels, + bool use_default_chmap, + char *channel_map, + uint16_t bits_per_sample, + uint16_t sample_word_size, + uint16_t endianness, + uint16_t mode) +{ + return __q6asm_media_format_block_multi_ch_pcm_v4(ac, rate, channels, + use_default_chmap, + channel_map, + bits_per_sample, + sample_word_size, + endianness, + mode); +} +EXPORT_SYMBOL(q6asm_media_format_block_multi_ch_pcm_v4); + static int __q6asm_media_format_block_multi_aac(struct audio_client *ac, struct asm_aac_cfg *cfg, int stream_id) { |

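Taken together, the q6asm v4 entry points extend linear PCM support to 32-bit samples and rates up to 384 kHz. The bits_per_sample/sample_word_size pairs they are fed come from the format switches added to msm_pcm_playback_prepare() and msm_pcm_capture_prepare(); a runnable sketch of that mapping is below. The S24_3LE row (24 valid bits packed in a 24-bit word) is an assumption — the patch only shows S24_3LE selecting bits_per_sample = 24 — and the enum tags stand in for the real SNDRV_PCM_FORMAT_* constants.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the ALSA SNDRV_PCM_FORMAT_* constants. */
enum pcm_fmt { FMT_S16_LE, FMT_S24_LE, FMT_S24_3LE, FMT_S32_LE };

/*
 * Mirrors the prepare-path switches in this patch: S24_LE carries 24 valid
 * bits in a 32-bit word, S32_LE is the new 32/32 case, and anything else
 * falls back to 16/16.
 */
static void q6_word_geometry(enum pcm_fmt f, uint16_t *bits, uint16_t *word)
{
	switch (f) {
	case FMT_S32_LE:  *bits = 32; *word = 32; break;
	case FMT_S24_LE:  *bits = 24; *word = 32; break;
	case FMT_S24_3LE: *bits = 24; *word = 24; break; /* assumed geometry */
	case FMT_S16_LE:
	default:          *bits = 16; *word = 16; break;
	}
}

int main(void)
{
	static const char *const names[] = { "S16_LE", "S24_LE", "S24_3LE", "S32_LE" };
	for (int f = FMT_S16_LE; f <= FMT_S32_LE; f++) {
		uint16_t bits, word;
		q6_word_geometry((enum pcm_fmt)f, &bits, &word);
		printf("%-8s bits=%2u word=%2u\n", names[f], bits, word);
	}
	return 0;
}

On the DAI side, the new "AFE Input Bit Format" enum control (index 0 = "S16_LE", 1 = "S24_LE", per afe_input_bit_format_text[]) is what msm_dai_q6_prepare() consults to derive the afe_in_bit_width argument of afe_port_start_v2(). A hedged userspace sketch using alsa-lib follows; the "hw:0" card name and the exact control name as it ends up registered on a given target are assumptions about that target's control map.

/* Build: cc set_afe_bitfmt.c -lasound. Sets "AFE Input Bit Format" to S24_LE. */
#include <stdio.h>
#include <alsa/asoundlib.h>

int main(void)
{
	snd_ctl_t *ctl;
	snd_ctl_elem_id_t *id;
	snd_ctl_elem_value_t *val;
	int rc;

	rc = snd_ctl_open(&ctl, "hw:0", 0); /* card name is an assumption */
	if (rc < 0) {
		fprintf(stderr, "snd_ctl_open: %s\n", snd_strerror(rc));
		return 1;
	}

	snd_ctl_elem_id_alloca(&id);
	snd_ctl_elem_value_alloca(&val);
	snd_ctl_elem_id_set_interface(id, SND_CTL_ELEM_IFACE_MIXER);
	snd_ctl_elem_id_set_name(id, "AFE Input Bit Format");
	snd_ctl_elem_value_set_id(val, id);
	/* Enum index 1 selects "S24_LE". */
	snd_ctl_elem_value_set_enumerated(val, 0, 1);

	rc = snd_ctl_elem_write(ctl, val);
	if (rc < 0)
		fprintf(stderr, "snd_ctl_elem_write: %s\n", snd_strerror(rc));

	snd_ctl_close(ctl);
	return rc < 0;
}

On Android targets the equivalent operation is typically a one-liner with tinymix, e.g. tinymix "AFE Input Bit Format" S24_LE.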