-rw-r--r--  Documentation/devicetree/bindings/media/video/msm-cpp.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/media/video/msm-vfe.txt | 15
-rw-r--r--  Documentation/devicetree/bindings/mhi/msm_mhi.txt | 175
-rw-r--r--  Documentation/devicetree/bindings/platform/msm/msm_rmnet_mhi.txt | 70
-rw-r--r--  arch/arm/boot/dts/qcom/dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi | 14
-rw-r--r--  arch/arm/boot/dts/qcom/msm-pm660.dtsi | 3
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi | 15
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-qrd-skuk-hdk.dtsi | 24
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts | 28
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-audio.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi | 15
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-pm660a-qrd.dts | 31
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-qrd.dts | 21
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-qrd.dtsi | 18
-rw-r--r--  arch/arm64/boot/Makefile | 2
-rw-r--r--  drivers/char/diag/diagfwd_mhi.c | 3
-rw-r--r--  drivers/clk/qcom/clk-rcg.h | 3
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c | 70
-rw-r--r--  drivers/clk/qcom/mmcc-sdm660.c | 6
-rw-r--r--  drivers/firmware/qcom/tz_log.c | 39
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 2
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp.c | 4
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp.h | 2
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp47.c | 78
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c | 14
-rw-r--r--  drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c | 62
-rw-r--r--  drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h | 5
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c | 12
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_util.c | 15
-rw-r--r--  drivers/net/ethernet/msm/msm_rmnet_mhi.c | 153
-rw-r--r--  drivers/net/wireless/ath/wil6210/pm.c | 13
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_dp.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c | 2
-rw-r--r--  drivers/platform/msm/mhi/mhi.h | 175
-rw-r--r--  drivers/platform/msm/mhi/mhi_bhi.c | 496
-rw-r--r--  drivers/platform/msm/mhi/mhi_bhi.h | 42
-rw-r--r--  drivers/platform/msm/mhi/mhi_event.c | 105
-rw-r--r--  drivers/platform/msm/mhi/mhi_iface.c | 478
-rw-r--r--  drivers/platform/msm/mhi/mhi_init.c | 245
-rw-r--r--  drivers/platform/msm/mhi/mhi_isr.c | 247
-rw-r--r--  drivers/platform/msm/mhi/mhi_macros.h | 9
-rw-r--r--  drivers/platform/msm/mhi/mhi_main.c | 1046
-rw-r--r--  drivers/platform/msm/mhi/mhi_mmio_ops.c | 139
-rw-r--r--  drivers/platform/msm/mhi/mhi_pm.c | 337
-rw-r--r--  drivers/platform/msm/mhi/mhi_ring_ops.c | 33
-rw-r--r--  drivers/platform/msm/mhi/mhi_ssr.c | 123
-rw-r--r--  drivers/platform/msm/mhi/mhi_states.c | 266
-rw-r--r--  drivers/platform/msm/mhi/mhi_sys.c | 88
-rw-r--r--  drivers/platform/msm/mhi/mhi_sys.h | 9
-rw-r--r--  drivers/platform/msm/mhi_uci/mhi_uci.c | 71
-rw-r--r--  drivers/power/supply/qcom/pmic-voter.c | 32
-rw-r--r--  drivers/power/supply/qcom/pmic-voter.h | 3
-rw-r--r--  drivers/power/supply/qcom/qpnp-fg-gen3.c | 58
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 181
-rw-r--r--  drivers/power/supply/qcom/smb-lib.h | 3
-rw-r--r--  drivers/power/supply/qcom/smb-reg.h | 1
-rw-r--r--  drivers/soc/qcom/icnss.c | 44
-rw-r--r--  drivers/soc/qcom/ipc_router_mhi_xprt.c | 10
-rw-r--r--  drivers/soc/qcom/qmi_interface.c | 3
-rw-r--r--  drivers/soc/qcom/service-locator.c | 11
-rw-r--r--  drivers/usb/gadget/function/f_qc_rndis.c | 14
-rw-r--r--  drivers/usb/host/xhci-plat.c | 12
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.h | 3
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_ctl.c | 6
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_layer.c | 133
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_pp.c | 28
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_util.c | 27
-rw-r--r--  include/linux/msm_mhi.h | 110
-rw-r--r--  include/uapi/drm/msm_drm.h | 8
-rw-r--r--  include/uapi/linux/eventpoll.h | 2
-rw-r--r--  include/uapi/media/msmb_isp.h | 3
-rw-r--r--  sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c | 18
-rw-r--r--  sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c | 71
-rw-r--r--  sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h | 1
-rw-r--r--  sound/soc/codecs/wcd934x/wcd934x-mbhc.h | 37
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c | 60
-rw-r--r--  sound/soc/msm/qdsp6v2/q6asm.c | 10
78 files changed, 3676 insertions, 2052 deletions
diff --git a/Documentation/devicetree/bindings/media/video/msm-cpp.txt b/Documentation/devicetree/bindings/media/video/msm-cpp.txt
index 2bd9fb840830..450e4d6ee8f0 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cpp.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cpp.txt
@@ -70,6 +70,13 @@ Optional properties:
The first entry is register offset and second entry is register value.
- qcom,micro-reset: Boolean flag indicating if micro reset need to be enabled.
This needs to present on platforms that support this feature.
+- qcom,cpp-cx-ipeak: To handle the Cx peak current limit.
+ <phandle bit>
+ phandle - phandle of the cx ipeak device node
+ bit - bit number of the client in the relevant register
+ This is used to access the Cx ipeak HW module, which limits the current
+ drawn by various subsystem blocks on the Cx power rail. CPP sets its bit
+ in the TCSR register if it is about to cross its own threshold.
Example:
@@ -105,6 +112,7 @@ Example:
"micro_iface_clk", "camss_ahb_clk";
"smmu_cpp_axi_clk", "cpp_vbif_ahb_clk";
qcom,clock-rates = <0 0 0 0 465000000 0 0 465000000 0 0 0 0>;
+ qcom,cpp-cx-ipeak = <&cx_ipeak_lm 2>;
qcom,min-clock-rate = <320000000>;
qcom,bus-master = <1>;
qcom,vbif-qos-setting = <0x20 0x10000000>,
diff --git a/Documentation/devicetree/bindings/media/video/msm-vfe.txt b/Documentation/devicetree/bindings/media/video/msm-vfe.txt
index dac22f30bf1d..aaf13442fcf1 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vfe.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vfe.txt
@@ -23,6 +23,7 @@ Required properties for child node:
Only needed for child node.
- "vfe" - Required.
- "vfe_vbif" - Optional for "vfe32". Required for "vfe40".
+ - "vfe_fuse" - Optional.
- interrupts : should contain the vfe interrupt.
- interrupt-names : should specify relevant names to each interrupts
property defined.
@@ -52,9 +53,10 @@ Example:
vfe0: qcom,vfe0@fda10000 {
cell-index = <0>;
compatible = "qcom,vfe44";
- reg = <0xfda10000 0x1000>;
- <0xfda40000 0x200>;
- reg-names = "vfe", "vfe_vbif";
+ reg = <0xfda10000 0x1000>,
+ <0xfda40000 0x200>,
+ <0x7801a4 0x8>;
+ reg-names = "vfe", "vfe_vbif", "vfe_fuse";
interrupts = <0 57 0>;
interrupt-names = "vfe";
vdd-supply = <&gdsc_vfe>;
@@ -105,9 +107,10 @@ vfe0: qcom,vfe0@fda10000 {
vfe1: qcom,vfe1@fda14000 {
cell-index = <1>;
compatible = "qcom,vfe44";
- reg = <0xfda14000 0x1000>;
- <0xfda40000 0x200>;
- reg-names = "vfe", "vfe_vbif";
+ reg = <0xfda14000 0x1000>,
+ <0xfda40000 0x200>,
+ <0x7801a4 0x8>;
+ reg-names = "vfe", "vfe_vbif", "vfe_fuse";
interrupts = <0 58 0>;
interrupt-names = "vfe";
vdd-supply = <&gdsc_vfe>;
diff --git a/Documentation/devicetree/bindings/mhi/msm_mhi.txt b/Documentation/devicetree/bindings/mhi/msm_mhi.txt
index da8b021efc73..5f950604d186 100644
--- a/Documentation/devicetree/bindings/mhi/msm_mhi.txt
+++ b/Documentation/devicetree/bindings/mhi/msm_mhi.txt
@@ -5,37 +5,148 @@ Modem Host Interface protocol. The bindings referred to below, enable
the correct configuration of the interface and required sideband
signals.
-Required properties:
- - compatible: should be "qcom,mhi"
- - Refer to "Documentation/devicetree/bindings/esoc/esoc_client.txt" for
- below properties:
- - esoc-names
- - esoc-0
- - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
- below optional properties:
- - qcom,msm-bus,name
- - qcom,msm-bus,num-cases
- - qcom,msm-bus,num-paths
- - qcom,msm-bus,vectors-KBps
- - mhi-chan-cfg-#: mhi channel configuration parameters for platform
- - mhi-event-cfg-#: mhi event ring configuration parameters for platform
- - mhi-event-rings: number of event rings supported by platform
- - mhi-dev-address-win-size: size of the MHI device addressing window
+==============
+Node Structure
+==============
-Example:
+Main node properties:
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: "qcom,mhi"
+
+- qcom,pci-dev_id
+ Usage: required
+ Value type: <u32>
+ Definition: Device ID reported by the modem
+
+- qcom,pci-domain
+ Usage: required
+ Value type: <u32>
+ Definition: PCIe root complex the device is connected to
+
+- qcom,pci-bus
+ Usage: required
+ Value type: <u32>
+ Definition: PCIe bus the device is connected to
+
+- qcom,pci-slot
+ Usage: required
+ Value type: <u32>
+ Definition: PCIe slot (dev_id/function) the device is connected to
+
+- esoc-names
+ Usage: optional
+ Value type: <string>
+ Definition: esoc name for the device
+
+- esoc-0
+ Usage: required if "esoc-names" is defined
+ Value type: phandle
+ Definition: A phandle pointing to the esoc node.
+
+- qcom,msm-bus,name
+ Usage: required if MHI is bus master
+ Value type: string
+ Definition: string representing the client name
+
+- qcom,msm-bus,num-cases
+ Usage: required if MHI is bus master
+ Value type: <u32>
+ Definition: Number of use cases MHI supports. Must be set to 2.
+
+- qcom,msm-bus,num-paths
+ Usage: required if MHI is bus master
+ Value type: <u32>
+ Definition: Total number of master-slave pairs. Must be set to one.
- mhi: qcom,mhi {
- compatible = "qcom,mhi";
- esoc-names = "mdm";
- esoc-0 = <&mdm1>;
- qcom,msm-bus,name = "mhi";
- qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,num-paths = <1>;
- qcom,msm-bus,vectors-KBps =
- <100 512 0 0>,
- <100 512 1200000000 1200000000>;
- mhi-event-rings = <6>;
- mhi-chan-cfg-102 = <0x66 0x80 0x5 0x62>;
- mhi-event-cfg-0 = <0x80 0x0 0x0 0x11>;
- mhi-dev-address-win-size= <0x10 0x00000000>;
- };
+- qcom,msm-bus,vectors-KBps
+ Usage: required if MHI is bus master
+ Value type: Array of <u32>
+ Definition: Array of tuples which define the bus bandwidth requirements.
+ Each tuple is of length 4, values are master-id, slave-id,
+ arbitrated bandwidth in KBps, and instantaneous bandwidth in
+ KBps.
+
+- mhi-chan-cfg-#
+ Usage: required
+ Value type: Array of <u32>
+ Definition: MHI channel configuration parameters for the platform,
+ defined as <A B C D>, where:
+ A = chan number
+ B = maximum descriptors
+ C = event ring associated with channel
+ D = flags defined by mhi_macros.h GET_CHAN_PROPS
+
+- mhi-event-rings
+ Usage: required
+ Value type: <u32>
+ Definition: Number of event rings the device supports
+
+- mhi-event-cfg-#
+ Usage: required
+ Value type: Array of <u32>
+ Definition: MHI event ring configuration parameters for the platform,
+ defined as <A B C D E F>, where:
+ A = maximum event descriptors
+ B = MSI associated with event
+ C = interrupt moderation (see MHI specification)
+ D = Associated channel
+ E = priority of the event ring. 0 being the highest.
+ F = flags defined by mhi_macros.h GET_EV_PROPS
+
+- qcom,mhi-address-window
+ Usage: required
+ Value type: Array of <u64>
+ Definition: Starting and ending DDR addresses the device can access.
+
+- qcom,mhi-manage-boot
+ Usage: optional
+ Value type: bool
+ Definition: Determines whether the MHI host manages firmware download to the device.
+
+- qcom,mhi-fw-image
+ Usage: required if the MHI host manages the firmware download process
+ Value type: string
+ Definition: firmware image name
+
+- qcom,mhi-max-sbl
+ Usage: required if the MHI host manages the firmware download process
+ Value type: <u32>
+ Definition: Maximum SBL image size, in bytes, the device supports.
+
+- qcom,mhi-sg-size
+ Usage: required if the MHI host manages the firmware download process
+ Value type: <u32>
+ Definition: Size in bytes of each segment of the firmware image.
+
+- qcom,mhi-bb-required
+ Usage: optional
+ Value type: bool
+ Definition: Determines whether the MHI device requires a bounce buffer
+ during active transfers. If true, the host pre-allocates transfer
+ buffers when the channel is opened.
+
+========
+Example:
+========
+mhi: qcom,mhi {
+ compatible = "qcom,mhi";
+ qcom,pci-dev_id = <0x0301>;
+ qcom,pci-domain = <2>;
+ qcom,pci-bus = <4>;
+ qcom,pci-slot = <0>;
+ qcom,mhi-address-window = <0x0 0x80000000 0x0 0xbfffffff>;
+ esoc-names = "mdm";
+ esoc-0 = <&mdm1>;
+ qcom,msm-bus,name = "mhi";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <100 512 0 0>,
+ <100 512 1200000000 1200000000>;
+ mhi-event-rings = <1>;
+ mhi-chan-cfg-102 = <0x66 0x80 0x5 0x62>;
+ mhi-event-cfg-0 = <0x80 0x0 0x0 0x0 0 1 0x11>;
+};
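To make the tuple encodings above concrete: the example's mhi-chan-cfg-102 = <0x66 0x80 0x5 0x62> follows the documented <A B C D> layout, so it describes channel 0x66 (102) with 0x80 (128) descriptors on event ring 5 and flag bits 0x62 left to GET_CHAN_PROPS. A small standalone C sketch of that decoding; the struct and field names are illustrative, not the driver's actual types:

/* Hypothetical decoder for the <A B C D> channel tuple documented
 * above; the field meanings come straight from the binding text.
 */
#include <stdint.h>
#include <stdio.h>

struct chan_cfg {
	uint32_t chan;       /* A: channel number */
	uint32_t max_desc;   /* B: maximum descriptors */
	uint32_t event_ring; /* C: event ring associated with channel */
	uint32_t flags;      /* D: flags parsed via GET_CHAN_PROPS */
};

int main(void)
{
	/* Cells of mhi-chan-cfg-102 from the example above. */
	const uint32_t cells[4] = { 0x66, 0x80, 0x5, 0x62 };
	struct chan_cfg cfg = {
		.chan = cells[0], .max_desc = cells[1],
		.event_ring = cells[2], .flags = cells[3],
	};

	printf("chan %u, %u descriptors, event ring %u, flags 0x%x\n",
	       cfg.chan, cfg.max_desc, cfg.event_ring, cfg.flags);
	return 0;
}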
diff --git a/Documentation/devicetree/bindings/platform/msm/msm_rmnet_mhi.txt b/Documentation/devicetree/bindings/platform/msm/msm_rmnet_mhi.txt
index 5ec2fb51833e..d377a28e6617 100644
--- a/Documentation/devicetree/bindings/platform/msm/msm_rmnet_mhi.txt
+++ b/Documentation/devicetree/bindings/platform/msm/msm_rmnet_mhi.txt
@@ -1,25 +1,53 @@
MSM MHI RMNET interface device
-MHI RMNET provides a network interface over PCIe
-to transfer IP packets between modem and apps.
-
-Required properties:
-- compatible : "qcom,mhi-rmnet"
-- At least one of MHI channel
- - qcom,mhi-rx-channel : MHI channel number for incoming data
- - qcom,mhi-tx-channel : MHI channel number for outgoing data
-- Default MRU for interface
- - qcom,mhi-mru
-- Alias id to identify interface instance
+MHI RMNET provides a network interface over PCIe to transfer IP packets
+between modem and apps.
+==============
+Node Structure
+==============
+
+Main node properties:
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: "qcom,mhi-rmnet"
+
+- qcom,mhi-rx-channel
+ Usage: optional if mhi-tx-channel is defined.
+ Value type: <u32>
+ Definition: MHI channel number for incoming data
+
+- qcom,mhi-tx-channel
+ Usage: optional if mhi-rx-channel is defined.
+ Value type: <u32>
+ Definition: MHI channel number for outgoing data
+
+- qcom,mhi-mru
+ Usage: required
+ Value type: <u32>
+ Definition: Default payload size for receive path.
+
+- qcom,mhi-max-mru
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum payload size the interface supports on the receive
+ path. If not defined, MHI_MAX_MRU is used.
+
+- qcom,mhi-max-mtu
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum payload size the interface supports on the transmit
+ path. If not defined, MHI_MAX_MTU is used.
+
+========
Example:
- aliases {
- mhi_rmnet0 = &mhi_rmnet_0;
- };
- mhi_rmnet_0: qcom,mhi-rmnet@0 {
- compatible = "qcom,mhi-rmnet";
- qcom,mhi-rx-channel = <101>;
- qcom,mhi-tx-channel = <100>;
- qcom,mhi-mru = <8000>;
- status = "okay";
- };
+========
+mhi_rmnet_0: qcom,mhi-rmnet@0 {
+ compatible = "qcom,mhi-rmnet";
+ qcom,mhi-rx-channel = <101>;
+ qcom,mhi-tx-channel = <100>;
+ qcom,mhi-mru = <8000>;
+ status = "okay";
+};
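The max-mru/max-mtu fallback described above is implemented in the rmnet driver changes later in this patch with of_property_read_u32(). A condensed, kernel-style sketch of that read-or-default pattern (fragment only, mirroring the probe code below):

/* Optional DT u32 with a compile-time default: if the property is
 * absent, of_property_read_u32() returns non-zero and the max is used.
 */
u32 max_mru;

if (of_property_read_u32(pdev->dev.of_node, "qcom,mhi-max-mru", &max_mru))
	max_mru = MHI_MAX_MRU;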
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi
index 6c17bca64a86..62115cf6f98a 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-jdi-a407-dualmipi-wqhd-cmd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,18 @@
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [
+ 29 01 00 00 01 00 02 00 00
+ 29 01 00 00 01 00 04 FF 25 03 01
+ 29 01 00 00 01 00 02 00 80
+ 29 01 00 00 01 00 03 FF 25 03
+ 29 01 00 00 01 00 02 00 80
+ 29 01 00 00 01 00 10
+ A7 27 00 FF 01 15 11 02
+ 98 0F 07 70 69 14 00 00
+ 29 01 00 00 01 00 02 00 C0
+ 29 01 00 00 01 00 10
+ A7 13 00 FF 01 FF 10 02
+ 08 0F 07 74 69 14 00 00
15 01 00 00 00 00 02 35 00
05 01 00 00 78 00 02 11 00
05 01 00 00 32 00 02 29 00];
diff --git a/arch/arm/boot/dts/qcom/msm-pm660.dtsi b/arch/arm/boot/dts/qcom/msm-pm660.dtsi
index 463d352f8791..38f4802b1624 100644
--- a/arch/arm/boot/dts/qcom/msm-pm660.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pm660.dtsi
@@ -315,7 +315,8 @@
dpdm-supply = <&qusb_phy0>;
qcom,thermal-mitigation
- = <3000000 1500000 1000000 500000>;
+ = <3000000 2500000 2000000 1500000
+ 1000000 500000>;
qcom,chgr@1000 {
reg = <0x1000 0x100>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi
index 4b3748e5a40a..0c6985a46b81 100644
--- a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi
@@ -43,4 +43,19 @@
drive-strength = <8>; /* 2 MA */
};
};
+
+ cam_sensor_mclk0_active: cam_sensor_mclk0_active {
+ /* MCLK1 */
+ mux {
+ /* CLK, DATA */
+ pins = "gpio13";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio13";
+ bias-disable; /* No PULL */
+ drive-strength = <8>; /* 2 MA */
+ };
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-skuk-hdk.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk-hdk.dtsi
index 7ed28e4c8813..3c76519acdcf 100644
--- a/arch/arm/boot/dts/qcom/msm8998-qrd-skuk-hdk.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk-hdk.dtsi
@@ -51,6 +51,30 @@
qcom,mdss-pref-prim-intf = "dsi";
};
+&mdss_hdmi_tx {
+ pinctrl-names = "hdmi_hpd_active", "hdmi_ddc_active", "hdmi_cec_active",
+ "hdmi_active", "hdmi_sleep";
+ pinctrl-0 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active
+ &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>;
+ pinctrl-1 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active
+ &mdss_hdmi_ddc_active &mdss_hdmi_cec_suspend>;
+ pinctrl-2 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active
+ &mdss_hdmi_cec_active &mdss_hdmi_ddc_suspend>;
+ pinctrl-3 = <&mdss_hdmi_5v_active &mdss_hdmi_hpd_active
+ &mdss_hdmi_ddc_active &mdss_hdmi_cec_active>;
+ pinctrl-4 = <&mdss_hdmi_5v_suspend &mdss_hdmi_hpd_suspend
+ &mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>;
+};
+
+&mdss_dp_ctrl {
+ pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+ pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>;
+ pinctrl-1 = <&mdss_dp_aux_suspend &mdss_dp_usbplug_cc_suspend>;
+ qcom,aux-en-gpio = <&tlmm 77 0>;
+ qcom,aux-sel-gpio = <&tlmm 78 0>;
+ qcom,usbplug-cc-gpio = <&tlmm 38 0>;
+};
+
&mdss_dsi {
hw-config = "split_dsi";
};
diff --git a/arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts b/arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts
index d535d62e521c..c2408ba7bf76 100644
--- a/arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts
+++ b/arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts
@@ -16,6 +16,7 @@
#include "sdm630.dtsi"
#include "sdm630-qrd.dtsi"
#include "msm-pm660a.dtsi"
+#include "sdm660-internal-codec.dtsi"
/ {
model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660A QRD";
@@ -23,3 +24,30 @@
qcom,board-id = <0x0002000b 0x00>;
qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>;
};
+
+&int_codec {
+ qcom,model = "sdm660-snd-card-skush";
+ /delete-property/ qcom,us-euro-gpios;
+ qcom,audio-routing =
+ "RX_BIAS", "INT_MCLK0",
+ "SPK_RX_BIAS", "INT_MCLK0",
+ "INT_LDO_H", "INT_MCLK0",
+ "MIC BIAS External2", "Headset Mic",
+ "AMIC2", "MIC BIAS External2",
+ "MIC BIAS External", "Digital Mic1",
+ "DMIC1", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic3",
+ "DMIC3", "MIC BIAS External",
+ "MIC BIAS External", "Digital Mic4",
+ "DMIC4", "MIC BIAS External",
+ "SpkrLeft IN", "SPK1 OUT",
+ "PDM_IN_RX1", "PDM_OUT_RX1",
+ "PDM_IN_RX2", "PDM_OUT_RX2",
+ "PDM_IN_RX3", "PDM_OUT_RX3",
+ "ADC1_IN", "ADC1_OUT",
+ "ADC2_IN", "ADC2_OUT",
+ "ADC3_IN", "ADC3_OUT";
+ qcom,wsa-max-devs = <1>;
+ qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_213_en>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-audio.dtsi b/arch/arm/boot/dts/qcom/sdm660-audio.dtsi
index 0b216a16c7e8..c1cb6441cd43 100644
--- a/arch/arm/boot/dts/qcom/sdm660-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-audio.dtsi
@@ -271,7 +271,7 @@
gpio@c200 {
status = "ok";
qcom,mode = <1>;
- qcom,pull = <5>;
+ qcom,pull = <4>;
qcom,vin-sel = <0>;
qcom,src-sel = <2>;
qcom,master-en = <1>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
index 0fc24dc6e72b..ff51db728d85 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
@@ -24,6 +24,7 @@
#include "dsi-panel-nt35695b-truly-fhd-cmd.dtsi"
#include "dsi-panel-truly-1080p-cmd.dtsi"
#include "dsi-panel-truly-1080p-video.dtsi"
+#include "dsi-panel-rm67195-amoled-fhd-cmd.dtsi"
&soc {
dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -67,7 +68,7 @@
qcom,panel-supply-entry@0 {
reg = <0>;
qcom,supply-name = "wqhd-vddio";
- qcom,supply-min-voltage = <1880000>;
+ qcom,supply-min-voltage = <1800000>;
qcom,supply-max-voltage = <1950000>;
qcom,supply-enable-load = <32000>;
qcom,supply-disable-load = <80>;
@@ -117,7 +118,7 @@
qcom,panel-supply-entry@0 {
reg = <0>;
qcom,supply-name = "wqhd-vddio";
- qcom,supply-min-voltage = <1880000>;
+ qcom,supply-min-voltage = <1800000>;
qcom,supply-max-voltage = <1950000>;
qcom,supply-enable-load = <32000>;
qcom,supply-disable-load = <80>;
@@ -260,3 +261,13 @@
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-check-mode = "bta_check";
};
+
+&dsi_rm67195_amoled_fhd_cmd {
+ qcom,mdss-dsi-panel-timings-phy-v2 = [23 1e 07 08 05 03 04 a0
+ 23 1e 07 08 05 03 04 a0
+ 23 1e 07 08 05 03 04 a0
+ 23 1e 07 08 05 03 04 a0
+ 23 19 07 08 05 03 04 a0];
+ qcom,mdss-dsi-t-clk-post = <0x0d>;
+ qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-pm660a-qrd.dts b/arch/arm/boot/dts/qcom/sdm660-pm660a-qrd.dts
index 48e02bbdfbfe..d9d74ea31d3d 100644
--- a/arch/arm/boot/dts/qcom/sdm660-pm660a-qrd.dts
+++ b/arch/arm/boot/dts/qcom/sdm660-pm660a-qrd.dts
@@ -23,3 +23,34 @@
qcom,board-id = <0x0012000b 0>;
qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>;
};
+
+&pm660a_oledb {
+ status = "okay";
+ qcom,oledb-default-voltage-mv = <6400>;
+};
+
+&mdss_mdp {
+ qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+ hw-config = "single_dsi";
+};
+
+&mdss_dsi0 {
+ qcom,dsi-pref-prim-pan = <&dsi_rm67195_amoled_fhd_cmd>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+ qcom,platform-reset-gpio = <&tlmm 53 0>;
+ qcom,platform-te-gpio = <&tlmm 59 0>;
+};
+
+&dsi_rm67195_amoled_fhd_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <255>;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-qrd.dts b/arch/arm/boot/dts/qcom/sdm660-qrd.dts
index 9ad1bed8bbfa..4d120e83cb9b 100644
--- a/arch/arm/boot/dts/qcom/sdm660-qrd.dts
+++ b/arch/arm/boot/dts/qcom/sdm660-qrd.dts
@@ -64,3 +64,24 @@
&pm660l_wled {
qcom,led-strings-list = [00 01];
};
+
+&soc {
+ hbtp {
+ compatible = "qcom,hbtp-input";
+ pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+ pinctrl-0 = <&ts_rst_active>;
+ pinctrl-1 = <&ts_rst_suspend>;
+ vcc_ana-supply = <&pm660l_l3>;
+ vcc_dig-supply = <&pm660_l13>;
+ qcom,afe-load = <20000>;
+ qcom,afe-vtg-min = <3008000>;
+ qcom,afe-vtg-max = <3008000>;
+ qcom,dig-load = <40000>;
+ qcom,dig-vtg-min = <1808000>;
+ qcom,dig-vtg-max = <1808000>;
+ qcom,fb-resume-delay-us = <10000>;
+ qcom,afe-force-power-on;
+ qcom,afe-power-on-delay-us = <1000>;
+ qcom,afe-power-off-delay-us = <6>;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
index 77e8505408d0..0e869f0e1352 100644
--- a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
@@ -211,24 +211,6 @@
debounce-interval = <15>;
};
};
-
- hbtp {
- compatible = "qcom,hbtp-input";
- pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
- pinctrl-0 = <&ts_rst_active>;
- pinctrl-1 = <&ts_rst_suspend>;
- vcc_ana-supply = <&pm660l_l3>;
- vcc_dig-supply = <&pm660_l13>;
- qcom,afe-load = <20000>;
- qcom,afe-vtg-min = <3008000>;
- qcom,afe-vtg-max = <3008000>;
- qcom,dig-load = <40000>;
- qcom,dig-vtg-min = <1808000>;
- qcom,dig-vtg-max = <1808000>;
- qcom,fb-resume-delay-us = <10000>;
- qcom,afe-power-on-delay-us = <1000>;
- qcom,afe-power-off-delay-us = <6>;
- };
};
/ {
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 6fee388eb386..e2ee3ba909eb 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -21,7 +21,7 @@ ifneq ($(DTB_NAMES),)
DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
else
-DTB_OBJS := $(shell find $(obj)/dts/ -name \*.dtb)
+DTB_OBJS := $(shell find -L $(obj)/dts/ -name \*.dtb)
endif
$(obj)/Image: vmlinux FORCE
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index df26e2522baf..8b0e1f32bdc5 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -665,8 +665,7 @@ static int diag_mhi_register_ch(int id, struct diag_mhi_ch_t *ch)
atomic_set(&(ch->opened), 0);
ctxt = SET_CH_CTXT(id, ch->type);
ch->client_info.mhi_client_cb = mhi_notifier;
- return mhi_register_channel(&ch->hdl, ch->chan, 0, &ch->client_info,
- (void *)(uintptr_t)ctxt);
+ return mhi_register_channel(&ch->hdl, NULL);
}
int diag_mhi_init()
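This hunk tracks an MHI API rework visible throughout this series: mhi_register_channel() no longer takes separate channel, user-data, and context arguments, and instead receives a single struct mhi_client_info_t describing the client. A sketch of new-style registration, with field names taken from the rmnet_mhi probe changes further down; treat it as illustrative rather than the canonical API:

/* Sketch only: registering an MHI channel through the consolidated
 * client-info struct. example_register() is a hypothetical helper.
 */
static int example_register(struct platform_device *pdev,
			    struct mhi_client_handle **hdl,
			    u32 chan,
			    void (*cb)(struct mhi_cb_info *),
			    void *priv)
{
	struct mhi_client_info_t info = {
		.dev = &pdev->dev,       /* device owning the channel */
		.node_name = "qcom,mhi", /* MHI controller to bind to */
		.mhi_client_cb = cb,     /* notification callback */
		.user_data = priv,       /* returned in callbacks */
		.chan = chan,            /* channel number */
	};

	/* All per-channel parameters now travel in one struct. */
	return mhi_register_channel(hdl, &info);
}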
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 58fbd07e6f15..0bcada9aac5d 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -192,5 +192,6 @@ extern const struct clk_ops clk_pixel_ops;
extern const struct clk_ops clk_gfx3d_ops;
extern const struct clk_ops clk_gfx3d_src_ops;
extern const struct clk_ops clk_dp_ops;
+extern const struct clk_ops clk_esc_ops;
#endif
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 632d0f4ac9c1..ff0c8327fabe 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1349,3 +1349,73 @@ const struct clk_ops clk_gfx3d_src_ops = {
.list_registers = clk_rcg2_list_registers,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_src_ops);
+
+static int clk_esc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+ unsigned long rate = req->rate;
+
+ if (rate == 0)
+ return -EINVAL;
+
+ p = req->best_parent_hw;
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
+
+ div = ((2 * parent_rate) / rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_esc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ unsigned long div;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 cfg;
+
+ div = ((2 * parent_rate) / rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++) {
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ return clk_rcg2_configure(rcg, &f);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int clk_esc_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ return clk_esc_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_esc_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_esc_determine_rate,
+ .set_rate = clk_esc_set_rate,
+ .set_rate_and_parent = clk_esc_set_rate_and_parent,
+ .list_registers = clk_rcg2_list_registers,
+};
+EXPORT_SYMBOL(clk_esc_ops);
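The esc ops encode a half-integer divider: clk_esc_determine_rate() computes div = ((2 * parent_rate) / rate) - 1, clamps it to the 5-bit HID field, and the achieved frequency follows the rcg2 calc_rate() convention, rate = parent * 2 / (div + 1). A standalone arithmetic sketch of that round trip (the requested rate is an example value; assumes the 19.2 MHz parent from the fmax tables):

#include <stdio.h>

int main(void)
{
	unsigned long parent = 19200000;     /* 19.2 MHz parent (XO) */
	unsigned long rate = 12800000;       /* requested rate, example */
	unsigned long mask = (1UL << 5) - 1; /* hid_width = 5 for esc0/1 */
	unsigned long div, achieved;

	div = ((2 * parent) / rate) - 1;     /* half-integer encoding */
	if (div > mask)
		div = mask;

	achieved = (parent * 2) / (div + 1);
	printf("div field %lu -> %lu Hz\n", div, achieved);
	/* div = 2, i.e. divide-by-1.5: 38.4 MHz / 3 = 12.8 MHz */
	return 0;
}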
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
index 0bf7ef05ed06..910c36c65b6a 100644
--- a/drivers/clk/qcom/mmcc-sdm660.c
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -978,12 +978,11 @@ static struct clk_rcg2 esc0_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = mmcc_parent_map_1,
- .freq_tbl = ftbl_dp_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
.parent_names = mmcc_parent_names_1,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_esc_ops,
VDD_DIG_FMAX_MAP1(
LOWER, 19200000),
},
@@ -994,12 +993,11 @@ static struct clk_rcg2 esc1_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = mmcc_parent_map_1,
- .freq_tbl = ftbl_dp_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "esc1_clk_src",
.parent_names = mmcc_parent_names_1,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_esc_ops,
VDD_DIG_FMAX_MAP1(
LOWER, 19200000),
},
diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c
index bf3a24b3eb01..11bd3aae340b 100644
--- a/drivers/firmware/qcom/tz_log.c
+++ b/drivers/firmware/qcom/tz_log.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -57,6 +57,11 @@
* TZ 3.X version info
*/
#define QSEE_VERSION_TZ_3_X 0x800000
+
+#define TZBSP_AES_256_ENCRYPTED_KEY_SIZE 256
+#define TZBSP_NONCE_LEN 12
+#define TZBSP_TAG_LEN 16
+
/*
* VMID Table
*/
@@ -125,6 +130,14 @@ struct tzdbg_int_t {
uint64_t int_count[TZBSP_MAX_CPU_COUNT]; /* # of times seen per CPU */
};
+/* warm boot reason for cores */
+struct tzbsp_diag_wakeup_info_t {
+ /* Wake source info : APCS_GICC_HPPIR */
+ uint32_t HPPIR;
+ /* Wake source info : APCS_GICC_AHPPIR */
+ uint32_t AHPPIR;
+};
+
/*
* Log ring buffer position
*/
@@ -179,6 +192,10 @@ struct tzdbg_t {
* Ring Buffer Length
*/
uint32_t ring_len;
+
+ /* Offset for Wakeup info */
+ uint32_t wakeup_info_off;
+
/*
* VMID to EE Mapping
*/
@@ -193,6 +210,16 @@ struct tzdbg_t {
struct tzdbg_reset_info_t reset_info[TZBSP_MAX_CPU_COUNT];
uint32_t num_interrupts;
struct tzdbg_int_t int_info[TZBSP_DIAG_INT_NUM];
+
+ /* Wake up info */
+ struct tzbsp_diag_wakeup_info_t wakeup_info[TZBSP_MAX_CPU_COUNT];
+
+ uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
+
+ uint8_t nonce[TZBSP_NONCE_LEN];
+
+ uint8_t tag[TZBSP_TAG_LEN];
+
/*
* We need at least 2K for the ring buffer
*/
@@ -731,10 +758,16 @@ static ssize_t tzdbgfs_read(struct file *file, char __user *buf,
int len = 0;
int *tz_id = file->private_data;
- memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
+ if (*tz_id == TZDBG_BOOT || *tz_id == TZDBG_RESET ||
+ *tz_id == TZDBG_INTERRUPT || *tz_id == TZDBG_GENERAL ||
+ *tz_id == TZDBG_VMID || *tz_id == TZDBG_LOG)
+ memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
debug_rw_buf_size);
- memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
+
+ if (*tz_id == TZDBG_HYP_GENERAL || *tz_id == TZDBG_HYP_LOG)
+ memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
tzdbg.hyp_debug_rw_buf_size);
+
switch (*tz_id) {
case TZDBG_BOOT:
len = _disp_tz_boot_stats();
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index f821a81c53a6..532ff8677259 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1683,7 +1683,7 @@ static struct drm_driver msm_driver = {
.debugfs_cleanup = msm_debugfs_cleanup,
#endif
.ioctls = msm_ioctls,
- .num_ioctls = DRM_MSM_NUM_IOCTLS,
+ .num_ioctls = ARRAY_SIZE(msm_ioctls),
.fops = &fops,
.name = "msm_drm",
.desc = "MSM Snapdragon DRM",
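Sizing .num_ioctls with ARRAY_SIZE(msm_ioctls) instead of the hand-maintained DRM_MSM_NUM_IOCTLS constant means the count can no longer drift when entries are added to the table. The kernel macro is the classic element-count idiom (plus a compile-time check that its argument is a real array); a minimal userspace equivalent:

#include <stdio.h>

/* Simplified ARRAY_SIZE(): element count of a true array. The kernel
 * version also rejects pointer arguments at compile time.
 */
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	int ioctls[7] = { 0 }; /* stand-in for the msm_ioctls table */

	printf("%zu entries\n", ARRAY_SIZE(ioctls)); /* prints 7 */
	return 0;
}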
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index bb3f0dca9d92..f2f3388b41c1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -590,9 +590,9 @@ int vfe_hw_probe(struct platform_device *pdev)
(struct msm_vfe_hardware_info *) match_dev->data;
/* Cx ipeak support */
if (of_find_property(pdev->dev.of_node,
- "qcom,vfe_cx_ipeak", NULL)) {
+ "qcom,vfe-cx-ipeak", NULL)) {
vfe_dev->vfe_cx_ipeak = cx_ipeak_register(
- pdev->dev.of_node, "qcom,vfe_cx_ipeak");
+ pdev->dev.of_node, "qcom,vfe-cx-ipeak");
}
} else {
vfe_dev->hw_info = (struct msm_vfe_hardware_info *)
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index c8cc66a564ce..0325c5ded3cf 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -768,7 +768,6 @@ struct vfe_device {
size_t num_hvx_clk;
size_t num_norm_clk;
enum cam_ahb_clk_vote ahb_vote;
- bool turbo_vote;
struct cx_ipeak_client *vfe_cx_ipeak;
/* Sync variables*/
@@ -809,6 +808,7 @@ struct vfe_device {
uint32_t is_split;
uint32_t dual_vfe_enable;
unsigned long page_fault_addr;
+ uint32_t vfe_hw_limit;
/* Debug variables */
int dump_reg;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index d829aefe6c98..b704e84cc140 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -331,7 +331,6 @@ int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
goto ahb_vote_fail;
}
vfe_dev->ahb_vote = CAM_AHB_SVS_VOTE;
- vfe_dev->turbo_vote = 0;
vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
vfe_dev->vfe_base;
@@ -2563,31 +2562,53 @@ int msm_vfe47_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
int rc = 0;
int clk_idx = vfe_dev->hw_info->vfe_clk_idx;
int ret;
+ long clk_rate, prev_clk_rate;
+ clk_rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], *rate);
+ if (vfe_dev->msm_isp_vfe_clk_rate == clk_rate)
+ return rc;
+
+ prev_clk_rate = vfe_dev->msm_isp_vfe_clk_rate;
+ vfe_dev->msm_isp_vfe_clk_rate = clk_rate;
+ /*
+ * If cx_ipeak is supported, vote first so that DSP throttling is
+ * reduced before we go to turbo.
+ */
+ if ((vfe_dev->vfe_cx_ipeak) &&
+ (vfe_dev->msm_isp_vfe_clk_rate >=
+ vfe_dev->vfe_clk_rates[MSM_VFE_CLK_RATE_TURBO]
+ [vfe_dev->hw_info->vfe_clk_idx]) &&
+ prev_clk_rate <
+ vfe_dev->vfe_clk_rates[MSM_VFE_CLK_RATE_TURBO]
+ [vfe_dev->hw_info->vfe_clk_idx]) {
+ ret = cx_ipeak_update(vfe_dev->vfe_cx_ipeak, true);
+ if (ret) {
+ pr_err("%s: cx_ipeak_update failed %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+ /*set vfe clock*/
rc = msm_camera_clk_set_rate(&vfe_dev->pdev->dev,
vfe_dev->vfe_clk[clk_idx], *rate);
if (rc < 0)
return rc;
- *rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], *rate);
- vfe_dev->msm_isp_vfe_clk_rate = *rate;
- if (vfe_dev->vfe_cx_ipeak) {
- if (vfe_dev->msm_isp_vfe_clk_rate >=
- vfe_dev->vfe_clk_rates[MSM_VFE_CLK_RATE_TURBO]
- [vfe_dev->hw_info->vfe_clk_idx] &&
- vfe_dev->turbo_vote == 0) {
- ret = cx_ipeak_update(vfe_dev->vfe_cx_ipeak, true);
- if (ret)
- pr_debug("%s: cx_ipeak_update failed %d\n",
- __func__, ret);
- else
- vfe_dev->turbo_vote = 1;
- } else if (vfe_dev->turbo_vote == 1) {
- ret = cx_ipeak_update(vfe_dev->vfe_cx_ipeak, false);
- if (ret)
- pr_debug("%s: cx_ipeak_update failed %d\n",
- __func__, ret);
- else
- vfe_dev->turbo_vote = 0;
+ /*
+ * If cx_ipeak is supported, remove the vote when dropping below the
+ * turbo clock, provided a vote was cast earlier.
+ */
+ if ((vfe_dev->vfe_cx_ipeak) &&
+ (vfe_dev->msm_isp_vfe_clk_rate <
+ vfe_dev->vfe_clk_rates[MSM_VFE_CLK_RATE_TURBO]
+ [vfe_dev->hw_info->vfe_clk_idx]) &&
+ prev_clk_rate >=
+ vfe_dev->vfe_clk_rates[MSM_VFE_CLK_RATE_TURBO]
+ [vfe_dev->hw_info->vfe_clk_idx]) {
+ ret = cx_ipeak_update(vfe_dev->vfe_cx_ipeak, false);
+ if (ret) {
+ pr_err("%s: cx_ipeak_update failed %d\n",
+ __func__, ret);
+ return ret;
}
}
if (vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg)
@@ -2742,6 +2763,8 @@ int msm_vfe47_enable_regulators(struct vfe_device *vfe_dev, int enable)
int msm_vfe47_get_platform_data(struct vfe_device *vfe_dev)
{
int rc = 0;
+ void __iomem *vfe_fuse_base;
+ uint32_t vfe_fuse_base_size;
vfe_dev->vfe_base = msm_camera_get_reg_base(vfe_dev->pdev, "vfe", 0);
if (!vfe_dev->vfe_base)
@@ -2766,7 +2789,18 @@ int msm_vfe47_get_platform_data(struct vfe_device *vfe_dev)
rc = -ENOMEM;
goto get_res_fail;
}
-
+ vfe_dev->vfe_hw_limit = 0;
+ vfe_fuse_base = msm_camera_get_reg_base(vfe_dev->pdev,
+ "vfe_fuse", 0);
+ vfe_fuse_base_size = msm_camera_get_res_size(vfe_dev->pdev,
+ "vfe_fuse");
+ if (vfe_fuse_base) {
+ if (vfe_fuse_base_size)
+ vfe_dev->vfe_hw_limit =
+ (msm_camera_io_r(vfe_fuse_base) >> 5) & 0x1;
+ msm_camera_put_reg_base(vfe_dev->pdev, vfe_fuse_base,
+ "vfe_fuse", 0);
+ }
rc = vfe_dev->hw_info->vfe_ops.platform_ops.get_regulators(vfe_dev);
if (rc)
goto get_regulator_fail;
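The restructured msm_vfe47_set_clk_rate() enforces an ordering around the turbo threshold: crossing upward, the cx_ipeak vote is cast before the clock is raised; crossing downward, the vote is dropped only after the clock is lowered; both edges are detected by comparing prev_clk_rate with the turbo entry of vfe_clk_rates[]. A condensed sketch of the pattern with placeholder helpers (vote(), set_clk(), and the threshold value are stand-ins, not the driver's symbols):

#include <stdbool.h>

static unsigned long turbo_threshold = 600000000;                /* example value */
static int vote(bool on) { (void)on; return 0; }                 /* stub */
static int set_clk(unsigned long rate) { (void)rate; return 0; } /* stub */

static int set_rate_with_ipeak_vote(unsigned long prev, unsigned long next)
{
	int ret;

	/* Crossing up into turbo: vote before raising the clock so
	 * DSP throttling is reduced first.
	 */
	if (next >= turbo_threshold && prev < turbo_threshold) {
		ret = vote(true);
		if (ret)
			return ret;
	}

	ret = set_clk(next);
	if (ret)
		return ret;

	/* Crossing down out of turbo: unvote only after lowering. */
	if (next < turbo_threshold && prev >= turbo_threshold)
		ret = vote(false);

	return ret;
}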
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 22246f613462..507198721ccc 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -1428,6 +1428,20 @@ static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
vfe_dev->vfe_ub_policy = *cfg_data;
break;
}
+ case GET_VFE_HW_LIMIT: {
+ uint32_t *hw_limit = NULL;
+
+ if (cmd_len < sizeof(uint32_t)) {
+ pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+ __func__, __LINE__, cmd_len,
+ sizeof(uint32_t));
+ return -EINVAL;
+ }
+
+ hw_limit = (uint32_t *)cfg_data;
+ *hw_limit = vfe_dev->vfe_hw_limit;
+ break;
+ }
}
return 0;
}
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 08aab077eec7..8402e31364b9 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -980,6 +980,7 @@ static int cpp_init_hardware(struct cpp_device *cpp_dev)
{
int rc = 0;
uint32_t vbif_version;
+ cpp_dev->turbo_vote = 0;
rc = msm_camera_regulator_enable(cpp_dev->cpp_vdd,
cpp_dev->num_reg, true);
@@ -1432,6 +1433,14 @@ static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
return -ENODEV;
}
+ if (cpp_dev->turbo_vote == 1) {
+ rc = cx_ipeak_update(cpp_dev->cpp_cx_ipeak, false);
+ if (rc)
+ pr_err("cx_ipeak_update failed");
+ else
+ cpp_dev->turbo_vote = 0;
+ }
+
cpp_dev->cpp_open_cnt--;
if (cpp_dev->cpp_open_cnt == 0) {
pr_debug("irq_status: 0x%x\n",
@@ -2955,6 +2964,38 @@ static int msm_cpp_validate_input(unsigned int cmd, void *arg,
return 0;
}
+unsigned long cpp_cx_ipeak_update(struct cpp_device *cpp_dev,
+ unsigned long clock, int idx)
+{
+ unsigned long clock_rate = 0;
+ int ret = 0;
+
+ if ((clock >= cpp_dev->hw_info.freq_tbl
+ [(cpp_dev->hw_info.freq_tbl_count) - 1]) &&
+ (cpp_dev->turbo_vote == 0)) {
+ ret = cx_ipeak_update(cpp_dev->cpp_cx_ipeak, true);
+ if (ret) {
+ pr_err("cx_ipeak voting failed setting clock below turbo");
+ clock = cpp_dev->hw_info.freq_tbl
+ [(cpp_dev->hw_info.freq_tbl_count) - 2];
+ } else {
+ cpp_dev->turbo_vote = 1;
+ }
+ clock_rate = msm_cpp_set_core_clk(cpp_dev, clock, idx);
+ } else if (clock < cpp_dev->hw_info.freq_tbl
+ [(cpp_dev->hw_info.freq_tbl_count) - 1]) {
+ clock_rate = msm_cpp_set_core_clk(cpp_dev, clock, idx);
+ if (cpp_dev->turbo_vote == 1) {
+ ret = cx_ipeak_update(cpp_dev->cpp_cx_ipeak, false);
+ if (ret)
+ pr_err("cx_ipeak unvoting failed");
+ else
+ cpp_dev->turbo_vote = 0;
+ }
+ }
+ return clock_rate;
+}
+
long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
@@ -3337,9 +3378,15 @@ STREAM_BUFF_END:
mutex_unlock(&cpp_dev->mutex);
return -EINVAL;
}
- clock_rate = msm_cpp_set_core_clk(cpp_dev,
- clock_settings.clock_rate,
- msm_cpp_core_clk_idx);
+ if (cpp_dev->cpp_cx_ipeak) {
+ clock_rate = cpp_cx_ipeak_update(cpp_dev,
+ clock_settings.clock_rate,
+ msm_cpp_core_clk_idx);
+ } else {
+ clock_rate = msm_cpp_set_core_clk(cpp_dev,
+ clock_settings.clock_rate,
+ msm_cpp_core_clk_idx);
+ }
if (rc < 0) {
pr_err("Fail to set core clk\n");
mutex_unlock(&cpp_dev->mutex);
@@ -4391,6 +4438,15 @@ static int cpp_probe(struct platform_device *pdev)
}
}
+ if (of_find_property(pdev->dev.of_node, "qcom,cpp-cx-ipeak", NULL)) {
+ cpp_dev->cpp_cx_ipeak = cx_ipeak_register(
+ pdev->dev.of_node, "qcom,cpp-cx-ipeak");
+ if (cpp_dev->cpp_cx_ipeak)
+ CPP_DBG("Cx ipeak Registration Successful ");
+ else
+ pr_err("Cx ipeak Registration Unsuccessful");
+ }
+
rc = msm_camera_get_reset_info(pdev,
&cpp_dev->micro_iface_reset);
if (rc < 0) {
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
index e69b9d633a1f..a05448091e42 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
#include "cam_soc_api.h"
#include "cam_hw_ops.h"
#include <media/msmb_pproc.h>
+#include <soc/qcom/cx_ipeak.h>
/* hw version info:
31:28 Major version
@@ -284,6 +285,8 @@ struct cpp_device {
uint32_t micro_reset;
struct msm_cpp_payload_params payload_params;
struct msm_cpp_vbif_data *vbif_data;
+ bool turbo_vote;
+ struct cx_ipeak_client *cpp_cx_ipeak;
};
int msm_cpp_set_micro_clk(struct cpp_device *cpp_dev);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 5c7b4df40d5d..e170c9ffafc7 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -539,6 +539,7 @@ static void *sde_rotator_get_userptr(void *alloc_ctx,
buf->ctx = ctx;
buf->rot_dev = rot_dev;
if (ctx->secure_camera) {
+ buf->buffer = NULL;
buf->handle = ion_import_dma_buf(iclient,
buf->fd);
if (IS_ERR_OR_NULL(buf->handle)) {
@@ -552,6 +553,7 @@ static void *sde_rotator_get_userptr(void *alloc_ctx,
buf->ctx->session_id,
buf->fd, &buf->handle);
} else {
+ buf->handle = NULL;
buf->buffer = dma_buf_get(buf->fd);
if (IS_ERR_OR_NULL(buf->buffer)) {
SDEDEV_ERR(rot_dev->dev,
@@ -578,6 +580,8 @@ error_buf_get:
static void sde_rotator_put_userptr(void *buf_priv)
{
struct sde_rotator_buf_handle *buf = buf_priv;
+ struct ion_client *iclient;
+ struct sde_rotator_device *rot_dev;
if (IS_ERR_OR_NULL(buf))
return;
@@ -588,6 +592,9 @@ static void sde_rotator_put_userptr(void *buf_priv)
return;
}
+ rot_dev = buf->ctx->rot_dev;
+ iclient = buf->rot_dev->mdata->iclient;
+
SDEDEV_DBG(buf->rot_dev->dev, "put dmabuf s:%d fd:%d buf:%pad\n",
buf->ctx->session_id,
buf->fd, &buf->buffer);
@@ -597,6 +604,11 @@ static void sde_rotator_put_userptr(void *buf_priv)
buf->buffer = NULL;
}
+ if (buf->handle) {
+ ion_free(iclient, buf->handle);
+ buf->handle = NULL;
+ }
+
kfree(buf_priv);
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
index 0eaf1960ec27..882e3dcd6277 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
@@ -768,10 +768,17 @@ static int sde_mdp_get_img(struct sde_fb_data *img,
len = &data->len;
data->flags |= img->flags;
data->offset = img->offset;
- if (data->flags & SDE_ROT_EXT_DMA_BUF)
+
+ if ((data->flags & SDE_SECURE_CAMERA_SESSION) &&
+ IS_ERR_OR_NULL(img->handle)) {
+ SDEROT_ERR("error on ion_import_fb\n");
+ ret = PTR_ERR(img->handle);
+ img->handle = NULL;
+ return ret;
+ } else if (data->flags & SDE_ROT_EXT_DMA_BUF) {
data->srcp_dma_buf = img->buffer;
- else if (IS_ERR(data->srcp_dma_buf)) {
- SDEROT_ERR("error on ion_import_fd\n");
+ } else if (IS_ERR(data->srcp_dma_buf)) {
+ SDEROT_ERR("error on dma_buf\n");
ret = PTR_ERR(data->srcp_dma_buf);
data->srcp_dma_buf = NULL;
return ret;
@@ -845,8 +852,6 @@ static int sde_mdp_get_img(struct sde_fb_data *img,
ret = 0;
} while (0);
- if (!IS_ERR_OR_NULL(ihandle))
- ion_free(iclient, ihandle);
return ret;
}
diff --git a/drivers/net/ethernet/msm/msm_rmnet_mhi.c b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
index 9117ea7d08c0..6b23a8f61f35 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_mhi.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,13 +27,13 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of_device.h>
+#include <linux/rtnetlink.h>
#define RMNET_MHI_DRIVER_NAME "rmnet_mhi"
#define RMNET_MHI_DEV_NAME "rmnet_mhi%d"
#define MHI_DEFAULT_MTU 8000
#define MHI_MAX_MRU 0xFFFF
#define MHI_NAPI_WEIGHT_VALUE 12
-#define MHI_RX_HEADROOM 64
#define WATCHDOG_TIMEOUT (30 * HZ)
#define RMNET_IPC_LOG_PAGES (100)
@@ -79,6 +79,7 @@ struct __packed mhi_skb_priv {
struct rmnet_mhi_private {
struct list_head node;
+ u32 dev_id;
struct mhi_client_handle *tx_client_handle;
struct mhi_client_handle *rx_client_handle;
enum MHI_CLIENT_CHANNEL tx_channel;
@@ -87,6 +88,8 @@ struct rmnet_mhi_private {
struct sk_buff_head rx_buffers;
atomic_t rx_pool_len;
u32 mru;
+ u32 max_mru;
+ u32 max_mtu;
struct napi_struct napi;
gfp_t allocation_flags;
uint32_t tx_buffers_max;
@@ -118,9 +121,9 @@ static int rmnet_mhi_process_fragment(struct rmnet_mhi_private *rmnet_mhi_ptr,
if (rmnet_mhi_ptr->frag_skb) {
/* Merge the new skb into the old fragment */
temp_skb = skb_copy_expand(rmnet_mhi_ptr->frag_skb,
- MHI_RX_HEADROOM,
- skb->len,
- GFP_ATOMIC);
+ 0,
+ skb->len,
+ GFP_ATOMIC);
if (!temp_skb) {
kfree(rmnet_mhi_ptr->frag_skb);
rmnet_mhi_ptr->frag_skb = NULL;
@@ -207,9 +210,8 @@ static int rmnet_alloc_rx(struct rmnet_mhi_private *rmnet_mhi_ptr,
return -ENOMEM;
}
skb_priv = (struct mhi_skb_priv *)(skb->cb);
- skb_priv->dma_size = cur_mru - MHI_RX_HEADROOM;
+ skb_priv->dma_size = cur_mru;
skb_priv->dma_addr = 0;
- skb_reserve(skb, MHI_RX_HEADROOM);
/* These steps must be in atomic context */
spin_lock_irqsave(&rmnet_mhi_ptr->alloc_lock, flags);
@@ -584,7 +586,10 @@ static int rmnet_mhi_stop(struct net_device *dev)
static int rmnet_mhi_change_mtu(struct net_device *dev, int new_mtu)
{
- if (0 > new_mtu || MHI_MAX_MTU < new_mtu)
+ struct rmnet_mhi_private *rmnet_mhi_ptr =
+ *(struct rmnet_mhi_private **)netdev_priv(dev);
+
+ if (new_mtu < 0 || rmnet_mhi_ptr->max_mtu < new_mtu)
return -EINVAL;
dev->mtu = new_mtu;
@@ -667,11 +672,11 @@ static int rmnet_mhi_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
switch (ext_cmd.extended_ioctl) {
case RMNET_IOCTL_SET_MRU:
- if (!ext_cmd.u.data || ext_cmd.u.data > MHI_MAX_MRU) {
- rmnet_log(rmnet_mhi_ptr,
- MSG_CRITICAL,
- "Can't set MRU, value %u is invalid\n",
- ext_cmd.u.data);
+ if (!ext_cmd.u.data ||
+ ext_cmd.u.data > rmnet_mhi_ptr->max_mru) {
+ rmnet_log(rmnet_mhi_ptr, MSG_CRITICAL,
+ "Can't set MRU, value:%u is invalid max:%u\n",
+ ext_cmd.u.data, rmnet_mhi_ptr->max_mru);
return -EINVAL;
}
rmnet_log(rmnet_mhi_ptr,
@@ -793,6 +798,8 @@ static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
int ret = 0;
struct rmnet_mhi_private **rmnet_mhi_ctxt = NULL;
int r = 0;
+ char ifalias[IFALIASZ];
+ struct mhi_client_handle *client_handle = NULL;
rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Entered.\n");
@@ -826,6 +833,7 @@ static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
} else {
rmnet_mhi_ptr->tx_enabled = 1;
}
+ client_handle = rmnet_mhi_ptr->tx_client_handle;
}
if (rmnet_mhi_ptr->rx_client_handle != NULL) {
rmnet_log(rmnet_mhi_ptr,
@@ -841,7 +849,26 @@ static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
} else {
rmnet_mhi_ptr->rx_enabled = 1;
}
+ /* Both TX & RX client handles contain the same device info */
+ client_handle = rmnet_mhi_ptr->rx_client_handle;
}
+
+ if (!client_handle) {
+ ret = -EINVAL;
+ goto net_dev_alloc_fail;
+ }
+
+ snprintf(ifalias,
+ sizeof(ifalias),
+ "%s_%04x_%02u.%02u.%02u_%u",
+ RMNET_MHI_DRIVER_NAME,
+ client_handle->dev_id,
+ client_handle->domain,
+ client_handle->bus,
+ client_handle->slot,
+ rmnet_mhi_ptr->dev_id);
+
+ rtnl_lock();
rmnet_mhi_ptr->dev =
alloc_netdev(sizeof(struct rmnet_mhi_private *),
RMNET_MHI_DEV_NAME,
@@ -854,7 +881,9 @@ static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
goto net_dev_alloc_fail;
}
SET_NETDEV_DEV(rmnet_mhi_ptr->dev, &rmnet_mhi_ptr->pdev->dev);
+ dev_set_alias(rmnet_mhi_ptr->dev, ifalias, strlen(ifalias));
rmnet_mhi_ctxt = netdev_priv(rmnet_mhi_ptr->dev);
+ rtnl_unlock();
*rmnet_mhi_ctxt = rmnet_mhi_ptr;
ret = dma_set_mask(&(rmnet_mhi_ptr->dev->dev),
@@ -981,17 +1010,16 @@ static void rmnet_mhi_cb(struct mhi_cb_info *cb_info)
}
}
-static struct mhi_client_info_t rmnet_mhi_info = {rmnet_mhi_cb};
-
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
static void rmnet_mhi_create_debugfs(struct rmnet_mhi_private *rmnet_mhi_ptr)
{
- char node_name[15];
+ char node_name[32];
int i;
const umode_t mode = (S_IRUSR | S_IWUSR);
struct dentry *file;
+ struct mhi_client_handle *client_handle;
const struct {
char *name;
@@ -1047,8 +1075,20 @@ static void rmnet_mhi_create_debugfs(struct rmnet_mhi_private *rmnet_mhi_ptr)
},
};
- snprintf(node_name, sizeof(node_name), "%s%d",
- RMNET_MHI_DRIVER_NAME, rmnet_mhi_ptr->pdev->id);
+ /* Both TX & RX client handles contain the same device info */
+ client_handle = rmnet_mhi_ptr->rx_client_handle;
+ if (!client_handle)
+ client_handle = rmnet_mhi_ptr->tx_client_handle;
+
+ snprintf(node_name,
+ sizeof(node_name),
+ "%s_%04x_%02u.%02u.%02u_%u",
+ RMNET_MHI_DRIVER_NAME,
+ client_handle->dev_id,
+ client_handle->domain,
+ client_handle->bus,
+ client_handle->slot,
+ rmnet_mhi_ptr->dev_id);
if (IS_ERR_OR_NULL(dentry))
return;
@@ -1109,11 +1149,16 @@ static int rmnet_mhi_probe(struct platform_device *pdev)
int rc;
u32 channel;
struct rmnet_mhi_private *rmnet_mhi_ptr;
- char node_name[15];
+ struct mhi_client_handle *client_handle = NULL;
+ char node_name[32];
+ struct mhi_client_info_t client_info;
if (unlikely(!pdev->dev.of_node))
return -ENODEV;
+ if (!mhi_is_device_ready(&pdev->dev, "qcom,mhi"))
+ return -EPROBE_DEFER;
+
pdev->id = of_alias_get_id(pdev->dev.of_node, "mhi_rmnet");
if (unlikely(pdev->id < 0))
return -ENODEV;
@@ -1135,15 +1180,50 @@ static int rmnet_mhi_probe(struct platform_device *pdev)
}
rc = of_property_read_u32(pdev->dev.of_node,
+ "cell-index",
+ &rmnet_mhi_ptr->dev_id);
+ if (unlikely(rc)) {
+ rmnet_log(rmnet_mhi_ptr,
+ MSG_CRITICAL,
+ "failed to get valid 'cell-index'\n");
+ goto probe_fail;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,mhi-max-mru",
+ &rmnet_mhi_ptr->max_mru);
+ if (likely(rc)) {
+ rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+ "max-mru not defined, setting to max %d\n",
+ MHI_MAX_MRU);
+ rmnet_mhi_ptr->max_mru = MHI_MAX_MRU;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,mhi-max-mtu",
+ &rmnet_mhi_ptr->max_mtu);
+ if (likely(rc)) {
+ rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+ "max-mtu not defined, setting to max %d\n",
+ MHI_MAX_MTU);
+ rmnet_mhi_ptr->max_mtu = MHI_MAX_MTU;
+ }
+
+ client_info.dev = &pdev->dev;
+ client_info.node_name = "qcom,mhi";
+ client_info.mhi_client_cb = rmnet_mhi_cb;
+ client_info.user_data = rmnet_mhi_ptr;
+
+ rc = of_property_read_u32(pdev->dev.of_node,
"qcom,mhi-tx-channel",
&channel);
if (rc == 0) {
rmnet_mhi_ptr->tx_channel = channel;
+ client_info.chan = channel;
+ client_info.max_payload = rmnet_mhi_ptr->max_mtu;
+
rc = mhi_register_channel(&rmnet_mhi_ptr->tx_client_handle,
- rmnet_mhi_ptr->tx_channel,
- 0,
- &rmnet_mhi_info,
- rmnet_mhi_ptr);
+ &client_info);
if (unlikely(rc)) {
rmnet_log(rmnet_mhi_ptr,
MSG_CRITICAL,
@@ -1152,6 +1232,7 @@ static int rmnet_mhi_probe(struct platform_device *pdev)
rc);
goto probe_fail;
}
+ client_handle = rmnet_mhi_ptr->tx_client_handle;
}
rc = of_property_read_u32(pdev->dev.of_node,
@@ -1159,11 +1240,10 @@ static int rmnet_mhi_probe(struct platform_device *pdev)
&channel);
if (rc == 0) {
rmnet_mhi_ptr->rx_channel = channel;
+ client_info.max_payload = rmnet_mhi_ptr->max_mru;
+ client_info.chan = channel;
rc = mhi_register_channel(&rmnet_mhi_ptr->rx_client_handle,
- rmnet_mhi_ptr->rx_channel,
- 0,
- &rmnet_mhi_info,
- rmnet_mhi_ptr);
+ &client_info);
if (unlikely(rc)) {
rmnet_log(rmnet_mhi_ptr,
MSG_CRITICAL,
@@ -1172,22 +1252,31 @@ static int rmnet_mhi_probe(struct platform_device *pdev)
rc);
goto probe_fail;
}
-
+ /* Overwriting tx_client_handle is OK because the dev_id and
+ * BDF are the same for both channels
+ */
+ client_handle = rmnet_mhi_ptr->rx_client_handle;
INIT_WORK(&rmnet_mhi_ptr->alloc_work, rmnet_mhi_alloc_work);
spin_lock_init(&rmnet_mhi_ptr->alloc_lock);
}
/* We must've have @ least one valid channel */
- if (!rmnet_mhi_ptr->rx_client_handle &&
- !rmnet_mhi_ptr->tx_client_handle) {
+ if (!client_handle) {
rmnet_log(rmnet_mhi_ptr, MSG_CRITICAL,
"No registered channels\n");
rc = -ENODEV;
goto probe_fail;
}
- snprintf(node_name, sizeof(node_name), "%s%d",
- RMNET_MHI_DRIVER_NAME, pdev->id);
+ snprintf(node_name,
+ sizeof(node_name),
+ "%s_%04x_%02u.%02u.%02u_%u",
+ RMNET_MHI_DRIVER_NAME,
+ client_handle->dev_id,
+ client_handle->domain,
+ client_handle->bus,
+ client_handle->slot,
+ rmnet_mhi_ptr->dev_id);
rmnet_mhi_ptr->rmnet_ipc_log =
ipc_log_context_create(RMNET_IPC_LOG_PAGES,
node_name, 0);
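The interface alias, debugfs directory, and IPC log context all move from the flat rmnet_mhi%d scheme to a name that embeds the modem's PCIe identity: <driver>_<dev_id>_<domain>.<bus>.<slot>_<cell-index>. A quick illustration of what the format string produces, plugging in the values from the msm_mhi.txt example earlier in this patch:

#include <stdio.h>

int main(void)
{
	char name[32];

	/* dev_id 0x0301, domain 2, bus 4, slot 0 from the DT example;
	 * the trailing 0 is the rmnet instance's cell-index.
	 */
	snprintf(name, sizeof(name), "%s_%04x_%02u.%02u.%02u_%u",
		 "rmnet_mhi", 0x0301u, 2u, 4u, 0u, 0u);

	printf("%s\n", name); /* rmnet_mhi_0301_02.04.00_0 */
	return 0;
}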
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index a0acb2d0cb79..7260bef314a4 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -80,12 +80,20 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
}
}
- if (wil->platform_ops.suspend)
+ /* Disable PCIe IRQ to prevent sporadic IRQs when PCIe is suspending */
+ wil_dbg_pm(wil, "Disabling PCIe IRQ before suspending\n");
+ wil_disable_irq(wil);
+
+ if (wil->platform_ops.suspend) {
rc = wil->platform_ops.suspend(wil->platform_handle);
+ if (rc)
+ wil_enable_irq(wil);
+ }
out:
wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
+
return rc;
}
@@ -104,6 +112,9 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
}
}
+ wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
+ wil_enable_irq(wil);
+
/* if netif up, bring hardware up
* During open(), IFF_UP set after actual device method
* invocation. This prevent recursive call to wil_up()
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index a2665c9e9688..25364e8efa38 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -1974,6 +1974,8 @@ static void ipa_cleanup_wlan_rx_common_cache(void)
struct ipa_rx_pkt_wrapper *rx_pkt;
struct ipa_rx_pkt_wrapper *tmp;
+ spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+
list_for_each_entry_safe(rx_pkt, tmp,
&ipa_ctx->wc_memb.wlan_comm_desc_list, link) {
list_del(&rx_pkt->link);
@@ -1994,6 +1996,8 @@ static void ipa_cleanup_wlan_rx_common_cache(void)
IPAERR("wlan comm buff total cnt: %d\n",
ipa_ctx->wc_memb.wlan_comm_total_cnt);
+ spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+
}
static void ipa_alloc_wlan_rx_common_cache(u32 size)
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
index d14f8da15595..00d52d0d9115 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
@@ -361,6 +361,7 @@ int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
IPAERR("client %d (ep: %d) connected\n", in->dl.client,
ipa_ep_idx_dl);
+ ipa_inc_acquire_wakelock(IPA_WAKELOCK_REF_CLIENT_ODU_RX);
fail:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
@@ -436,6 +437,7 @@ int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
ipa_disable_data_path(ipa_ep_idx_dl);
memset(&ipa_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa_ep_context));
IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+ ipa_dec_release_wakelock(IPA_WAKELOCK_REF_CLIENT_ODU_RX);
fail:
dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
diff --git a/drivers/platform/msm/mhi/mhi.h b/drivers/platform/msm/mhi/mhi.h
index 3d40d114437a..4bce96102525 100644
--- a/drivers/platform/msm/mhi/mhi.h
+++ b/drivers/platform/msm/mhi/mhi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,7 +29,6 @@
#include <linux/list.h>
#include <linux/dma-mapping.h>
-extern struct mhi_pcie_devices mhi_devices;
struct mhi_device_ctxt;
enum MHI_DEBUG_LEVEL {
@@ -49,23 +48,56 @@ struct pcie_core_info {
u32 mhi_ver;
void __iomem *bar0_base;
void __iomem *bar0_end;
- void __iomem *bar2_base;
- void __iomem *bar2_end;
u32 irq_base;
u32 max_nr_msis;
+ u32 domain;
+ u32 bus;
+ u32 slot;
struct pci_saved_state *pcie_state;
+ bool pci_master;
+};
+
+struct firmware_info {
+ const char *fw_image;
+ size_t max_sbl_len;
+ size_t segment_size;
+};
+
+struct bhie_mem_info {
+ void *pre_aligned;
+ void *aligned;
+ size_t alloc_size;
+ size_t size;
+ phys_addr_t phys_addr;
+ dma_addr_t dma_handle;
+};
+
+struct bhie_vec_table {
+ struct scatterlist *sg_list;
+ struct bhie_mem_info *bhie_mem_info;
+ struct bhi_vec_entry *bhi_vec_entry;
+ unsigned segment_count;
+ u32 sequence; /* sequence number indicating a new transfer */
};
struct bhi_ctxt_t {
void __iomem *bhi_base;
+ void *unaligned_image_loc;
+ dma_addr_t dma_handle;
+ size_t alloc_size;
void *image_loc;
dma_addr_t phy_image_loc;
size_t image_size;
- void *unaligned_image_loc;
dev_t bhi_dev;
struct cdev cdev;
- struct class *bhi_class;
struct device *dev;
+ u32 alignment;
+ u32 poll_timeout;
+ /* BHI/E vector table */
+ bool manage_boot; /* fw download done by MHI host */
+ struct work_struct fw_load_work;
+ struct firmware_info firmware_info;
+ struct bhie_vec_table fw_table;
};
enum MHI_CHAN_DIR {
@@ -316,6 +348,11 @@ struct mhi_ring {
u32 msi_disable_cntr;
u32 msi_enable_cntr;
spinlock_t ring_lock;
+ struct dma_pool *dma_pool;
+ struct tasklet_struct ev_task;
+ struct work_struct ev_worker;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+ int index;
};
enum MHI_CMD_STATUS {
@@ -344,25 +381,27 @@ enum MHI_INIT_ERROR_STAGE {
};
enum STATE_TRANSITION {
- STATE_TRANSITION_RESET = 0x0,
- STATE_TRANSITION_READY = 0x1,
- STATE_TRANSITION_M0 = 0x2,
- STATE_TRANSITION_M1 = 0x3,
- STATE_TRANSITION_M2 = 0x4,
- STATE_TRANSITION_M3 = 0x5,
- STATE_TRANSITION_BHI = 0x6,
- STATE_TRANSITION_SBL = 0x7,
- STATE_TRANSITION_AMSS = 0x8,
- STATE_TRANSITION_LINK_DOWN = 0x9,
- STATE_TRANSITION_WAKE = 0xA,
- STATE_TRANSITION_SYS_ERR = 0xFF,
- STATE_TRANSITION_reserved = 0x80000000
+ STATE_TRANSITION_RESET = MHI_STATE_RESET,
+ STATE_TRANSITION_READY = MHI_STATE_READY,
+ STATE_TRANSITION_M0 = MHI_STATE_M0,
+ STATE_TRANSITION_M1 = MHI_STATE_M1,
+ STATE_TRANSITION_M2 = MHI_STATE_M2,
+ STATE_TRANSITION_M3 = MHI_STATE_M3,
+ STATE_TRANSITION_BHI,
+ STATE_TRANSITION_SBL,
+ STATE_TRANSITION_AMSS,
+ STATE_TRANSITION_LINK_DOWN,
+ STATE_TRANSITION_WAKE,
+ STATE_TRANSITION_BHIE,
+ STATE_TRANSITION_SYS_ERR,
+ STATE_TRANSITION_MAX
};
enum MHI_EXEC_ENV {
MHI_EXEC_ENV_PBL = 0x0,
MHI_EXEC_ENV_SBL = 0x1,
MHI_EXEC_ENV_AMSS = 0x2,
+ MHI_EXEC_ENV_BHIE = 0x3,
MHI_EXEC_ENV_reserved = 0x80000000
};
@@ -382,7 +421,7 @@ struct mhi_chan_cfg {
union mhi_cmd_pkt cmd_pkt;
};
-struct mhi_client_handle {
+struct mhi_client_config {
struct mhi_chan_info chan_info;
struct mhi_device_ctxt *mhi_dev_ctxt;
struct mhi_client_info_t client_info;
@@ -412,9 +451,12 @@ struct mhi_state_work_queue {
struct mhi_buf_info {
dma_addr_t bb_p_addr;
+ dma_addr_t pre_alloc_p_addr;
void *bb_v_addr;
+ void *pre_alloc_v_addr;
void *client_buf;
size_t buf_len;
+ size_t pre_alloc_len;
size_t filled_size;
enum dma_data_direction dir;
int bb_active;
@@ -431,23 +473,19 @@ struct mhi_counters {
u32 bb_used[MHI_MAX_CHANNELS];
atomic_t device_wake;
atomic_t outbound_acks;
- atomic_t events_pending;
u32 *msi_counter;
u32 mhi_reset_cntr;
+ u32 link_down_cntr;
+ u32 link_up_cntr;
};
struct mhi_flags {
u32 mhi_initialized;
u32 link_up;
- int stop_threads;
- u32 kill_threads;
- u32 ev_thread_stopped;
- u32 st_thread_stopped;
+ bool bb_required;
};
struct mhi_wait_queues {
- wait_queue_head_t *mhi_event_wq;
- wait_queue_head_t *state_change_event;
wait_queue_head_t *m0_event;
wait_queue_head_t *m3_event;
wait_queue_head_t *bhi_event;
@@ -486,13 +524,17 @@ struct mhi_dev_space {
};
struct mhi_device_ctxt {
+ struct list_head node;
+ struct pcie_core_info core;
+ struct msm_pcie_register_event mhi_pci_link_event;
+ struct pci_dev *pcie_device;
+ struct bhi_ctxt_t bhi_ctxt;
+ struct platform_device *plat_dev;
enum MHI_PM_STATE mhi_pm_state; /* Host driver state */
enum MHI_STATE mhi_state; /* protocol state */
enum MHI_EXEC_ENV dev_exec_env;
struct mhi_dev_space dev_space;
- struct mhi_pcie_dev_info *dev_info;
- struct pcie_core_info *dev_props;
struct mhi_ring chan_bb_list[MHI_MAX_CHANNELS];
struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];
@@ -500,12 +542,9 @@ struct mhi_device_ctxt {
struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];
struct mhi_chan_cfg mhi_chan_cfg[MHI_MAX_CHANNELS];
-
struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS];
struct mhi_event_ring_cfg *ev_ring_props;
- struct task_struct *event_thread_handle;
- struct task_struct *st_thread_handle;
- struct tasklet_struct ev_task; /* Process control Events */
+ struct work_struct st_thread_worker;
struct work_struct process_m1_worker;
struct mhi_wait_queues mhi_ev_wq;
struct dev_mmio_info mmio_info;
@@ -517,7 +556,9 @@ struct mhi_device_ctxt {
struct hrtimer m1_timer;
ktime_t m1_timeout;
+ u32 poll_reset_timeout_ms;
+ struct notifier_block mhi_ssr_nb;
struct esoc_desc *esoc_handle;
void *esoc_ssr_handle;
@@ -534,35 +575,47 @@ struct mhi_device_ctxt {
struct wakeup_source w_lock;
char *chan_info;
- struct dentry *mhi_parent_folder;
-};
+ struct dentry *child;
+ struct dentry *parent;
+ void *mhi_ipc_log;
+
+ /* Shadow functions since not all devices support runtime PM */
+ int (*bus_master_rt_get)(struct pci_dev *pci_dev);
+ void (*bus_master_rt_put)(struct pci_dev *pci_dev);
+ void (*runtime_get)(struct mhi_device_ctxt *mhi_dev_ctxt);
+ void (*runtime_put)(struct mhi_device_ctxt *mhi_dev_ctxt);
+ void (*assert_wake)(struct mhi_device_ctxt *mhi_dev_ctxt,
+ bool force_set);
+ void (*deassert_wake)(struct mhi_device_ctxt *mhi_dev_ctxt);
-struct mhi_pcie_dev_info {
- struct pcie_core_info core;
- struct mhi_device_ctxt mhi_ctxt;
- struct msm_pcie_register_event mhi_pci_link_event;
- struct pci_dev *pcie_device;
- struct pci_driver *mhi_pcie_driver;
- struct bhi_ctxt_t bhi_ctxt;
- struct platform_device *plat_dev;
- u32 link_down_cntr;
- u32 link_up_cntr;
+ struct completion cmd_complete;
};
-struct mhi_pcie_devices {
- struct mhi_pcie_dev_info device_list[MHI_MAX_SUPPORTED_DEVICES];
- s32 nr_of_devices;
+struct mhi_device_driver {
+ struct mutex lock;
+ struct list_head head;
+ struct class *mhi_bhi_class;
+ struct dentry *parent;
};
struct mhi_event_ring_cfg {
u32 nr_desc;
u32 msi_vec;
u32 intmod;
+ enum MHI_CLIENT_CHANNEL chan;
u32 flags;
+ /*
+ * Priority of event handling:
+ * 0 = highest, handle events in ISR (reserved for future use)
+ * 1 = handle events in a tasklet
+ * 2 = handle events in a worker thread
+ */
+ u32 priority;
enum MHI_RING_CLASS class;
enum MHI_EVENT_RING_STATE state;
irqreturn_t (*mhi_handler_ptr)(int , void *);
};
+#define MHI_EV_PRIORITY_TASKLET (1)
struct mhi_data_buf {
dma_addr_t bounce_buffer;
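The new priority cell selects the execution context each event ring is serviced in: 0 would run in the ISR itself, 1 defers to a tasklet (softirq), 2 to a work queue (process context, may sleep). A kernel-context sketch of dispatching on that cell, using simplified types rather than the driver's structs:

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct ev_ring {
	u32 priority;                   /* 0 = ISR, 1 = tasklet, 2 = worker */
	struct tasklet_struct ev_task;
	struct work_struct ev_worker;
};

static irqreturn_t ev_msi_handler(int irq, void *data)
{
	struct ev_ring *ring = data;

	switch (ring->priority) {
	case 1:                         /* MHI_EV_PRIORITY_TASKLET */
		tasklet_schedule(&ring->ev_task);
		break;
	case 2:
		schedule_work(&ring->ev_worker);
		break;
	default:                        /* 0: reserved, would run inline */
		break;
	}
	return IRQ_HANDLED;
}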
@@ -570,18 +623,20 @@ struct mhi_data_buf {
u32 bounce_flag;
};
+extern struct mhi_device_driver *mhi_device_drv;
+
irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id);
int mhi_reset_all_thread_queues(
struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_add_elements_to_event_rings(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION new_state);
-int get_nr_avail_ring_elements(struct mhi_ring *ring);
+int get_nr_avail_ring_elements(struct mhi_device_ctxt *mhi_dev_ctxt,
+ struct mhi_ring *ring);
int get_nr_enclosed_el(struct mhi_ring *ring, void *loc_1,
void *loc_2, u32 *nr_el);
int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt);
-int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
- struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_init_device_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 nr_ev_el, u32 event_ring_index);
int mhi_send_cmd(struct mhi_device_ctxt *dest_device,
@@ -618,25 +673,24 @@ enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_cmd_pkt **cmd_pkt, u32 event_index);
int parse_cmd_event(struct mhi_device_ctxt *ctxt,
union mhi_event_pkt *event, u32 event_index);
-int parse_event_thread(void *ctxt);
int mhi_test_for_device_ready(
struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_test_for_device_reset(
struct mhi_device_ctxt *mhi_dev_ctxt);
int validate_ring_el_addr(struct mhi_ring *ring, uintptr_t addr);
int validate_ev_el_addr(struct mhi_ring *ring, uintptr_t addr);
-int mhi_state_change_thread(void *ctxt);
+void mhi_state_change_worker(struct work_struct *work);
int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION new_state);
int mhi_wait_for_mdm(struct mhi_device_ctxt *mhi_dev_ctxt);
enum hrtimer_restart mhi_initiate_m1(struct hrtimer *timer);
int mhi_pci_suspend(struct device *dev);
int mhi_pci_resume(struct device *dev);
-int mhi_init_pcie_device(struct mhi_pcie_dev_info *mhi_pcie_dev);
+int mhi_init_pcie_device(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_init_pm_sysfs(struct device *dev);
void mhi_rem_pm_sysfs(struct device *dev);
void mhi_pci_remove(struct pci_dev *mhi_device);
-int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev);
+int mhi_ctxt_init(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_get_chan_max_buffers(u32 chan);
int mhi_esoc_register(struct mhi_device_ctxt *mhi_dev_ctxt);
void mhi_link_state_cb(struct msm_pcie_notify *notify);
@@ -644,6 +698,10 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_CB_REASON reason);
void mhi_notify_client(struct mhi_client_handle *client_handle,
enum MHI_CB_REASON reason);
+void mhi_master_mode_runtime_get(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_master_mode_runtime_put(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_slave_mode_runtime_get(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_slave_mode_runtime_put(struct mhi_device_ctxt *mhi_dev_ctxt);
void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
bool force_set);
@@ -691,10 +749,13 @@ void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt);
enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt);
void process_m1_transition(struct work_struct *work);
-int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev);
+int set_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt);
void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_STATE new_state);
const char *state_transition_str(enum STATE_TRANSITION state);
-void mhi_ctrl_ev_task(unsigned long data);
+void mhi_ev_task(unsigned long data);
+void process_event_ring(struct work_struct *work);
+int process_m0_transition(struct mhi_device_ctxt *mhi_dev_ctxt);
+int process_m3_transition(struct mhi_device_ctxt *mhi_dev_ctxt);
#endif
diff --git a/drivers/platform/msm/mhi/mhi_bhi.c b/drivers/platform/msm/mhi/mhi_bhi.c
index 113791a62c38..0cc8967757ec 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.c
+++ b/drivers/platform/msm/mhi/mhi_bhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
@@ -23,89 +24,207 @@
static int bhi_open(struct inode *mhi_inode, struct file *file_handle)
{
- file_handle->private_data = &mhi_devices.device_list[0];
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+
+ mhi_dev_ctxt = container_of(mhi_inode->i_cdev,
+ struct mhi_device_ctxt,
+ bhi_ctxt.cdev);
+ file_handle->private_data = mhi_dev_ctxt;
return 0;
}
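bhi_open() now recovers the per-device context from the inode instead of indexing a global array: the cdev is embedded in bhi_ctxt, which is embedded in mhi_device_ctxt, so a single container_of() walks from inode->i_cdev to the owning device. A generic kernel-context sketch of the embedded-cdev idiom:

#include <linux/cdev.h>
#include <linux/fs.h>

struct my_dev {
	int id;
	struct cdev cdev;       /* embedded, registered with cdev_add() */
};

static int my_open(struct inode *inode, struct file *filp)
{
	/* No global lookup table: the cdev address pins down the owner. */
	struct my_dev *dev = container_of(inode->i_cdev,
					  struct my_dev, cdev);

	filp->private_data = dev;   /* later fops get the right instance */
	return 0;
}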
-static ssize_t bhi_write(struct file *file,
- const char __user *buf,
- size_t count, loff_t *offp)
+static int bhi_alloc_bhie_xfer(struct mhi_device_ctxt *mhi_dev_ctxt,
+ size_t size,
+ struct bhie_vec_table *vec_table)
{
- int ret_val = 0;
- u32 pcie_word_val = 0;
- u32 i = 0;
- struct bhi_ctxt_t *bhi_ctxt =
- &(((struct mhi_pcie_dev_info *)file->private_data)->bhi_ctxt);
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &((struct mhi_pcie_dev_info *)file->private_data)->mhi_ctxt;
- size_t amount_copied = 0;
- uintptr_t align_len = 0x1000;
- u32 tx_db_val = 0;
- rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
- const long bhi_timeout_ms = 1000;
- long timeout;
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ struct device *dev = &mhi_dev_ctxt->plat_dev->dev;
+ const phys_addr_t align = bhi_ctxt->alignment - 1;
+ size_t seg_size = bhi_ctxt->firmware_info.segment_size;
+ /* We need one additional entry for the vector table */
+ int segments = DIV_ROUND_UP(size, seg_size) + 1;
+ int i;
+ struct scatterlist *sg_list;
+ struct bhie_mem_info *bhie_mem_info, *info;
- if (buf == NULL || 0 == count)
- return -EIO;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Total size:%lu total_seg:%d seg_size:%lu\n",
+ size, segments, seg_size);
- if (count > BHI_MAX_IMAGE_SIZE)
+ sg_list = kcalloc(segments, sizeof(*sg_list), GFP_KERNEL);
+ if (!sg_list)
return -ENOMEM;
- timeout = wait_event_interruptible_timeout(
- *mhi_dev_ctxt->mhi_ev_wq.bhi_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_BHI,
- msecs_to_jiffies(bhi_timeout_ms));
- if (timeout <= 0 && mhi_dev_ctxt->mhi_state != MHI_STATE_BHI)
- return -EIO;
+ bhie_mem_info = kcalloc(segments, sizeof(*bhie_mem_info), GFP_KERNEL);
+ if (!bhie_mem_info)
+ goto alloc_bhi_mem_info_error;
- mhi_log(MHI_MSG_INFO, "Entered. User Image size 0x%zx\n", count);
+ /* Allocate buffers for the BHI/E vector table */
+ for (i = 0; i < segments; i++) {
+ size_t size = seg_size;
- bhi_ctxt->unaligned_image_loc = kmalloc(count + (align_len - 1),
- GFP_KERNEL);
+ /* Last entry is for the vector table */
+ if (i == segments - 1)
+ size = sizeof(struct bhi_vec_entry) * i;
+ info = &bhie_mem_info[i];
+ info->size = size;
+ info->alloc_size = info->size + align;
+ info->pre_aligned =
+ dma_alloc_coherent(dev, info->alloc_size,
+ &info->dma_handle, GFP_KERNEL);
+ if (!info->pre_aligned)
+ goto alloc_dma_error;
+
+ info->phys_addr = (info->dma_handle + align) & ~align;
+ info->aligned = info->pre_aligned +
+ (info->phys_addr - info->dma_handle);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Seg:%d unaligned Img: 0x%llx aligned:0x%llx\n",
+ i, info->dma_handle, info->phys_addr);
+ }
+
+ sg_init_table(sg_list, segments);
+ sg_set_buf(sg_list, info->aligned, info->size);
+ sg_dma_address(sg_list) = info->phys_addr;
+ sg_dma_len(sg_list) = info->size;
+ vec_table->sg_list = sg_list;
+ vec_table->bhie_mem_info = bhie_mem_info;
+ vec_table->bhi_vec_entry = info->aligned;
+ vec_table->segment_count = segments;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "BHI/E table successfully allocated\n");
+ return 0;
+
+alloc_dma_error:
+ for (i = i - 1; i >= 0; i--)
+ dma_free_coherent(dev,
+ bhie_mem_info[i].alloc_size,
+ bhie_mem_info[i].pre_aligned,
+ bhie_mem_info[i].dma_handle);
+ kfree(bhie_mem_info);
+alloc_bhi_mem_info_error:
+ kfree(sg_list);
+ return -ENOMEM;
+}
+
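bhi_alloc_bhie_xfer() above (and bhi_alloc_pbl_xfer() below) use the same trick: over-allocate by alignment - 1, round the bus address up with (dma_handle + align) & ~align, where align is alignment - 1, then offset the CPU pointer by the same delta. The arithmetic can be checked standalone; the example address below is invented:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t alignment = 0x1000;    /* BHI_DEFAULT_ALIGNMENT */
	const uint64_t align = alignment - 1;
	uint64_t dma_handle = 0x12345678;     /* invented unaligned address */

	/* Round the bus address up to the next power-of-two boundary. */
	uint64_t phys = (dma_handle + align) & ~align;
	uint64_t delta = phys - dma_handle;   /* add this to pre_aligned */

	assert((phys & align) == 0);
	assert(delta < alignment);            /* why alloc_size = size + align */
	printf("0x%llx -> 0x%llx (delta 0x%llx)\n",
	       (unsigned long long)dma_handle,
	       (unsigned long long)phys,
	       (unsigned long long)delta);
	return 0;
}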
+static int bhi_alloc_pbl_xfer(struct mhi_device_ctxt *mhi_dev_ctxt,
+ size_t size)
+{
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ const phys_addr_t align_len = bhi_ctxt->alignment;
+ size_t alloc_size = size + (align_len - 1);
+ struct device *dev = &mhi_dev_ctxt->plat_dev->dev;
+
+ bhi_ctxt->unaligned_image_loc =
+ dma_alloc_coherent(dev, alloc_size, &bhi_ctxt->dma_handle,
+ GFP_KERNEL);
if (bhi_ctxt->unaligned_image_loc == NULL)
return -ENOMEM;
- mhi_log(MHI_MSG_INFO, "Unaligned Img Loc: %p\n",
- bhi_ctxt->unaligned_image_loc);
- bhi_ctxt->image_loc =
- (void *)((uintptr_t)bhi_ctxt->unaligned_image_loc +
- (align_len - (((uintptr_t)bhi_ctxt->unaligned_image_loc) %
- align_len)));
+ bhi_ctxt->alloc_size = alloc_size;
+ bhi_ctxt->phy_image_loc = (bhi_ctxt->dma_handle + (align_len - 1)) &
+ ~(align_len - 1);
+ bhi_ctxt->image_loc = bhi_ctxt->unaligned_image_loc +
+ (bhi_ctxt->phy_image_loc - bhi_ctxt->dma_handle);
+ bhi_ctxt->image_size = size;
- mhi_log(MHI_MSG_INFO, "Aligned Img Loc: %p\n", bhi_ctxt->image_loc);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "alloc_size:%lu image_size:%lu unal_addr:0x%llx0x al_addr:0x%llx\n",
+ bhi_ctxt->alloc_size, bhi_ctxt->image_size,
+ bhi_ctxt->dma_handle, bhi_ctxt->phy_image_loc);
- bhi_ctxt->image_size = count;
+ return 0;
+}
- if (0 != copy_from_user(bhi_ctxt->image_loc, buf, count)) {
- ret_val = -ENOMEM;
- goto bhi_copy_error;
+/* Load firmware via the BHIE protocol */
+static int bhi_load_bhie_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ struct bhie_vec_table *fw_table = &bhi_ctxt->fw_table;
+ const struct bhie_mem_info *bhie_mem_info =
+ &fw_table->bhie_mem_info[fw_table->segment_count - 1];
+ u32 val;
+ const u32 tx_sequence = fw_table->sequence++;
+ unsigned long timeout;
+ rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+
+ /* Program TX/RX Vector table */
+ read_lock_bh(pm_xfer_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_bh(pm_xfer_lock);
+ return -EIO;
}
- amount_copied = count;
- /* Flush the writes, in anticipation for a device read */
- wmb();
- mhi_log(MHI_MSG_INFO,
- "Copied image from user at addr: %p\n", bhi_ctxt->image_loc);
- bhi_ctxt->phy_image_loc = dma_map_single(
- &mhi_dev_ctxt->dev_info->plat_dev->dev,
- bhi_ctxt->image_loc,
- bhi_ctxt->image_size,
- DMA_TO_DEVICE);
-
- if (dma_mapping_error(NULL, bhi_ctxt->phy_image_loc)) {
- ret_val = -EIO;
- goto bhi_copy_error;
+
+ val = HIGH_WORD(bhie_mem_info->phys_addr);
+ mhi_reg_write(mhi_dev_ctxt,
+ bhi_ctxt->bhi_base,
+ BHIE_TXVECADDR_HIGH_OFFS,
+ val);
+ val = LOW_WORD(bhie_mem_info->phys_addr);
+ mhi_reg_write(mhi_dev_ctxt,
+ bhi_ctxt->bhi_base,
+ BHIE_TXVECADDR_LOW_OFFS,
+ val);
+ val = (u32)bhie_mem_info->size;
+ mhi_reg_write(mhi_dev_ctxt,
+ bhi_ctxt->bhi_base,
+ BHIE_TXVECSIZE_OFFS,
+ val);
+
+ /* Ring DB to begin Xfer */
+ mhi_reg_write_field(mhi_dev_ctxt,
+ bhi_ctxt->bhi_base,
+ BHIE_TXVECDB_OFFS,
+ BHIE_TXVECDB_SEQNUM_BMSK,
+ BHIE_TXVECDB_SEQNUM_SHFT,
+ tx_sequence);
+ read_unlock_bh(pm_xfer_lock);
+
+ timeout = jiffies + msecs_to_jiffies(bhi_ctxt->poll_timeout);
+ while (time_before(jiffies, timeout)) {
+ u32 current_seq, status;
+
+ read_lock_bh(pm_xfer_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_bh(pm_xfer_lock);
+ return -EIO;
+ }
+ val = mhi_reg_read(bhi_ctxt->bhi_base, BHIE_TXVECSTATUS_OFFS);
+ read_unlock_bh(pm_xfer_lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "TXVEC_STATUS:0x%x\n", val);
+ current_seq = (val & BHIE_TXVECSTATUS_SEQNUM_BMSK) >>
+ BHIE_TXVECSTATUS_SEQNUM_SHFT;
+ status = (val & BHIE_TXVECSTATUS_STATUS_BMSK) >>
+ BHIE_TXVECSTATUS_STATUS_SHFT;
+ if ((status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) &&
+ (current_seq == tx_sequence)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Image transfer complete\n");
+ return 0;
+ }
+ msleep(BHI_POLL_SLEEP_TIME_MS);
}
- mhi_log(MHI_MSG_INFO,
- "Mapped image to DMA addr 0x%lx:\n",
- (uintptr_t)bhi_ctxt->phy_image_loc);
- bhi_ctxt->image_size = count;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error xfering image via BHIE\n");
+ return -EIO;
+}
+
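The BHIE transfer above works in three steps: program the vector-table address and size, ring the doorbell with a monotonically increasing sequence number, then poll TXVECSTATUS until the status field reads complete and the echoed sequence matches. The sequence check is what rules out a stale completion from an earlier transfer. A standalone sketch of the completion test, with a stubbed register read in place of mhi_reg_read():

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Masks/shifts mirror the BHIE_TXVECSTATUS_* defines from mhi_bhi.h. */
#define SEQNUM_BMSK 0x3FFFFFFFu
#define STATUS_BMSK 0xC0000000u
#define STATUS_SHFT 30
#define XFER_COMPL  0x02u

/* Stub for mhi_reg_read(bhi_base, BHIE_TXVECSTATUS_OFFS): simulate the
 * device echoing sequence 7 with a completed status. */
static uint32_t read_txvecstatus(void)
{
	return (XFER_COMPL << STATUS_SHFT) | 7u;
}

static bool bhie_xfer_done(uint32_t tx_sequence)
{
	uint32_t val = read_txvecstatus();
	uint32_t seq = val & SEQNUM_BMSK;
	uint32_t status = (val & STATUS_BMSK) >> STATUS_SHFT;

	/* Both must match: a completed status alone could be stale. */
	return status == XFER_COMPL && seq == tx_sequence;
}

int main(void)
{
	assert(bhie_xfer_done(7));
	assert(!bhie_xfer_done(8));   /* stale sequence is not success */
	return 0;
}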
+static int bhi_load_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ u32 pcie_word_val = 0;
+ u32 tx_db_val = 0;
+ unsigned long timeout;
+ rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
/* Write the image size */
read_lock_bh(pm_xfer_lock);
if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
read_unlock_bh(pm_xfer_lock);
- goto bhi_copy_error;
+ return -EIO;
}
pcie_word_val = HIGH_WORD(bhi_ctxt->phy_image_loc);
mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
@@ -129,16 +248,15 @@ static ssize_t bhi_write(struct file *file,
pcie_word_val = mhi_reg_read(bhi_ctxt->bhi_base, BHI_IMGTXDB);
mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val);
-
- mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_INTVEC, 0);
read_unlock_bh(pm_xfer_lock);
- for (i = 0; i < BHI_POLL_NR_RETRIES; ++i) {
+ timeout = jiffies + msecs_to_jiffies(bhi_ctxt->poll_timeout);
+ while (time_before(jiffies, timeout)) {
u32 err = 0, errdbg1 = 0, errdbg2 = 0, errdbg3 = 0;
read_lock_bh(pm_xfer_lock);
if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
read_unlock_bh(pm_xfer_lock);
- goto bhi_copy_error;
+ return -EIO;
}
err = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRCODE);
errdbg1 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG1);
@@ -149,34 +267,83 @@ static ssize_t bhi_write(struct file *file,
BHI_STATUS_MASK,
BHI_STATUS_SHIFT);
read_unlock_bh(pm_xfer_lock);
- mhi_log(MHI_MSG_CRITICAL,
- "BHI STATUS 0x%x, err:0x%x errdbg1:0x%x errdbg2:0x%x errdbg3:0x%x\n",
- tx_db_val, err, errdbg1, errdbg2, errdbg3);
- if (BHI_STATUS_SUCCESS != tx_db_val)
- mhi_log(MHI_MSG_CRITICAL,
- "Incorrect BHI status: %d retry: %d\n",
- tx_db_val, i);
- else
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "%s 0x%x %s:0x%x %s:0x%x %s:0x%x %s:0x%x\n",
+ "BHI STATUS", tx_db_val,
+ "err", err,
+ "errdbg1", errdbg1,
+ "errdbg2", errdbg2,
+ "errdbg3", errdbg3);
+ if (tx_db_val == BHI_STATUS_SUCCESS)
break;
- usleep_range(20000, 25000);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "retrying...\n");
+ msleep(BHI_POLL_SLEEP_TIME_MS);
+ }
+
+ return (tx_db_val == BHI_STATUS_SUCCESS) ? 0 : -EIO;
+}
+
+static ssize_t bhi_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ int ret_val = 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt = file->private_data;
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ long timeout;
+
+ if (buf == NULL || 0 == count)
+ return -EIO;
+
+ if (count > BHI_MAX_IMAGE_SIZE)
+ return -ENOMEM;
+
+ ret_val = bhi_alloc_pbl_xfer(mhi_dev_ctxt, count);
+ if (ret_val)
+ return -ENOMEM;
+
+ if (copy_from_user(bhi_ctxt->image_loc, buf, count)) {
+ ret_val = -ENOMEM;
+ goto bhi_copy_error;
}
- dma_unmap_single(&mhi_dev_ctxt->dev_info->plat_dev->dev,
- bhi_ctxt->phy_image_loc,
- bhi_ctxt->image_size, DMA_TO_DEVICE);
- kfree(bhi_ctxt->unaligned_image_loc);
+ timeout = wait_event_interruptible_timeout(
+ *mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_BHI,
+ msecs_to_jiffies(bhi_ctxt->poll_timeout));
+ if (timeout <= 0 && mhi_dev_ctxt->mhi_state != MHI_STATE_BHI) {
+ ret_val = -EIO;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Timed out waiting for BHI\n");
+ goto bhi_copy_error;
+ }
+ ret_val = bhi_load_firmware(mhi_dev_ctxt);
+ if (ret_val) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to load bhi image\n");
+ }
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
+ bhi_ctxt->alloc_size,
+ bhi_ctxt->unaligned_image_loc,
+ bhi_ctxt->dma_handle);
+
+ /* Regardless of failure, move to RESET state */
ret_val = mhi_init_state_transition(mhi_dev_ctxt,
STATE_TRANSITION_RESET);
if (ret_val) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to start state change event\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to start state change event\n");
}
- return amount_copied;
+ return count;
bhi_copy_error:
- kfree(bhi_ctxt->unaligned_image_loc);
- return amount_copied;
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
+ bhi_ctxt->alloc_size,
+ bhi_ctxt->unaligned_image_loc,
+ bhi_ctxt->dma_handle);
+
+ return ret_val;
}
static const struct file_operations bhi_fops = {
@@ -184,48 +351,159 @@ static const struct file_operations bhi_fops = {
.open = bhi_open,
};
-int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device)
+int bhi_expose_dev_bhi(struct mhi_device_ctxt *mhi_dev_ctxt)
{
- struct bhi_ctxt_t *bhi_ctxt = &mhi_pcie_device->bhi_ctxt;
- int ret_val = 0;
- int r;
+ int ret_val;
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ const struct pcie_core_info *core = &mhi_dev_ctxt->core;
+ char node_name[32];
- if (NULL == mhi_pcie_device || 0 == mhi_pcie_device->core.bar0_base
- || 0 == mhi_pcie_device->core.bar0_end)
- return -EIO;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Creating dev node\n");
ret_val = alloc_chrdev_region(&bhi_ctxt->bhi_dev, 0, 1, "bhi");
if (IS_ERR_VALUE(ret_val)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to alloc char device %d\n",
- ret_val);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to alloc char device %d\n", ret_val);
return -EIO;
}
- bhi_ctxt->bhi_class = class_create(THIS_MODULE, "bhi");
- if (IS_ERR(bhi_ctxt->bhi_class)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to instantiate class %d\n",
- ret_val);
- r = PTR_RET(bhi_ctxt->bhi_class);
- goto err_class_create;
- }
cdev_init(&bhi_ctxt->cdev, &bhi_fops);
bhi_ctxt->cdev.owner = THIS_MODULE;
ret_val = cdev_add(&bhi_ctxt->cdev, bhi_ctxt->bhi_dev, 1);
- bhi_ctxt->dev = device_create(bhi_ctxt->bhi_class, NULL,
- bhi_ctxt->bhi_dev, NULL,
- "bhi");
+ snprintf(node_name, sizeof(node_name),
+ "bhi_%04X_%02u.%02u.%02u",
+ core->dev_id, core->domain, core->bus, core->slot);
+ bhi_ctxt->dev = device_create(mhi_device_drv->mhi_bhi_class,
+ NULL,
+ bhi_ctxt->bhi_dev,
+ NULL,
+ node_name);
if (IS_ERR(bhi_ctxt->dev)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to add bhi cdev\n");
- r = PTR_RET(bhi_ctxt->dev);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to add bhi cdev\n");
+ ret_val = PTR_RET(bhi_ctxt->dev);
goto err_dev_create;
}
return 0;
+
err_dev_create:
cdev_del(&bhi_ctxt->cdev);
- class_destroy(bhi_ctxt->bhi_class);
-err_class_create:
unregister_chrdev_region(MAJOR(bhi_ctxt->bhi_dev), 1);
- return r;
+ return ret_val;
+}
+
+void bhi_firmware_download(struct work_struct *work)
+{
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+ struct bhi_ctxt_t *bhi_ctxt;
+ int ret;
+ long timeout;
+
+ mhi_dev_ctxt = container_of(work, struct mhi_device_ctxt,
+ bhi_ctxt.fw_load_work);
+ bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Enter\n");
+
+ wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);
+
+ ret = bhi_load_firmware(mhi_dev_ctxt);
+ if (ret) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to Load sbl firmware\n");
+ return;
+ }
+ mhi_init_state_transition(mhi_dev_ctxt,
+ STATE_TRANSITION_RESET);
+
+ timeout = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+ mhi_dev_ctxt->dev_exec_env == MHI_EXEC_ENV_BHIE,
+ msecs_to_jiffies(bhi_ctxt->poll_timeout));
+ if (!timeout) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to Enter EXEC_ENV_BHIE\n");
+ return;
+ }
+
+ ret = bhi_load_bhie_firmware(mhi_dev_ctxt);
+ if (ret) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to Load amss firmware\n");
+ }
+}
+
+int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ struct firmware_info *fw_info = &bhi_ctxt->firmware_info;
+ struct bhie_vec_table *fw_table = &bhi_ctxt->fw_table;
+ const struct firmware *firmware;
+ struct scatterlist *itr;
+ int ret, i;
+ size_t remainder;
+ const u8 *image;
+
+ /* expose dev node to userspace */
+ if (!bhi_ctxt->manage_boot)
+ return bhi_expose_dev_bhi(mhi_dev_ctxt);
+
+ /* Make sure the minimum buffer we allocate for BHI/E is >= the SBL image */
+ while (fw_info->segment_size < fw_info->max_sbl_len)
+ fw_info->segment_size <<= 1;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "max sbl image size:%lu segment size:%lu\n",
+ fw_info->max_sbl_len, fw_info->segment_size);
+
+ /* Read the fw image */
+ ret = request_firmware(&firmware, fw_info->fw_image,
+ &mhi_dev_ctxt->plat_dev->dev);
+ if (ret) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error request firmware for:%s ret:%d\n",
+ fw_info->fw_image, ret);
+ return ret;
+ }
+
+ ret = bhi_alloc_bhie_xfer(mhi_dev_ctxt,
+ firmware->size,
+ fw_table);
+ if (ret) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error Allocating memory for firmware image\n");
+ release_firmware(firmware);
+ return ret;
+ }
+
+ /* Copy the fw image to vector table */
+ remainder = firmware->size;
+ image = firmware->data;
+ for (i = 0, itr = &fw_table->sg_list[1];
+ i < fw_table->segment_count - 1; i++, itr++) {
+ size_t to_copy = min(remainder, fw_info->segment_size);
+
+ memcpy(fw_table->bhie_mem_info[i].aligned, image, to_copy);
+ fw_table->bhi_vec_entry[i].phys_addr =
+ fw_table->bhie_mem_info[i].phys_addr;
+ fw_table->bhi_vec_entry[i].size = to_copy;
+ sg_set_buf(itr, fw_table->bhie_mem_info[i].aligned, to_copy);
+ sg_dma_address(itr) = fw_table->bhie_mem_info[i].phys_addr;
+ sg_dma_len(itr) = to_copy;
+ remainder -= to_copy;
+ image += to_copy;
+ }
+
+ /*
+ * Reuse the BHI/E pointer for BHI since we guaranteed the BHI/E
+ * segment size is >= the SBL image.
+ */
+ bhi_ctxt->phy_image_loc = sg_dma_address(&fw_table->sg_list[1]);
+ bhi_ctxt->image_size = fw_info->max_sbl_len;
+
+ fw_table->sequence++;
+ release_firmware(firmware);
+
+ /* Schedule the fw load worker, which waits for the BHI event */
+ schedule_work(&bhi_ctxt->fw_load_work);
+ return 0;
}
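When the host manages boot, bhi_probe() slices the firmware blob into fixed-size segments, copying each into an aligned buffer and recording (address, size) in both the scatterlist and the BHI vector table, with sg entry 0 reserved for the vector table itself. The slicing logic in isolation (standalone, with invented DMA addresses):

#include <stdint.h>
#include <stdio.h>

/* Slice an image into seg_size chunks the way bhi_probe() fills the
 * BHI vector table; addresses here are invented for illustration. */
struct vec_entry { uint64_t phys_addr; uint64_t size; };

int main(void)
{
	const size_t image_len = 10000;       /* stand-in firmware size */
	const size_t seg_size = 4096;
	size_t remainder = image_len;
	struct vec_entry table[8];
	int i = 0;

	while (remainder) {
		size_t to_copy = remainder < seg_size ? remainder : seg_size;

		/* real driver: memcpy(bhie_mem_info[i].aligned, image, to_copy) */
		table[i].phys_addr = 0x80000000ull + (uint64_t)i * seg_size;
		table[i].size = to_copy;
		remainder -= to_copy;
		i++;
	}
	printf("%d segments, last one %llu bytes\n",
	       i, (unsigned long long)table[i - 1].size);
	return 0;
}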
diff --git a/drivers/platform/msm/mhi/mhi_bhi.h b/drivers/platform/msm/mhi/mhi_bhi.h
index ca44f69cea42..15137ba5dfdf 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.h
+++ b/drivers/platform/msm/mhi/mhi_bhi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -42,6 +42,38 @@
#define BHI_STATUS_SUCCESS (2)
#define BHI_STATUS_RESET (0)
+/* BHIE Offsets */
+#define BHIE_OFFSET (0x0124) /* BHIE register space offset from BHI base */
+#define BHIE_MSMSOCID_OFFS (BHIE_OFFSET + 0x0000)
+#define BHIE_TXVECADDR_LOW_OFFS (BHIE_OFFSET + 0x002C)
+#define BHIE_TXVECADDR_HIGH_OFFS (BHIE_OFFSET + 0x0030)
+#define BHIE_TXVECSIZE_OFFS (BHIE_OFFSET + 0x0034)
+#define BHIE_TXVECDB_OFFS (BHIE_OFFSET + 0x003C)
+#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECDB_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_OFFS (BHIE_OFFSET + 0x0044)
+#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_TXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_TXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03)
+#define BHIE_RXVECADDR_LOW_OFFS (BHIE_OFFSET + 0x0060)
+#define BHIE_RXVECADDR_HIGH_OFFS (BHIE_OFFSET + 0x0064)
+#define BHIE_RXVECSIZE_OFFS (BHIE_OFFSET + 0x0068)
+#define BHIE_RXVECDB_OFFS (BHIE_OFFSET + 0x0070)
+#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECDB_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_OFFS (BHIE_OFFSET + 0x0078)
+#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_RXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_RXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03)
+
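The *_BMSK/*_SHFT pairs above follow the usual register-field convention: field = (reg & BMSK) >> SHFT, with the sequence number in bits 29:0 and the status in bits 31:30. A standalone round-trip check of the convention using those values:

#include <assert.h>
#include <stdint.h>

#define TXVECDB_SEQNUM_BMSK     0x3FFFFFFFu
#define TXVECDB_SEQNUM_SHFT     0
#define TXVECSTATUS_STATUS_BMSK 0xC0000000u
#define TXVECSTATUS_STATUS_SHFT 30

/* Generic field helpers matching the BMSK/SHFT convention. */
static uint32_t get_field(uint32_t reg, uint32_t bmsk, unsigned shft)
{
	return (reg & bmsk) >> shft;
}

static uint32_t set_field(uint32_t reg, uint32_t bmsk, unsigned shft,
			  uint32_t val)
{
	return (reg & ~bmsk) | ((val << shft) & bmsk);
}

int main(void)
{
	uint32_t reg = set_field(0, TXVECSTATUS_STATUS_BMSK,
				 TXVECSTATUS_STATUS_SHFT, 0x02);
	reg = set_field(reg, TXVECDB_SEQNUM_BMSK, TXVECDB_SEQNUM_SHFT, 42);

	assert(get_field(reg, TXVECSTATUS_STATUS_BMSK,
			 TXVECSTATUS_STATUS_SHFT) == 0x02);
	assert(get_field(reg, TXVECDB_SEQNUM_BMSK,
			 TXVECDB_SEQNUM_SHFT) == 42);
	return 0;
}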
#define BHI_MAJOR_VERSION 0x0
#define BHI_MINOR_VERSION 0x1
@@ -51,10 +83,12 @@
#define BHI_READBUF_SIZE sizeof(bhi_info_type)
#define BHI_MAX_IMAGE_SIZE (256 * 1024)
+#define BHI_DEFAULT_ALIGNMENT (0x1000)
-#define BHI_POLL_SLEEP_TIME 1000
-#define BHI_POLL_NR_RETRIES 10
+#define BHI_POLL_SLEEP_TIME_MS 100
+#define BHI_POLL_TIMEOUT_MS 2000
-int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device);
+int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt);
+void bhi_firmware_download(struct work_struct *work);
#endif
diff --git a/drivers/platform/msm/mhi/mhi_event.c b/drivers/platform/msm/mhi/mhi_event.c
index fe163f3895a5..ae677bae63dc 100644
--- a/drivers/platform/msm/mhi/mhi_event.c
+++ b/drivers/platform/msm/mhi/mhi_event.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,41 +26,57 @@ int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt)
int r, i;
char dt_prop[MAX_BUF_SIZE];
const struct device_node *np =
- mhi_dev_ctxt->dev_info->plat_dev->dev.of_node;
+ mhi_dev_ctxt->plat_dev->dev.of_node;
r = of_property_read_u32(np, "mhi-event-rings",
&mhi_dev_ctxt->mmio_info.nr_event_rings);
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to pull event ring info from DT, %d\n", r);
- goto dt_error;
+ return -EINVAL;
}
mhi_dev_ctxt->ev_ring_props =
kzalloc(sizeof(struct mhi_event_ring_cfg) *
mhi_dev_ctxt->mmio_info.nr_event_rings,
GFP_KERNEL);
- if (!mhi_dev_ctxt->ev_ring_props) {
- r = -ENOMEM;
- goto dt_error;
- }
+ if (!mhi_dev_ctxt->ev_ring_props)
+ return -ENOMEM;
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
+ u32 dt_configs[6];
+ int no_elements;
+
scnprintf(dt_prop, MAX_BUF_SIZE, "%s%d", "mhi-event-cfg-", i);
- r = of_property_read_u32_array(np, dt_prop,
- (u32 *)&mhi_dev_ctxt->ev_ring_props[i],
- 4);
+ no_elements = of_property_count_elems_of_size(np, dt_prop,
+ sizeof(dt_configs));
+ if (no_elements != 1)
+ goto dt_error;
+ r = of_property_read_u32_array(
+ np,
+ dt_prop,
+ dt_configs,
+ sizeof(dt_configs) / sizeof(u32));
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to pull ev ring %d info from DT %d\n",
i, r);
goto dt_error;
}
- mhi_log(MHI_MSG_INFO,
- "Pulled ev ring %d,desc:0x%x,msi_vec:0x%x,intmod%d flags0x%x\n",
- i, mhi_dev_ctxt->ev_ring_props[i].nr_desc,
- mhi_dev_ctxt->ev_ring_props[i].msi_vec,
- mhi_dev_ctxt->ev_ring_props[i].intmod,
- mhi_dev_ctxt->ev_ring_props[i].flags);
+ mhi_dev_ctxt->ev_ring_props[i].nr_desc = dt_configs[0];
+ mhi_dev_ctxt->ev_ring_props[i].msi_vec = dt_configs[1];
+ mhi_dev_ctxt->ev_ring_props[i].intmod = dt_configs[2];
+ mhi_dev_ctxt->ev_ring_props[i].chan = dt_configs[3];
+ mhi_dev_ctxt->ev_ring_props[i].priority = dt_configs[4];
+ mhi_dev_ctxt->ev_ring_props[i].flags = dt_configs[5];
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "ev ring %d,desc:0x%x,msi:0x%x,intmod%d chan:%u priority:%u flags0x%x\n",
+ i,
+ mhi_dev_ctxt->ev_ring_props[i].nr_desc,
+ mhi_dev_ctxt->ev_ring_props[i].msi_vec,
+ mhi_dev_ctxt->ev_ring_props[i].intmod,
+ mhi_dev_ctxt->ev_ring_props[i].chan,
+ mhi_dev_ctxt->ev_ring_props[i].priority,
+ mhi_dev_ctxt->ev_ring_props[i].flags);
if (GET_EV_PROPS(EV_MANAGED,
mhi_dev_ctxt->ev_ring_props[i].flags))
mhi_dev_ctxt->ev_ring_props[i].mhi_handler_ptr =
@@ -76,14 +92,18 @@ int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_dev_ctxt->ev_ring_props[i].class = MHI_SW_RING;
mhi_dev_ctxt->mmio_info.nr_sw_event_rings++;
}
- mhi_log(MHI_MSG_INFO,
- "Detected %d SW EV rings and %d HW EV rings out of %d EV rings\n",
- mhi_dev_ctxt->mmio_info.nr_sw_event_rings,
- mhi_dev_ctxt->mmio_info.nr_hw_event_rings,
- mhi_dev_ctxt->mmio_info.nr_event_rings);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Detected %d SW EV rings and %d HW EV rings out of %d EV rings\n",
+ mhi_dev_ctxt->mmio_info.nr_sw_event_rings,
+ mhi_dev_ctxt->mmio_info.nr_hw_event_rings,
+ mhi_dev_ctxt->mmio_info.nr_event_rings);
}
+
+ return 0;
dt_error:
- return r;
+ kfree(mhi_dev_ctxt->ev_ring_props);
+ mhi_dev_ctxt->ev_ring_props = NULL;
+ return -EINVAL;
}
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
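Each mhi-event-cfg-N property now carries exactly six u32 cells (descriptors, MSI vector, intmod, channel, priority, flags), and of_property_count_elems_of_size() rejects anything that is not one six-cell record. A standalone sketch of unpacking one record in the order the parser reads it; the cell values are invented:

#include <stdint.h>
#include <stdio.h>

struct ev_cfg {
	uint32_t nr_desc, msi_vec, intmod, chan, priority, flags;
};

int main(void)
{
	/* As it would arrive from of_property_read_u32_array() */
	uint32_t cells[6] = { 128, 1, 5, 100, 1, 0x11 };
	struct ev_cfg cfg = {
		.nr_desc  = cells[0],
		.msi_vec  = cells[1],
		.intmod   = cells[2],
		.chan     = cells[3],
		.priority = cells[4],   /* 1 => tasklet per the header comment */
		.flags    = cells[5],
	};

	printf("ring: %u desc, msi %u, chan %u, prio %u\n",
	       cfg.nr_desc, cfg.msi_vec, cfg.chan, cfg.priority);
	return 0;
}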
@@ -110,6 +130,9 @@ int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_local_event_ctxt[i];
spin_lock_init(&mhi_ring->ring_lock);
+ tasklet_init(&mhi_ring->ev_task, mhi_ev_task,
+ (unsigned long)mhi_ring);
+ INIT_WORK(&mhi_ring->ev_worker, process_event_ring);
}
return r;
@@ -139,6 +162,8 @@ void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
struct mhi_ring *ring,
+ struct mhi_device_ctxt *mhi_dev_ctxt,
+ int index,
u32 el_per_ring,
u32 intmodt_val,
u32 msi_vec,
@@ -148,6 +173,8 @@ static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
ev_list->mhi_msi_vector = msi_vec;
ev_list->mhi_event_ring_len = el_per_ring*sizeof(union mhi_event_pkt);
MHI_SET_EV_CTXT(EVENT_CTXT_INTMODT, ev_list, intmodt_val);
+ ring->mhi_dev_ctxt = mhi_dev_ctxt;
+ ring->index = index;
ring->len = ((size_t)(el_per_ring)*sizeof(union mhi_event_pkt));
ring->el_size = sizeof(union mhi_event_pkt);
ring->overwrite_en = 0;
@@ -180,6 +207,7 @@ void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt)
event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
mhi_local_event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
mhi_event_ring_init(event_ctxt, mhi_local_event_ctxt,
+ mhi_dev_ctxt, i,
mhi_dev_ctxt->ev_ring_props[i].nr_desc,
mhi_dev_ctxt->ev_ring_props[i].intmod,
mhi_dev_ctxt->ev_ring_props[i].msi_vec,
@@ -195,7 +223,7 @@ int init_local_ev_ring_by_type(struct mhi_device_ctxt *mhi_dev_ctxt,
int ret_val = 0;
u32 i;
- mhi_log(MHI_MSG_INFO, "Entered\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
if (GET_EV_PROPS(EV_TYPE,
mhi_dev_ctxt->ev_ring_props[i].flags) == type &&
@@ -207,9 +235,10 @@ int init_local_ev_ring_by_type(struct mhi_device_ctxt *mhi_dev_ctxt,
return ret_val;
}
ring_ev_db(mhi_dev_ctxt, i);
- mhi_log(MHI_MSG_INFO, "Finished ev ring init %d\n", i);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Finished ev ring init %d\n", i);
}
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
return 0;
}
@@ -228,7 +257,7 @@ int mhi_add_elements_to_event_rings(struct mhi_device_ctxt *mhi_dev_ctxt,
MHI_ER_DATA_TYPE);
break;
default:
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Unrecognized event stage, %d\n", new_state);
ret_val = -EINVAL;
break;
@@ -247,23 +276,18 @@ int mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
&mhi_dev_ctxt->mhi_local_event_ctxt[ring_index];
spinlock_t *lock = &event_ctxt->ring_lock;
- if (NULL == mhi_dev_ctxt || 0 == nr_ev_el) {
- mhi_log(MHI_MSG_ERROR, "Bad Input data, quitting\n");
- return -EINVAL;
- }
-
spin_lock_irqsave(lock, flags);
- mhi_log(MHI_MSG_INFO, "mmio_addr = 0x%p, mmio_len = 0x%llx\n",
- mhi_dev_ctxt->mmio_info.mmio_addr,
- mhi_dev_ctxt->mmio_info.mmio_len);
- mhi_log(MHI_MSG_INFO, "Initializing event ring %d with %d desc\n",
- ring_index, nr_ev_el);
+ mhi_log(mhi_dev_ctxt,
+ MHI_MSG_INFO,
+ "Initializing event ring %d with %d desc\n",
+ ring_index,
+ nr_ev_el);
for (i = 0; i < nr_ev_el - 1; ++i) {
ret_val = ctxt_add_element(event_ctxt, (void *)&ev_pkt);
if (0 != ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to insert el in ev ctxt\n");
break;
}
@@ -279,7 +303,8 @@ void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_event_ctxt *ev_ctxt;
struct mhi_ring *local_ev_ctxt;
- mhi_log(MHI_MSG_VERBOSE, "Resetting event index %d\n", index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Resetting event index %d\n", index);
ev_ctxt =
&mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[index];
local_ev_ctxt =
diff --git a/drivers/platform/msm/mhi/mhi_iface.c b/drivers/platform/msm/mhi/mhi_iface.c
index 395e19c91f35..f1c562974816 100644
--- a/drivers/platform/msm/mhi/mhi_iface.c
+++ b/drivers/platform/msm/mhi/mhi_iface.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -32,12 +32,11 @@
#include "mhi_hwio.h"
#include "mhi_bhi.h"
-struct mhi_pcie_devices mhi_devices;
+struct mhi_device_driver *mhi_device_drv;
static int mhi_pci_probe(struct pci_dev *pcie_device,
const struct pci_device_id *mhi_device_id);
static int __exit mhi_plat_remove(struct platform_device *pdev);
-void *mhi_ipc_log;
static DEFINE_PCI_DEVICE_TABLE(mhi_pcie_device_id) = {
{ MHI_PCIE_VENDOR_ID, MHI_PCIE_DEVICE_ID_9x35,
@@ -59,129 +58,71 @@ static const struct of_device_id mhi_plat_match[] = {
static void mhi_msm_fixup(struct pci_dev *pcie_device)
{
if (pcie_device->class == PCI_CLASS_NOT_DEFINED) {
- mhi_log(MHI_MSG_INFO, "Setting msm pcie class\n");
pcie_device->class = PCI_CLASS_STORAGE_SCSI;
}
}
-int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
+int mhi_ctxt_init(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int ret_val = 0;
- u32 i = 0, j = 0;
- u32 requested_msi_number = 32, actual_msi_number = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
- struct pci_dev *pcie_device = NULL;
+ u32 j = 0;
- if (NULL == mhi_pcie_dev)
- return -EINVAL;
- pcie_device = mhi_pcie_dev->pcie_device;
-
- ret_val = mhi_init_pcie_device(mhi_pcie_dev);
- if (ret_val) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to initialize pcie device, ret %d\n",
- ret_val);
- return -ENODEV;
- }
- ret_val = mhi_init_device_ctxt(mhi_pcie_dev, &mhi_pcie_dev->mhi_ctxt);
- if (ret_val) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to initialize main MHI ctxt ret %d\n",
- ret_val);
- goto msi_config_err;
- }
- ret_val = mhi_esoc_register(&mhi_pcie_dev->mhi_ctxt);
+ ret_val = mhi_init_device_ctxt(mhi_dev_ctxt);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to register with esoc ret %d.\n",
- ret_val);
- }
-
- device_disable_async_suspend(&pcie_device->dev);
- ret_val = pci_enable_msi_range(pcie_device, 1, requested_msi_number);
- if (IS_ERR_VALUE(ret_val)) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to enable MSIs for pcie dev ret_val %d.\n",
- ret_val);
- goto msi_config_err;
- } else if (ret_val) {
- mhi_log(MHI_MSG_INFO,
- "Hrmmm, got fewer MSIs than we requested. Requested %d, got %d.\n",
- requested_msi_number, ret_val);
- actual_msi_number = ret_val;
- } else {
- mhi_log(MHI_MSG_VERBOSE,
- "Got all requested MSIs, moving on\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to initialize main MHI ctxt ret %d\n", ret_val);
+ return ret_val;
}
- mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
for (j = 0; j < mhi_dev_ctxt->mmio_info.nr_event_rings; j++) {
- mhi_log(MHI_MSG_VERBOSE,
- "MSI_number = %d, event ring number = %d\n",
- mhi_dev_ctxt->ev_ring_props[j].msi_vec, j);
-
- ret_val = request_irq(pcie_device->irq +
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "MSI_number = %d, event ring number = %d\n",
+ mhi_dev_ctxt->ev_ring_props[j].msi_vec, j);
+
+ /* MSI vector is outside the requested IRQ range */
+ if (mhi_dev_ctxt->core.max_nr_msis <=
+ mhi_dev_ctxt->ev_ring_props[j].msi_vec) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "max msi supported:%d request:%d ev:%d\n",
+ mhi_dev_ctxt->core.max_nr_msis,
+ mhi_dev_ctxt->ev_ring_props[j].msi_vec,
+ j);
+ goto irq_error;
+ }
+ ret_val = request_irq(mhi_dev_ctxt->core.irq_base +
mhi_dev_ctxt->ev_ring_props[j].msi_vec,
mhi_dev_ctxt->ev_ring_props[j].mhi_handler_ptr,
IRQF_NO_SUSPEND,
"mhi_drv",
- (void *)&pcie_device->dev);
+ (void *)mhi_dev_ctxt);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to register handler for MSI ret_val = %d\n",
- ret_val);
- goto msi_config_err;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to register handler for MSI ret_val = %d\n",
+ ret_val);
+ goto irq_error;
}
}
- mhi_pcie_dev->core.irq_base = pcie_device->irq;
- mhi_log(MHI_MSG_VERBOSE,
- "Setting IRQ Base to 0x%x\n", mhi_pcie_dev->core.irq_base);
- mhi_pcie_dev->core.max_nr_msis = requested_msi_number;
- ret_val = mhi_init_pm_sysfs(&pcie_device->dev);
- if (ret_val) {
- mhi_log(MHI_MSG_ERROR, "Failed to setup sysfs ret %d\n",
- ret_val);
- goto sysfs_config_err;
- }
- if (!mhi_init_debugfs(&mhi_pcie_dev->mhi_ctxt))
- mhi_log(MHI_MSG_ERROR, "Failed to init debugfs.\n");
-
- mhi_pcie_dev->mhi_ctxt.mmio_info.mmio_addr =
- mhi_pcie_dev->core.bar0_base;
- pcie_device->dev.platform_data = &mhi_pcie_dev->mhi_ctxt;
- mhi_pcie_dev->mhi_ctxt.dev_info->plat_dev->dev.platform_data =
- &mhi_pcie_dev->mhi_ctxt;
- ret_val = mhi_reg_notifiers(&mhi_pcie_dev->mhi_ctxt);
- if (ret_val) {
- mhi_log(MHI_MSG_ERROR, "Failed to register for notifiers\n");
- goto mhi_state_transition_error;
- }
- mhi_log(MHI_MSG_INFO,
- "Finished all driver probing returning ret_val %d.\n",
- ret_val);
- return ret_val;
-mhi_state_transition_error:
+ mhi_dev_ctxt->mmio_info.mmio_addr = mhi_dev_ctxt->core.bar0_base;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "exit\n");
+ return 0;
+
+irq_error:
kfree(mhi_dev_ctxt->state_change_work_item_list.q_lock);
- kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
- kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
- dma_free_coherent(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
kfree(mhi_dev_ctxt->ev_ring_props);
- mhi_rem_pm_sysfs(&pcie_device->dev);
-sysfs_config_err:
- for (; i >= 0; --i)
- free_irq(pcie_device->irq + i, &pcie_device->dev);
- debugfs_remove_recursive(mhi_pcie_dev->mhi_ctxt.mhi_parent_folder);
-msi_config_err:
- pci_disable_device(pcie_device);
- return ret_val;
+ /* j is u32, so "j >= 0" never terminates; also free_irq() needs
+ * the same dev_id cookie request_irq() was registered with.
+ */
+ while (j--)
+ free_irq(mhi_dev_ctxt->core.irq_base + j, mhi_dev_ctxt);
+
+ return -EINVAL;
}
static const struct dev_pm_ops pm_ops = {
@@ -204,73 +145,154 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
const struct pci_device_id *mhi_device_id)
{
int ret_val = 0;
- struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
struct platform_device *plat_dev;
- struct mhi_device_ctxt *mhi_dev_ctxt;
- u32 nr_dev = mhi_devices.nr_of_devices;
+ struct mhi_device_ctxt *mhi_dev_ctxt = NULL, *itr;
+ u32 domain = pci_domain_nr(pcie_device->bus);
+ u32 bus = pcie_device->bus->number;
+ u32 dev_id = pcie_device->device;
+ u32 slot = PCI_SLOT(pcie_device->devfn);
+ unsigned long msi_requested, msi_required;
+ struct msm_pcie_register_event *mhi_pci_link_event;
+
+ /* Find the correct device context based on BDF & dev_id */
+ mutex_lock(&mhi_device_drv->lock);
+ list_for_each_entry(itr, &mhi_device_drv->head, node) {
+ struct pcie_core_info *core = &itr->core;
+
+ if (core->domain == domain &&
+ core->bus == bus &&
+ core->dev_id == dev_id &&
+ core->slot == slot) {
+ mhi_dev_ctxt = itr;
+ break;
+ }
+ }
+ mutex_unlock(&mhi_device_drv->lock);
+ if (!mhi_dev_ctxt)
+ return -EPROBE_DEFER;
- mhi_log(MHI_MSG_INFO, "Entering\n");
- mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
- if (mhi_devices.nr_of_devices + 1 > MHI_MAX_SUPPORTED_DEVICES) {
- mhi_log(MHI_MSG_ERROR, "Error: Too many devices\n");
- return -ENOMEM;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Processing Domain:%02u Bus:%04u dev:0x%04x slot:%04u\n",
+ domain, bus, dev_id, slot);
+
+ ret_val = of_property_read_u32(mhi_dev_ctxt->plat_dev->dev.of_node,
+ "mhi-event-rings",
+ (u32 *)&msi_required);
+ if (ret_val) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to pull ev ring info from DT, %d\n", ret_val);
+ return ret_val;
}
- mhi_devices.nr_of_devices++;
- plat_dev = mhi_devices.device_list[nr_dev].plat_dev;
+ plat_dev = mhi_dev_ctxt->plat_dev;
pcie_device->dev.of_node = plat_dev->dev.of_node;
- mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
+ INIT_WORK(&mhi_dev_ctxt->st_thread_worker, mhi_state_change_worker);
mutex_init(&mhi_dev_ctxt->pm_lock);
rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
- tasklet_init(&mhi_dev_ctxt->ev_task,
- mhi_ctrl_ev_task,
- (unsigned long)mhi_dev_ctxt);
-
+ init_completion(&mhi_dev_ctxt->cmd_complete);
mhi_dev_ctxt->flags.link_up = 1;
- ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1);
- mhi_pcie_dev->pcie_device = pcie_device;
- mhi_pcie_dev->mhi_pcie_driver = &mhi_pcie_driver;
- mhi_pcie_dev->mhi_pci_link_event.events =
- (MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_WAKEUP);
- mhi_pcie_dev->mhi_pci_link_event.user = pcie_device;
- mhi_pcie_dev->mhi_pci_link_event.callback = mhi_link_state_cb;
- mhi_pcie_dev->mhi_pci_link_event.notify.data = mhi_pcie_dev;
- ret_val = msm_pcie_register_event(&mhi_pcie_dev->mhi_pci_link_event);
+
+ /* Setup bus scale */
+ mhi_dev_ctxt->bus_scale_table = msm_bus_cl_get_pdata(plat_dev);
+ if (!mhi_dev_ctxt->bus_scale_table)
+ return -ENODATA;
+ mhi_dev_ctxt->bus_client = msm_bus_scale_register_client
+ (mhi_dev_ctxt->bus_scale_table);
+ if (!mhi_dev_ctxt->bus_client)
+ return -EINVAL;
+ mhi_set_bus_request(mhi_dev_ctxt, 1);
+
+ mhi_dev_ctxt->pcie_device = pcie_device;
+
+ mhi_pci_link_event = &mhi_dev_ctxt->mhi_pci_link_event;
+ mhi_pci_link_event->events =
+ (MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_WAKEUP);
+ mhi_pci_link_event->user = pcie_device;
+ mhi_pci_link_event->callback = mhi_link_state_cb;
+ mhi_pci_link_event->notify.data = mhi_dev_ctxt;
+ ret_val = msm_pcie_register_event(mhi_pci_link_event);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to register for link notifications %d.\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to reg for link notifications %d\n", ret_val);
+ return ret_val;
+ }
+
+ dev_set_drvdata(&pcie_device->dev, mhi_dev_ctxt);
+
+ mhi_dev_ctxt->core.pci_master = true;
+ ret_val = mhi_init_pcie_device(mhi_dev_ctxt);
+ if (ret_val) {
+ mhi_log(mhi_dev_ctxt,
+ MHI_MSG_CRITICAL,
+ "Failed to initialize pcie device, ret %d\n",
ret_val);
return ret_val;
}
+ pci_set_master(pcie_device);
+ device_disable_async_suspend(&pcie_device->dev);
- /* Initialize MHI CNTXT */
- ret_val = mhi_ctxt_init(mhi_pcie_dev);
+ ret_val = mhi_esoc_register(mhi_dev_ctxt);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
- "MHI Initialization failed, ret %d\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to reg with esoc ret %d\n", ret_val);
+ }
+
+ /* Number of MSIs requested must be a power of 2 */
+ msi_requested = 1 << find_last_bit(&msi_required, 32);
+ if (msi_requested < msi_required)
+ msi_requested <<= 1;
+
+ ret_val = pci_enable_msi_range(pcie_device, 1, msi_requested);
+ if (IS_ERR_VALUE(ret_val) || (ret_val < msi_requested)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to enable MSIs for pcie dev ret_val %d.\n",
ret_val);
+ return -EIO;
+ }
+
+ mhi_dev_ctxt->core.max_nr_msis = msi_requested;
+ mhi_dev_ctxt->core.irq_base = pcie_device->irq;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Setting IRQ Base to 0x%x\n", mhi_dev_ctxt->core.irq_base);
+
+ /* Initialize MHI CNTXT */
+ ret_val = mhi_ctxt_init(mhi_dev_ctxt);
+ if (ret_val) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "MHI Initialization failed, ret %d\n", ret_val);
goto deregister_pcie;
}
- pci_set_master(mhi_pcie_dev->pcie_device);
+
+ mhi_init_pm_sysfs(&pcie_device->dev);
+ mhi_init_debugfs(mhi_dev_ctxt);
+ mhi_reg_notifiers(mhi_dev_ctxt);
+
+ /* setup shadow pm functions */
+ mhi_dev_ctxt->assert_wake = mhi_assert_device_wake;
+ mhi_dev_ctxt->deassert_wake = mhi_deassert_device_wake;
+ mhi_dev_ctxt->runtime_get = mhi_master_mode_runtime_get;
+ mhi_dev_ctxt->runtime_put = mhi_master_mode_runtime_put;
mutex_lock(&mhi_dev_ctxt->pm_lock);
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_pm_state = MHI_PM_POR;
- ret_val = set_mhi_base_state(mhi_pcie_dev);
+ ret_val = set_mhi_base_state(mhi_dev_ctxt);
+
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt,
+ MHI_MSG_ERROR,
"Error Setting MHI Base State %d\n", ret_val);
goto unlock_pm_lock;
}
if (mhi_dev_ctxt->base_state == STATE_TRANSITION_BHI) {
- ret_val = bhi_probe(mhi_pcie_dev);
+ ret_val = bhi_probe(mhi_dev_ctxt);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Error with bhi_probe ret:%d", ret_val);
goto unlock_pm_lock;
}
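pci_enable_msi_range() wants a power-of-two vector count, so the probe rounds the DT event-ring count up: take 1 << find_last_bit() and shift once more if that undershoots. The same rounding in portable form (standalone; the loop replaces find_last_bit()):

#include <assert.h>
#include <stdio.h>

/* Round up to a power of two, matching the driver's behavior for
 * any need >= 1: 5 -> 8, 8 -> 8, 1 -> 1. */
static unsigned long round_up_pow2(unsigned long need)
{
	unsigned long got = 1;

	while (got < need)
		got <<= 1;
	return got;
}

int main(void)
{
	assert(round_up_pow2(1) == 1);
	assert(round_up_pow2(5) == 8);
	assert(round_up_pow2(8) == 8);
	printf("ok\n");
	return 0;
}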
@@ -312,33 +334,144 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
unlock_pm_lock:
mutex_unlock(&mhi_dev_ctxt->pm_lock);
deregister_pcie:
- msm_pcie_deregister_event(&mhi_pcie_dev->mhi_pci_link_event);
+ msm_pcie_deregister_event(&mhi_dev_ctxt->mhi_pci_link_event);
return ret_val;
}
static int mhi_plat_probe(struct platform_device *pdev)
{
- u32 nr_dev = mhi_devices.nr_of_devices;
+ int r = 0, len;
struct mhi_device_ctxt *mhi_dev_ctxt;
- int r = 0;
+ struct pcie_core_info *core;
+ char node[32];
+ struct device_node *of_node = pdev->dev.of_node;
+ u64 address_window[2];
- mhi_log(MHI_MSG_INFO, "Entered\n");
- mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+ if (of_node == NULL)
+ return -ENODEV;
- mhi_dev_ctxt->bus_scale_table = msm_bus_cl_get_pdata(pdev);
- if (!mhi_dev_ctxt->bus_scale_table)
- return -ENODATA;
- mhi_dev_ctxt->bus_client = msm_bus_scale_register_client
- (mhi_dev_ctxt->bus_scale_table);
- if (!mhi_dev_ctxt->bus_client)
- return -EINVAL;
+ pdev->id = of_alias_get_id(of_node, "mhi");
+ if (pdev->id < 0)
+ return -ENODEV;
- mhi_devices.device_list[nr_dev].plat_dev = pdev;
- r = dma_set_mask(&pdev->dev, MHI_DMA_MASK);
+ mhi_dev_ctxt = devm_kzalloc(&pdev->dev,
+ sizeof(*mhi_dev_ctxt),
+ GFP_KERNEL);
+ if (!mhi_dev_ctxt)
+ return -ENOMEM;
+
+ if (!of_find_property(of_node, "qcom,mhi-address-window", &len))
+ return -ENODEV;
+
+ if (len != sizeof(address_window))
+ return -ENODEV;
+
+ r = of_property_read_u64_array(of_node,
+ "qcom,mhi-address-window",
+ address_window,
+ sizeof(address_window) / sizeof(u64));
+ if (r)
+ return r;
+
+ core = &mhi_dev_ctxt->core;
+ r = of_property_read_u32(of_node, "qcom,pci-dev_id", &core->dev_id);
+ if (r)
+ return r;
+
+ r = of_property_read_u32(of_node, "qcom,pci-slot", &core->slot);
+ if (r)
+ return r;
+
+ r = of_property_read_u32(of_node, "qcom,pci-domain", &core->domain);
if (r)
- mhi_log(MHI_MSG_CRITICAL,
+ return r;
+
+ r = of_property_read_u32(of_node, "qcom,pci-bus", &core->bus);
+ if (r)
+ return r;
+
+ snprintf(node, sizeof(node),
+ "mhi_%04x_%02u.%02u.%02u",
+ core->dev_id, core->domain, core->bus, core->slot);
+ mhi_dev_ctxt->mhi_ipc_log =
+ ipc_log_context_create(MHI_IPC_LOG_PAGES, node, 0);
+ if (!mhi_dev_ctxt->mhi_ipc_log)
+ pr_err("%s: Error creating ipc_log buffer\n", __func__);
+
+ r = of_property_read_u32(of_node, "qcom,mhi-ready-timeout",
+ &mhi_dev_ctxt->poll_reset_timeout_ms);
+ if (r)
+ mhi_dev_ctxt->poll_reset_timeout_ms =
+ MHI_READY_STATUS_TIMEOUT_MS;
+
+ mhi_dev_ctxt->dev_space.start_win_addr = address_window[0];
+ mhi_dev_ctxt->dev_space.end_win_addr = address_window[1];
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Start Addr:0x%llx End_Addr:0x%llx\n",
+ mhi_dev_ctxt->dev_space.start_win_addr,
+ mhi_dev_ctxt->dev_space.end_win_addr);
+
+ r = of_property_read_u32(of_node, "qcom,bhi-alignment",
+ &mhi_dev_ctxt->bhi_ctxt.alignment);
+ if (r)
+ mhi_dev_ctxt->bhi_ctxt.alignment = BHI_DEFAULT_ALIGNMENT;
+
+ r = of_property_read_u32(of_node, "qcom,bhi-poll-timeout",
+ &mhi_dev_ctxt->bhi_ctxt.poll_timeout);
+ if (r)
+ mhi_dev_ctxt->bhi_ctxt.poll_timeout = BHI_POLL_TIMEOUT_MS;
+
+ mhi_dev_ctxt->bhi_ctxt.manage_boot =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,mhi-manage-boot");
+ if (mhi_dev_ctxt->bhi_ctxt.manage_boot) {
+ struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+ struct firmware_info *fw_info = &bhi_ctxt->firmware_info;
+
+ r = of_property_read_string(of_node, "qcom,mhi-fw-image",
+ &fw_info->fw_image);
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error reading DT node 'qcom,mhi-fw-image'\n");
+ return r;
+ }
+ r = of_property_read_u32(of_node, "qcom,mhi-max-sbl",
+ (u32 *)&fw_info->max_sbl_len);
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error reading DT node 'qcom,mhi-max-sbl'\n");
+ return r;
+ }
+ r = of_property_read_u32(of_node, "qcom,mhi-sg-size",
+ (u32 *)&fw_info->segment_size);
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error reading DT node 'qcom,mhi-sg-size'\n");
+ return r;
+ }
+ INIT_WORK(&bhi_ctxt->fw_load_work, bhi_firmware_download);
+ }
+
+ mhi_dev_ctxt->flags.bb_required =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,mhi-bb-required");
+
+ mhi_dev_ctxt->plat_dev = pdev;
+ platform_set_drvdata(pdev, mhi_dev_ctxt);
+
+ r = dma_set_mask(&pdev->dev, MHI_DMA_MASK);
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to set mask for DMA ret %d\n", r);
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ return r;
+ }
+
+ mhi_dev_ctxt->parent = mhi_device_drv->parent;
+ mutex_lock(&mhi_device_drv->lock);
+ list_add_tail(&mhi_dev_ctxt->node, &mhi_device_drv->head);
+ mutex_unlock(&mhi_device_drv->lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
+
return 0;
}
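Probing is now a two-stage handshake: mhi_plat_probe() builds a context from DT, names its ipc-log buffer after the device's BDF tuple, and appends it to the driver-global list under a mutex; the later PCI probe walks that list matching on the same domain/bus/slot/device tuple and defers with -EPROBE_DEFER when no entry exists yet. A kernel-context sketch of the lookup side, on simplified types rather than the driver's structs:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct ctxt {
	struct list_head node;
	u32 domain, bus, slot, dev_id;
};

static LIST_HEAD(ctxt_list);
static DEFINE_MUTEX(ctxt_lock);

/* Platform probe list_add_tail()s entries; PCI probe matches by BDF. */
static struct ctxt *find_ctxt(u32 domain, u32 bus, u32 slot, u32 dev_id)
{
	struct ctxt *itr, *found = NULL;

	mutex_lock(&ctxt_lock);
	list_for_each_entry(itr, &ctxt_list, node) {
		if (itr->domain == domain && itr->bus == bus &&
		    itr->slot == slot && itr->dev_id == dev_id) {
			found = itr;
			break;
		}
	}
	mutex_unlock(&ctxt_lock);
	return found;   /* caller returns -EPROBE_DEFER when NULL */
}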
@@ -354,42 +487,59 @@ static struct platform_driver mhi_plat_driver = {
static void __exit mhi_exit(void)
{
- ipc_log_context_destroy(mhi_ipc_log);
pci_unregister_driver(&mhi_pcie_driver);
platform_driver_unregister(&mhi_plat_driver);
}
static int __exit mhi_plat_remove(struct platform_device *pdev)
{
- platform_driver_unregister(&mhi_plat_driver);
+ struct mhi_device_ctxt *mhi_dev_ctxt = platform_get_drvdata(pdev);
+
+ ipc_log_context_destroy(mhi_dev_ctxt->mhi_ipc_log);
return 0;
}
static int __init mhi_init(void)
{
int r;
+ struct mhi_device_driver *mhi_dev_drv;
+
+ mhi_dev_drv = kmalloc(sizeof(*mhi_dev_drv), GFP_KERNEL);
+ if (mhi_dev_drv == NULL)
+ return -ENOMEM;
+
+ mutex_init(&mhi_dev_drv->lock);
+ mutex_lock(&mhi_dev_drv->lock);
+ INIT_LIST_HEAD(&mhi_dev_drv->head);
+ mutex_unlock(&mhi_dev_drv->lock);
+ mhi_dev_drv->mhi_bhi_class = class_create(THIS_MODULE, "bhi");
+ if (IS_ERR(mhi_dev_drv->mhi_bhi_class)) {
+ pr_err("Error creating mhi_bhi_class\n");
+ r = PTR_ERR(mhi_dev_drv->mhi_bhi_class);
+ goto class_error;
+ }
+ mhi_dev_drv->parent = debugfs_create_dir("mhi", NULL);
+ mhi_device_drv = mhi_dev_drv;
- mhi_log(MHI_MSG_INFO, "Entered\n");
r = platform_driver_register(&mhi_plat_driver);
if (r) {
- mhi_log(MHI_MSG_INFO, "Failed to probe platform ret %d\n", r);
- return r;
+ pr_err("%s: Failed to probe platform ret %d\n", __func__, r);
+ goto platform_error;
}
r = pci_register_driver(&mhi_pcie_driver);
if (r) {
- mhi_log(MHI_MSG_INFO,
- "Failed to register pcie drv ret %d\n", r);
+ pr_err("%s: Failed to register pcie drv ret %d\n", __func__, r);
goto error;
}
- mhi_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES, "mhi", 0);
- if (!mhi_ipc_log) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to create IPC logging context\n");
- }
- mhi_log(MHI_MSG_INFO, "Exited\n");
+
return 0;
error:
- pci_unregister_driver(&mhi_pcie_driver);
+ platform_driver_unregister(&mhi_plat_driver);
+platform_error:
+ class_destroy(mhi_device_drv->mhi_bhi_class);
+
+class_error:
+ kfree(mhi_dev_drv);
+ mhi_device_drv = NULL;
return r;
}
@@ -407,7 +557,7 @@ DECLARE_PCI_FIXUP_HEADER(MHI_PCIE_VENDOR_ID,
module_exit(mhi_exit);
-module_init(mhi_init);
+subsys_initcall(mhi_init);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("MHI_CORE");
diff --git a/drivers/platform/msm/mhi/mhi_init.c b/drivers/platform/msm/mhi/mhi_init.c
index a496c81239bf..b6edf707798b 100644
--- a/drivers/platform/msm/mhi/mhi_init.c
+++ b/drivers/platform/msm/mhi/mhi_init.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -53,12 +53,12 @@ size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
(NR_OF_CMD_RINGS * sizeof(struct mhi_chan_ctxt)) +
(mhi_dev_ctxt->mmio_info.nr_event_rings *
sizeof(struct mhi_event_ctxt));
- mhi_log(MHI_MSG_INFO, "Reserved %zd bytes for context info\n",
- mhi_dev_mem);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Reserved %zd bytes for context info\n", mhi_dev_mem);
/*Calculate size needed for cmd TREs */
mhi_dev_mem += (CMD_EL_PER_RING * sizeof(union mhi_cmd_pkt));
- mhi_log(MHI_MSG_INFO, "Final bytes for MHI device space %zd\n",
- mhi_dev_mem);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Final bytes for MHI device space %zd\n", mhi_dev_mem);
return mhi_dev_mem;
}
@@ -105,23 +105,6 @@ void init_local_chan_ctxt(struct mhi_ring *chan_ctxt,
chan_ctxt->overwrite_en = 0;
}
-int populate_bb_list(struct list_head *bb_list, int num_bb)
-{
- struct mhi_buf_info *mhi_buf = NULL;
- int i;
-
- for (i = 0; i < num_bb; ++i) {
- mhi_buf = kzalloc(sizeof(struct mhi_buf_info), GFP_KERNEL);
- if (!mhi_buf)
- return -ENOMEM;
- mhi_buf->bb_p_addr = 0;
- mhi_buf->bb_v_addr = NULL;
- mhi_log(MHI_MSG_INFO,
- "Allocated BB v_addr 0x%p, p_addr 0x%llx\n",
- mhi_buf->bb_v_addr, (u64)mhi_buf->bb_p_addr);
- }
- return 0;
-}
/**
* mhi_cmd_ring_init- Initialization of the command ring
*
@@ -153,91 +136,6 @@ static int mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
return 0;
}
-/*
- * The device can have severe addressing limitations, and in this case
- * the MHI driver may be restricted on where memory can be allocated.
- *
- * The allocation of the MHI control data structures takes place as one
- * big, physically contiguous allocation.
- * The device's addressing window, must be placed around that control segment
- * allocation.
- * Here we attempt to do this by building an addressing window around the
- * initial allocated control segment.
- *
- * The window size is specified by the device and must be contiguous,
- * but depending on where the control segment was allocated, it may be
- * necessary to leave more room, before the ctrl segment start or after
- * the ctrl segment end.
- * The following assumptions are made:
- * Assumption: 1. size of allocated ctrl seg << (device allocation window / 2)
- * 2. allocated ctrl seg is physically contiguous
- */
-static int calculate_mhi_addressing_window(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- u64 dma_dev_mem_start = 0;
- u64 dma_seg_size = 0;
- u64 dma_max_addr = (dma_addr_t)(-1);
- u64 dev_address_limit = 0;
- int r = 0;
- const struct device_node *np =
- mhi_dev_ctxt->dev_info->plat_dev->dev.of_node;
-
- dma_dev_mem_start = mhi_dev_ctxt->dev_space.dma_dev_mem_start;
- r = of_property_read_u64(np, "mhi-dev-address-win-size",
- &dev_address_limit);
- if (r) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to get device addressing limit ret %d",
- r);
- return r;
- }
- /* Mask off the last 3 bits for address calculation */
- dev_address_limit &= ~0x7;
- mhi_log(MHI_MSG_INFO, "Device Addressing limit 0x%llx\n",
- dev_address_limit);
- dma_seg_size = dev_address_limit / 2;
-
- /*
- * The region of the allocated control segment is within the
- * first half of the device's addressing limit
- */
- if (dma_dev_mem_start < dma_seg_size) {
- mhi_dev_ctxt->dev_space.start_win_addr = 0;
- mhi_dev_ctxt->dev_space.end_win_addr =
- dma_dev_mem_start + dma_seg_size +
- (dma_seg_size - dma_dev_mem_start);
- } else if (dma_dev_mem_start >= dma_seg_size &&
- dma_dev_mem_start <= (dma_max_addr - dma_seg_size)) {
- /*
- * The start of the control segment is located past
- * halfway point of the device's addressing limit
- * Place the control segment in the middle of the device's
- * addressing range
- */
- mhi_dev_ctxt->dev_space.start_win_addr =
- dma_dev_mem_start - dma_seg_size;
- mhi_dev_ctxt->dev_space.end_win_addr =
- dma_dev_mem_start + dma_seg_size;
- } else if (dma_dev_mem_start > (dma_max_addr - dma_seg_size)) {
- /*
- * The start of the control segment is located at the tail end
- * of the host addressing space. Leave extra addressing space
- * at window start
- */
- mhi_dev_ctxt->dev_space.start_win_addr = dma_dev_mem_start;
- mhi_dev_ctxt->dev_space.start_win_addr -=
- dma_seg_size + (dma_seg_size -
- (dma_max_addr - dma_dev_mem_start));
- mhi_dev_ctxt->dev_space.end_win_addr = dma_max_addr;
- }
- mhi_log(MHI_MSG_INFO,
- "MHI start address at 0x%llx, Window Start 0x%llx Window End 0x%llx\n",
- (u64)dma_dev_mem_start,
- (u64)mhi_dev_ctxt->dev_space.start_win_addr,
- (u64)mhi_dev_ctxt->dev_space.end_win_addr);
- return 0;
-}
-
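
The helper removed above derived the device addressing window by centering it on the DMA start of the control segment, clamping at both ends of the host address space; the window is now taken directly from DT (address_window[0]/[1] in the probe). A standalone model of the three-case placement it used to perform, under its own stated assumptions (control segment much smaller than half the window, and physically contiguous); all values are illustrative:

/* Illustrative model of the deleted window-placement logic. */
#include <stdint.h>
#include <stdio.h>

static void place_window(uint64_t seg_start, uint64_t limit,
			 uint64_t max_addr, uint64_t *win_start,
			 uint64_t *win_end)
{
	uint64_t half = (limit & ~0x7ULL) / 2;	/* mask low 3 bits, halve */

	if (seg_start < half) {
		/* Segment near the bottom: anchor the window at 0 */
		*win_start = 0;
		*win_end = 2 * half;
	} else if (seg_start <= max_addr - half) {
		/* Segment in the middle: center the window on it */
		*win_start = seg_start - half;
		*win_end = seg_start + half;
	} else {
		/* Segment near the top: anchor the window at max_addr */
		*win_start = max_addr - 2 * half;
		*win_end = max_addr;
	}
}

int main(void)
{
	uint64_t s, e;

	place_window(0x100000000ULL, 0x80000000ULL, UINT64_MAX, &s, &e);
	printf("win 0x%llx-0x%llx\n", (unsigned long long)s,
	       (unsigned long long)e);
	return 0;
}
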
int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
{
size_t mhi_mem_index = 0, ring_len;
@@ -249,12 +147,12 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
calculate_mhi_space(mhi_dev_ctxt);
mhi_dev_ctxt->dev_space.dev_mem_start =
- dma_alloc_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
+ dma_alloc_coherent(&mhi_dev_ctxt->plat_dev->dev,
mhi_dev_ctxt->dev_space.dev_mem_len,
&mhi_dev_ctxt->dev_space.dma_dev_mem_start,
GFP_KERNEL);
if (!mhi_dev_ctxt->dev_space.dev_mem_start) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to allocate memory of size %zd bytes\n",
mhi_dev_ctxt->dev_space.dev_mem_len);
return -ENOMEM;
@@ -263,26 +161,20 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
dma_dev_mem_start = mhi_dev_ctxt->dev_space.dma_dev_mem_start;
memset(dev_mem_start, 0, mhi_dev_ctxt->dev_space.dev_mem_len);
- r = calculate_mhi_addressing_window(mhi_dev_ctxt);
- if (r) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to calculate addressing window ret %d", r);
- return r;
- }
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Starting Seg address: virt 0x%p, dma 0x%llx\n",
+ dev_mem_start, (u64)dma_dev_mem_start);
- mhi_log(MHI_MSG_INFO, "Starting Seg address: virt 0x%p, dma 0x%llx\n",
- dev_mem_start, (u64)dma_dev_mem_start);
-
- mhi_log(MHI_MSG_INFO, "Initializing CCABAP at virt 0x%p, dma 0x%llx\n",
- dev_mem_start + mhi_mem_index,
- (u64)dma_dev_mem_start + mhi_mem_index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Initializing CCABAP at dma 0x%llx\n",
+ (u64)dma_dev_mem_start + mhi_mem_index);
mhi_dev_ctxt->dev_space.ring_ctxt.cc_list = dev_mem_start;
mhi_dev_ctxt->dev_space.ring_ctxt.dma_cc_list = dma_dev_mem_start;
mhi_mem_index += MHI_MAX_CHANNELS * sizeof(struct mhi_chan_ctxt);
- mhi_log(MHI_MSG_INFO, "Initializing CRCBAP at virt 0x%p, dma 0x%llx\n",
- dev_mem_start + mhi_mem_index,
- (u64)dma_dev_mem_start + mhi_mem_index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Initializing CRCBAP at dma 0x%llx\n",
+ (u64)dma_dev_mem_start + mhi_mem_index);
mhi_dev_ctxt->dev_space.ring_ctxt.cmd_ctxt =
dev_mem_start + mhi_mem_index;
@@ -290,9 +182,9 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
dma_dev_mem_start + mhi_mem_index;
mhi_mem_index += NR_OF_CMD_RINGS * sizeof(struct mhi_chan_ctxt);
- mhi_log(MHI_MSG_INFO, "Initializing ECABAP at virt 0x%p, dma 0x%llx\n",
- dev_mem_start + mhi_mem_index,
- (u64)dma_dev_mem_start + mhi_mem_index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Initializing ECABAP at dma 0x%llx\n",
+ (u64)dma_dev_mem_start + mhi_mem_index);
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list =
dev_mem_start + mhi_mem_index;
mhi_dev_ctxt->dev_space.ring_ctxt.dma_ec_list =
@@ -300,10 +192,9 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_mem_index += mhi_dev_ctxt->mmio_info.nr_event_rings *
sizeof(struct mhi_event_ctxt);
- mhi_log(MHI_MSG_INFO,
- "Initializing CMD context at virt 0x%p, dma 0x%llx\n",
- dev_mem_start + mhi_mem_index,
- (u64)dma_dev_mem_start + mhi_mem_index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Initializing CMD context at dma 0x%llx\n",
+ (u64)dma_dev_mem_start + mhi_mem_index);
/* TODO: Initialize both the local and device cmd context */
ring_len = (CMD_EL_PER_RING * sizeof(union mhi_cmd_pkt));
@@ -322,7 +213,7 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
ring_len = sizeof(union mhi_event_pkt) *
mhi_dev_ctxt->ev_ring_props[i].nr_desc;
ring_addr = dma_alloc_coherent(
- &mhi_dev_ctxt->dev_info->pcie_device->dev,
+ &mhi_dev_ctxt->plat_dev->dev,
ring_len, &ring_dma_addr, GFP_KERNEL);
if (!ring_addr)
goto err_ev_alloc;
@@ -330,9 +221,9 @@ int init_mhi_dev_mem(struct mhi_device_ctxt *mhi_dev_ctxt)
ring_dma_addr, ring_len);
init_local_ev_ctxt(&mhi_dev_ctxt->mhi_local_event_ctxt[i],
ring_addr, ring_len);
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Initializing EV_%d TRE list at virt 0x%p dma 0x%llx\n",
- i, ring_addr, (u64)ring_dma_addr);
+ i, ring_addr, (u64)ring_dma_addr);
}
return 0;
@@ -344,12 +235,12 @@ err_ev_alloc:
dev_ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
ev_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
- dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
ev_ctxt->len,
ev_ctxt->base,
dev_ev_ctxt->mhi_event_ring_base_addr);
}
- dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
@@ -359,44 +250,28 @@ err_ev_alloc:
static int mhi_init_events(struct mhi_device_ctxt *mhi_dev_ctxt)
{
- mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq = kmalloc(
- sizeof(wait_queue_head_t),
- GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq) {
- mhi_log(MHI_MSG_ERROR, "Failed to init event");
- return -ENOMEM;
- }
- mhi_dev_ctxt->mhi_ev_wq.state_change_event =
- kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->mhi_ev_wq.state_change_event) {
- mhi_log(MHI_MSG_ERROR, "Failed to init event");
- goto error_event_handle_alloc;
- }
/* Initialize the event which signals M0 */
mhi_dev_ctxt->mhi_ev_wq.m0_event = kmalloc(sizeof(wait_queue_head_t),
GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_ev_wq.m0_event) {
- mhi_log(MHI_MSG_ERROR, "Failed to init event");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Failed to init event");
goto error_state_change_event_handle;
}
/* Initialize the event which signals M0 */
mhi_dev_ctxt->mhi_ev_wq.m3_event = kmalloc(sizeof(wait_queue_head_t),
GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_ev_wq.m3_event) {
- mhi_log(MHI_MSG_ERROR, "Failed to init event");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Failed to init event");
goto error_m0_event;
}
/* Initialize the event which signals M0 */
mhi_dev_ctxt->mhi_ev_wq.bhi_event = kmalloc(sizeof(wait_queue_head_t),
GFP_KERNEL);
if (NULL == mhi_dev_ctxt->mhi_ev_wq.bhi_event) {
- mhi_log(MHI_MSG_ERROR, "Failed to init event");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Failed to init event");
goto error_bhi_event;
}
- /* Initialize the event which starts the event parsing thread */
- init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
- /* Initialize the event which starts the state change thread */
- init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
+
/* Initialize the event which triggers clients waiting to send */
init_waitqueue_head(mhi_dev_ctxt->mhi_ev_wq.m0_event);
/* Initialize the event which triggers D3hot */
@@ -409,9 +284,6 @@ error_bhi_event:
error_m0_event:
kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
error_state_change_event_handle:
- kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
-error_event_handle_alloc:
- kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
return -ENOMEM;
}
@@ -448,102 +320,70 @@ static void mhi_init_wakelock(struct mhi_device_ctxt *mhi_dev_ctxt)
wakeup_source_init(&mhi_dev_ctxt->w_lock, "mhi_wakeup_source");
}
-static int mhi_spawn_threads(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- mhi_dev_ctxt->event_thread_handle = kthread_run(parse_event_thread,
- mhi_dev_ctxt,
- "mhi_ev_thrd");
- if (IS_ERR(mhi_dev_ctxt->event_thread_handle))
- return PTR_ERR(mhi_dev_ctxt->event_thread_handle);
- mhi_dev_ctxt->st_thread_handle = kthread_run(mhi_state_change_thread,
- mhi_dev_ctxt,
- "mhi_st_thrd");
- if (IS_ERR(mhi_dev_ctxt->event_thread_handle))
- return PTR_ERR(mhi_dev_ctxt->event_thread_handle);
- return 0;
-}
-
/**
* @brief Main initialization function for a mhi struct device context
* All threads, events mutexes, mhi specific data structures
* are initialized here
*
- * @param dev_info [IN ] pcie struct device information structure to
- which this mhi context belongs
* @param mhi_struct device [IN/OUT] reference to a mhi context to be populated
*
* @return errno
*/
-int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
- struct mhi_device_ctxt *mhi_dev_ctxt)
+int mhi_init_device_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r = 0;
- if (NULL == dev_info || NULL == mhi_dev_ctxt)
- return -EINVAL;
-
- mhi_log(MHI_MSG_VERBOSE, "Entered\n");
-
- mhi_dev_ctxt->dev_info = dev_info;
- mhi_dev_ctxt->dev_props = &dev_info->core;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Entered\n");
r = mhi_populate_event_cfg(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to get event ring properties ret %d\n", r);
goto error_during_props;
}
r = mhi_init_sync(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR, "Failed to initialize mhi sync\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to initialize mhi sync\n");
goto error_during_sync;
}
r = create_local_ev_ctxt(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize local event ctxt ret %d\n", r);
goto error_during_local_ev_ctxt;
}
r = init_mhi_dev_mem(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize device memory ret %d\n", r);
goto error_during_dev_mem_init;
}
r = mhi_init_events(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize mhi events ret %d\n", r);
goto error_wq_init;
}
r = mhi_reset_all_thread_queues(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize work queues ret %d\n", r);
goto error_during_thread_init;
}
init_event_ctxt_array(mhi_dev_ctxt);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
- r = mhi_spawn_threads(mhi_dev_ctxt);
- if (r) {
- mhi_log(MHI_MSG_ERROR, "Failed to spawn threads ret %d\n", r);
- goto error_during_thread_spawn;
- }
mhi_init_wakelock(mhi_dev_ctxt);
return r;
-error_during_thread_spawn:
- kfree(mhi_dev_ctxt->state_change_work_item_list.q_lock);
error_during_thread_init:
- kfree(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
- kfree(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m0_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.m3_event);
kfree(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
error_wq_init:
- dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
@@ -623,7 +463,8 @@ int mhi_reset_all_thread_queues(
ret_val = mhi_init_state_change_thread_work_queue(
&mhi_dev_ctxt->state_change_work_item_list);
if (ret_val)
- mhi_log(MHI_MSG_ERROR, "Failed to reset STT work queue\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to reset STT work queue\n");
return ret_val;
}
diff --git a/drivers/platform/msm/mhi/mhi_isr.c b/drivers/platform/msm/mhi/mhi_isr.c
index d7c604419593..95efe62eb8d4 100644
--- a/drivers/platform/msm/mhi/mhi_isr.c
+++ b/drivers/platform/msm/mhi/mhi_isr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,13 +29,14 @@ static int mhi_process_event_ring(
struct mhi_ring *local_ev_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "enter ev_index:%u\n", ev_index);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
- mhi_log(MHI_MSG_ERROR, "Invalid MHI PM State\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR, "Invalid MHI PM State\n");
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
return -EIO;
}
- mhi_assert_device_wake(mhi_dev_ctxt, false);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[ev_index];
@@ -77,10 +78,9 @@ static int mhi_process_event_ring(
&cmd_pkt, ev_index);
MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"MHI CCE received ring 0x%x chan:%u\n",
- ev_index,
- chan);
+ ev_index, chan);
spin_lock_irqsave(&cfg->event_lock, flags);
cfg->cmd_pkt = *cmd_pkt;
cfg->cmd_event_pkt =
@@ -102,9 +102,8 @@ static int mhi_process_event_ring(
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
chan = MHI_EV_READ_CHID(EV_CHID, &event_to_process);
if (unlikely(!VALID_CHAN_NR(chan))) {
- mhi_log(MHI_MSG_ERROR,
- "Invalid chan:%d\n",
- chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Invalid chan:%d\n", chan);
break;
}
ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
@@ -122,13 +121,15 @@ static int mhi_process_event_ring(
enum STATE_TRANSITION new_state;
unsigned long flags;
new_state = MHI_READ_STATE(&event_to_process);
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"MHI STE received ring 0x%x State:%s\n",
- ev_index,
- state_transition_str(new_state));
+ ev_index, state_transition_str(new_state));
- /* If transitioning to M1 schedule worker thread */
- if (new_state == STATE_TRANSITION_M1) {
+ switch (new_state) {
+ case STATE_TRANSITION_M0:
+ process_m0_transition(mhi_dev_ctxt);
+ break;
+ case STATE_TRANSITION_M1:
write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock,
flags);
mhi_dev_ctxt->mhi_state =
@@ -142,9 +143,15 @@ static int mhi_process_event_ring(
write_unlock_irqrestore(&mhi_dev_ctxt->
pm_xfer_lock,
flags);
- } else {
- mhi_init_state_transition(mhi_dev_ctxt,
- new_state);
+ break;
+ case STATE_TRANSITION_M3:
+ process_m3_transition(mhi_dev_ctxt);
+ break;
+ default:
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Unsupported STE received ring 0x%x State:%s\n",
+ ev_index,
+ state_transition_str(new_state));
}
break;
}
@@ -152,9 +159,8 @@ static int mhi_process_event_ring(
{
enum STATE_TRANSITION new_state;
- mhi_log(MHI_MSG_INFO,
- "MHI EEE received ring 0x%x\n",
- ev_index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "MHI EEE received ring 0x%x\n", ev_index);
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
switch (MHI_READ_EXEC_ENV(&event_to_process)) {
@@ -168,21 +174,25 @@ static int mhi_process_event_ring(
mhi_init_state_transition(mhi_dev_ctxt,
new_state);
break;
+ case MHI_EXEC_ENV_BHIE:
+ new_state = STATE_TRANSITION_BHIE;
+ mhi_init_state_transition(mhi_dev_ctxt,
+ new_state);
}
break;
}
case MHI_PKT_TYPE_STALE_EVENT:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Stale Event received for chan:%u\n",
MHI_EV_READ_CHID(EV_CHID, local_rp));
break;
case MHI_PKT_TYPE_SYS_ERR_EVENT:
- mhi_log(MHI_MSG_INFO,
- "MHI System Error Detected. Triggering Reset\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "MHI System Error Detected. Triggering Reset\n");
BUG();
break;
default:
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Unsupported packet type code 0x%x\n",
MHI_TRB_READ_INFO(EV_TRB_TYPE,
&event_to_process));
@@ -200,178 +210,141 @@ static int mhi_process_event_ring(
--event_quota;
}
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "exit ev_index:%u\n", ev_index);
return ret_val;
}
-int parse_event_thread(void *ctxt)
+void mhi_ev_task(unsigned long data)
{
- struct mhi_device_ctxt *mhi_dev_ctxt = ctxt;
- u32 i = 0;
- int ret_val = 0;
- int ret_val_process_event = 0;
- atomic_t *ev_pen_ptr = &mhi_dev_ctxt->counters.events_pending;
+ struct mhi_ring *mhi_ring = (struct mhi_ring *)data;
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ mhi_ring->mhi_dev_ctxt;
+ int ev_index = mhi_ring->index;
+ struct mhi_event_ring_cfg *ring_props =
+ &mhi_dev_ctxt->ev_ring_props[ev_index];
- /* Go through all event rings */
- for (;;) {
- ret_val =
- wait_event_interruptible(
- *mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq,
- ((atomic_read(
- &mhi_dev_ctxt->counters.events_pending) > 0) &&
- !mhi_dev_ctxt->flags.stop_threads) ||
- mhi_dev_ctxt->flags.kill_threads ||
- (mhi_dev_ctxt->flags.stop_threads &&
- !mhi_dev_ctxt->flags.ev_thread_stopped));
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Enter\n");
+ /* Process event ring */
+ mhi_process_event_ring(mhi_dev_ctxt, ev_index, ring_props->nr_desc);
- switch (ret_val) {
- case -ERESTARTSYS:
- return 0;
- default:
- if (mhi_dev_ctxt->flags.kill_threads) {
- mhi_log(MHI_MSG_INFO,
- "Caught exit signal, quitting\n");
- return 0;
- }
- if (mhi_dev_ctxt->flags.stop_threads) {
- mhi_dev_ctxt->flags.ev_thread_stopped = 1;
- continue;
- }
- break;
- }
- mhi_dev_ctxt->flags.ev_thread_stopped = 0;
- atomic_dec(&mhi_dev_ctxt->counters.events_pending);
- for (i = 1; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
- if (mhi_dev_ctxt->mhi_state == MHI_STATE_SYS_ERR) {
- mhi_log(MHI_MSG_INFO,
- "SYS_ERR detected, not processing events\n");
- atomic_set(&mhi_dev_ctxt->
- counters.events_pending,
- 0);
- break;
- }
- if (GET_EV_PROPS(EV_MANAGED,
- mhi_dev_ctxt->ev_ring_props[i].flags)) {
- ret_val_process_event =
- mhi_process_event_ring(mhi_dev_ctxt,
- i,
- mhi_dev_ctxt->
- ev_ring_props[i].nr_desc);
- if (ret_val_process_event == -EINPROGRESS)
- atomic_inc(ev_pen_ptr);
- }
- }
- }
+ enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, ev_index));
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Exit\n");
}
-void mhi_ctrl_ev_task(unsigned long data)
+void process_event_ring(struct work_struct *work)
{
+ struct mhi_ring *mhi_ring =
+ container_of(work, struct mhi_ring, ev_worker);
struct mhi_device_ctxt *mhi_dev_ctxt =
- (struct mhi_device_ctxt *)data;
- const unsigned CTRL_EV_RING = 0;
+ mhi_ring->mhi_dev_ctxt;
+ int ev_index = mhi_ring->index;
struct mhi_event_ring_cfg *ring_props =
- &mhi_dev_ctxt->ev_ring_props[CTRL_EV_RING];
+ &mhi_dev_ctxt->ev_ring_props[ev_index];
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Enter\n");
+ /* Process event ring */
+ mhi_process_event_ring(mhi_dev_ctxt, ev_index, ring_props->nr_desc);
- mhi_log(MHI_MSG_VERBOSE, "Enter\n");
- /* Process control event ring */
- mhi_process_event_ring(mhi_dev_ctxt,
- CTRL_EV_RING,
- ring_props->nr_desc);
- enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, CTRL_EV_RING));
- mhi_log(MHI_MSG_VERBOSE, "Exit\n");
+ enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, ev_index));
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Exit\n");
}
struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
{
int ret_val;
+ struct mhi_client_config *client_config = client_handle->client_config;
- client_handle->result.buf_addr = NULL;
- client_handle->result.bytes_xferd = 0;
- client_handle->result.transaction_status = 0;
- ret_val = mhi_process_event_ring(client_handle->mhi_dev_ctxt,
- client_handle->event_ring_index,
+ client_config->result.buf_addr = NULL;
+ client_config->result.bytes_xferd = 0;
+ client_config->result.transaction_status = 0;
+ ret_val = mhi_process_event_ring(client_config->mhi_dev_ctxt,
+ client_config->event_ring_index,
1);
if (ret_val)
- mhi_log(MHI_MSG_INFO, "NAPI failed to process event ring\n");
- return &(client_handle->result);
+ mhi_log(client_config->mhi_dev_ctxt, MHI_MSG_INFO,
+ "NAPI failed to process event ring\n");
+ return &(client_config->result);
}
void mhi_mask_irq(struct mhi_client_handle *client_handle)
{
+ struct mhi_client_config *client_config = client_handle->client_config;
struct mhi_device_ctxt *mhi_dev_ctxt =
- client_handle->mhi_dev_ctxt;
+ client_config->mhi_dev_ctxt;
struct mhi_ring *ev_ring = &mhi_dev_ctxt->
- mhi_local_event_ctxt[client_handle->event_ring_index];
+ mhi_local_event_ctxt[client_config->event_ring_index];
- disable_irq_nosync(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
+ disable_irq_nosync(MSI_TO_IRQ(mhi_dev_ctxt, client_config->msi_vec));
ev_ring->msi_disable_cntr++;
}
void mhi_unmask_irq(struct mhi_client_handle *client_handle)
{
+ struct mhi_client_config *client_config = client_handle->client_config;
struct mhi_device_ctxt *mhi_dev_ctxt =
- client_handle->mhi_dev_ctxt;
+ client_config->mhi_dev_ctxt;
struct mhi_ring *ev_ring = &mhi_dev_ctxt->
- mhi_local_event_ctxt[client_handle->event_ring_index];
+ mhi_local_event_ctxt[client_config->event_ring_index];
ev_ring->msi_enable_cntr++;
- enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
+ enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, client_config->msi_vec));
}
irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
{
- struct device *mhi_device = dev_id;
- struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_id;
int msi = IRQ_TO_MSI(mhi_dev_ctxt, irq_number);
+ struct mhi_ring *mhi_ring = &mhi_dev_ctxt->mhi_local_event_ctxt[msi];
+ struct mhi_event_ring_cfg *ring_props =
+ &mhi_dev_ctxt->ev_ring_props[msi];
- if (!mhi_dev_ctxt) {
- mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
- return IRQ_HANDLED;
- }
mhi_dev_ctxt->counters.msi_counter[
IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
- mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi);
trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
-
- if (msi) {
- atomic_inc(&mhi_dev_ctxt->counters.events_pending);
- wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
- } else {
- disable_irq_nosync(irq_number);
- tasklet_schedule(&mhi_dev_ctxt->ev_task);
- }
+ disable_irq_nosync(irq_number);
+ if (ring_props->priority <= MHI_EV_PRIORITY_TASKLET)
+ tasklet_schedule(&mhi_ring->ev_task);
+ else
+ schedule_work(&mhi_ring->ev_worker);
return IRQ_HANDLED;
}
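
With the per-ring rework above, the hard-IRQ handler masks its own line, then defers processing to a tasklet or a workqueue depending on the ring's configured priority, and the bottom half (mhi_ev_task/process_event_ring) re-enables the line once the ring is drained. A condensed, hedged sketch of that dispatch shape with the driver-specific types stripped out; the demo_* names are illustrative:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_ring {
	int irq;			/* line backing this ring's MSI */
	bool low_latency;		/* from the ring's DT priority */
	struct tasklet_struct tasklet;	/* atomic bottom half */
	struct work_struct work;	/* sleepable bottom half */
};

static irqreturn_t demo_isr(int irq, void *data)
{
	struct demo_ring *ring = data;

	disable_irq_nosync(irq);	/* quiet the line until drained */
	if (ring->low_latency)
		tasklet_schedule(&ring->tasklet);
	else
		schedule_work(&ring->work);
	return IRQ_HANDLED;
}
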
irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
{
- struct device *mhi_device = dev_id;
- u32 client_index;
- struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_id;
+ struct mhi_event_ring_cfg *ev_ring_props;
struct mhi_client_handle *client_handle;
+ struct mhi_client_config *client_config;
struct mhi_client_info_t *client_info;
struct mhi_cb_info cb_info;
int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
mhi_dev_ctxt->counters.msi_counter[msi_num]++;
- mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
trace_mhi_msi(msi_num);
- client_index = MHI_MAX_CHANNELS -
- (mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
- client_handle = mhi_dev_ctxt->client_handle_list[client_index];
- client_info = &client_handle->client_info;
- if (likely(client_handle)) {
- client_handle->result.user_data =
- client_handle->user_data;
- if (likely(client_info->mhi_client_cb)) {
- cb_info.result = &client_handle->result;
- cb_info.cb_reason = MHI_CB_XFER;
- cb_info.chan = client_handle->chan_info.chan_nr;
- cb_info.result->transaction_status = 0;
- client_info->mhi_client_cb(&cb_info);
- }
+
+ /* Obtain client config from MSI */
+ ev_ring_props = &mhi_dev_ctxt->ev_ring_props[msi_num];
+ client_handle = mhi_dev_ctxt->client_handle_list[ev_ring_props->chan];
+ if (unlikely(!client_handle)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Recv MSI for unreg chan:%u\n", ev_ring_props->chan);
+ return IRQ_HANDLED;
}
+
+ client_config = client_handle->client_config;
+ client_info = &client_config->client_info;
+ client_config->result.user_data =
+ client_config->user_data;
+ cb_info.result = &client_config->result;
+ cb_info.cb_reason = MHI_CB_XFER;
+ cb_info.chan = client_config->chan_info.chan_nr;
+ cb_info.result->transaction_status = 0;
+ client_info->mhi_client_cb(&cb_info);
+
return IRQ_HANDLED;
}
diff --git a/drivers/platform/msm/mhi/mhi_macros.h b/drivers/platform/msm/mhi/mhi_macros.h
index 133c0eeb034e..fc0e6f4bc27d 100644
--- a/drivers/platform/msm/mhi/mhi_macros.h
+++ b/drivers/platform/msm/mhi/mhi_macros.h
@@ -39,7 +39,6 @@
#define MHI_WORK_Q_MAX_SIZE 128
#define MAX_XFER_WORK_ITEMS 100
-#define MHI_MAX_SUPPORTED_DEVICES 1
#define MHI_PCIE_VENDOR_ID 0x17CB
#define MHI_PCIE_DEVICE_ID_9x35 0x0300
@@ -70,9 +69,9 @@
((enum MHI_CLIENT_CHANNEL)(_CHAN_NR) < MHI_CLIENT_RESERVED_1_LOWER))
#define IRQ_TO_MSI(_MHI_DEV_CTXT, _IRQ_NR) \
- ((_IRQ_NR) - (_MHI_DEV_CTXT)->dev_info->core.irq_base)
+ ((_IRQ_NR) - (_MHI_DEV_CTXT)->core.irq_base)
#define MSI_TO_IRQ(_MHI_DEV_CTXT, _MSI_NR) \
- ((_MHI_DEV_CTXT)->dev_info->core.irq_base + (_MSI_NR))
+ ((_MHI_DEV_CTXT)->core.irq_base + (_MSI_NR))
#define VALID_CHAN_NR(_CHAN_NR) (IS_HARDWARE_CHANNEL(_CHAN_NR) || \
IS_SOFTWARE_CHANNEL(_CHAN_NR))
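
The IRQ_TO_MSI/MSI_TO_IRQ macros above are now plain fixed offsets through the flattened core struct. A quick standalone round-trip check with a hypothetical irq_base:

#include <assert.h>

struct core { int irq_base; };

#define IRQ_TO_MSI(c, irq)	((irq) - (c)->irq_base)
#define MSI_TO_IRQ(c, msi)	((c)->irq_base + (msi))

int main(void)
{
	struct core core = { .irq_base = 320 };	/* hypothetical base */

	assert(IRQ_TO_MSI(&core, MSI_TO_IRQ(&core, 3)) == 3);
	return 0;
}
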
@@ -84,8 +83,8 @@
#define MHI_HW_INTMOD_VAL_MS 2
/* Timeout Values */
-#define MHI_READY_STATUS_TIMEOUT_MS 50
-#define MHI_THREAD_SLEEP_TIMEOUT_MS 20
+#define MHI_READY_STATUS_TIMEOUT_MS 500
+#define MHI_THREAD_SLEEP_TIMEOUT_MS 100
#define MHI_RESUME_WAKE_RETRIES 20
#define IS_HW_EV_RING(_mhi_dev_ctxt, _EV_INDEX) (_EV_INDEX >= \
diff --git a/drivers/platform/msm/mhi/mhi_main.c b/drivers/platform/msm/mhi/mhi_main.c
index 430dc918af7e..66d56bdd8f85 100644
--- a/drivers/platform/msm/mhi/mhi_main.c
+++ b/drivers/platform/msm/mhi/mhi_main.c
@@ -27,13 +27,23 @@
#include "mhi.h"
#include "mhi_hwio.h"
#include "mhi_macros.h"
+#include "mhi_bhi.h"
#include "mhi_trace.h"
static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_cmd_pkt *cmd_pkt);
-
-static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
+static void disable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
+ struct mhi_ring *bb_ctxt);
+
+static int enable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
+ struct mhi_ring *bb_ctxt,
+ int nr_el,
+ int chan,
+ size_t max_payload)
{
+ int i;
+ struct mhi_buf_info *mhi_buf_info;
+
bb_ctxt->el_size = sizeof(struct mhi_buf_info);
bb_ctxt->len = bb_ctxt->el_size * nr_el;
bb_ctxt->base = kzalloc(bb_ctxt->len, GFP_KERNEL);
@@ -42,7 +52,46 @@ static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
bb_ctxt->ack_rp = bb_ctxt->base;
if (!bb_ctxt->base)
return -ENOMEM;
+
+ if (mhi_dev_ctxt->flags.bb_required) {
+ char pool_name[32];
+
+ snprintf(pool_name, sizeof(pool_name), "mhi%d_%d",
+ mhi_dev_ctxt->plat_dev->id, chan);
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Creating pool %s for chan:%d payload: 0x%lx\n",
+ pool_name, chan, max_payload);
+
+ bb_ctxt->dma_pool = dma_pool_create(pool_name,
+ &mhi_dev_ctxt->plat_dev->dev, max_payload, 0, 0);
+ if (unlikely(!bb_ctxt->dma_pool))
+ goto dma_pool_error;
+
+ mhi_buf_info = (struct mhi_buf_info *)bb_ctxt->base;
+ for (i = 0; i < nr_el; i++, mhi_buf_info++) {
+ mhi_buf_info->pre_alloc_v_addr =
+ dma_pool_alloc(bb_ctxt->dma_pool, GFP_KERNEL,
+ &mhi_buf_info->pre_alloc_p_addr);
+ if (unlikely(!mhi_buf_info->pre_alloc_v_addr))
+ goto dma_alloc_error;
+ mhi_buf_info->pre_alloc_len = max_payload;
+ }
+ }
+
return 0;
+
+dma_alloc_error:
+ for (--i, --mhi_buf_info; i >= 0; i--, mhi_buf_info--)
+ dma_pool_free(bb_ctxt->dma_pool, mhi_buf_info->pre_alloc_v_addr,
+ mhi_buf_info->pre_alloc_p_addr);
+
+ dma_pool_destroy(bb_ctxt->dma_pool);
+ bb_ctxt->dma_pool = NULL;
+dma_pool_error:
+ kfree(bb_ctxt->base);
+ bb_ctxt->base = NULL;
+ return -ENOMEM;
}
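
When bounce buffering is required, enable_bb_ctxt() above now backs each channel with a dma_pool of max_payload-sized blocks reserved up front, and unwinds every allocation on the first failure. A condensed sketch of that reserve-then-unwind idiom against a generic device; the demo_* names are illustrative, not from the driver:

#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int demo_reserve(struct device *dev, struct dma_pool **pool,
			void **vaddrs, dma_addr_t *daddrs,
			int nr, size_t blk)
{
	int i;

	*pool = dma_pool_create("demo", dev, blk, 0, 0);
	if (!*pool)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		vaddrs[i] = dma_pool_alloc(*pool, GFP_KERNEL, &daddrs[i]);
		if (!vaddrs[i])
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)	/* free only what was handed out */
		dma_pool_free(*pool, vaddrs[i], daddrs[i]);
	dma_pool_destroy(*pool);
	*pool = NULL;
	return -ENOMEM;
}
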
static void mhi_write_db(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -66,7 +115,7 @@ static void mhi_update_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_trb_write_ptr = val;
} else if (mhi_dev_ctxt->mmio_info.event_db_addr == io_addr) {
if (chan < mhi_dev_ctxt->mmio_info.nr_event_rings) {
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"EV ctxt: %ld val 0x%llx WP 0x%llx RP: 0x%llx",
chan, val,
mhi_dev_ctxt->dev_space.ring_ctxt.
@@ -76,7 +125,7 @@ static void mhi_update_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[chan].
mhi_event_write_ptr = val;
} else {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Bad EV ring index: %lx\n", chan);
}
}
@@ -84,65 +133,48 @@ static void mhi_update_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
wmb();
}
-int mhi_init_pcie_device(struct mhi_pcie_dev_info *mhi_pcie_dev)
+int mhi_init_pcie_device(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int ret_val = 0;
long int sleep_time = 100;
- struct pci_dev *pcie_device =
- (struct pci_dev *)mhi_pcie_dev->pcie_device;
+ struct pci_dev *pcie_device = mhi_dev_ctxt->pcie_device;
+ struct pcie_core_info *core = &mhi_dev_ctxt->core;
do {
- ret_val = pci_enable_device(mhi_pcie_dev->pcie_device);
+ ret_val = pci_enable_device(pcie_device);
if (0 != ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to enable pcie struct device r: %d\n",
ret_val);
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Sleeping for ~ %li uS, and retrying.\n",
sleep_time);
msleep(sleep_time);
}
} while (ret_val != 0);
- mhi_log(MHI_MSG_INFO, "Successfully enabled pcie device.\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Successfully enabled pcie device.\n");
- mhi_pcie_dev->core.bar0_base =
- ioremap_nocache(pci_resource_start(pcie_device, 0),
- pci_resource_len(pcie_device, 0));
- if (!mhi_pcie_dev->core.bar0_base)
+ core->bar0_base = ioremap_nocache(pci_resource_start(pcie_device, 0),
+ pci_resource_len(pcie_device, 0));
+ if (!core->bar0_base)
goto mhi_device_list_error;
- mhi_pcie_dev->core.bar0_end = mhi_pcie_dev->core.bar0_base +
- pci_resource_len(pcie_device, 0);
- mhi_pcie_dev->core.bar2_base =
- ioremap_nocache(pci_resource_start(pcie_device, 2),
- pci_resource_len(pcie_device, 2));
- if (!mhi_pcie_dev->core.bar2_base)
- goto io_map_err;
-
- mhi_pcie_dev->core.bar2_end = mhi_pcie_dev->core.bar2_base +
- pci_resource_len(pcie_device, 2);
-
- if (!mhi_pcie_dev->core.bar0_base) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to register for pcie resources\n");
- goto mhi_pcie_read_ep_config_err;
- }
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Device BAR0 address is at 0x%p\n", core->bar0_base);
- mhi_log(MHI_MSG_INFO, "Device BAR0 address is at 0x%p\n",
- mhi_pcie_dev->core.bar0_base);
ret_val = pci_request_region(pcie_device, 0, "mhi");
if (ret_val)
- mhi_log(MHI_MSG_ERROR, "Could not request BAR0 region\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Could not request BAR0 region\n");
- mhi_pcie_dev->core.manufact_id = pcie_device->vendor;
- mhi_pcie_dev->core.dev_id = pcie_device->device;
+ core->manufact_id = pcie_device->vendor;
+ core->dev_id = pcie_device->device;
return 0;
-io_map_err:
- iounmap((void *)mhi_pcie_dev->core.bar0_base);
+
mhi_device_list_error:
pci_disable_device(pcie_device);
-mhi_pcie_read_ep_config_err:
return -EIO;
}
@@ -156,7 +188,7 @@ static void mhi_move_interrupts(struct mhi_device_ctxt *mhi_dev_ctxt, u32 cpu)
GET_EV_PROPS(EV_TYPE,
mhi_dev_ctxt->ev_ring_props[i].flags)) {
irq_to_affin = mhi_dev_ctxt->ev_ring_props[i].msi_vec;
- irq_to_affin += mhi_dev_ctxt->dev_props->irq_base;
+ irq_to_affin += mhi_dev_ctxt->core.irq_base;
irq_set_affinity(irq_to_affin, get_cpu_mask(cpu));
}
}
@@ -198,8 +230,9 @@ int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan,
scnprintf(dt_prop, MAX_BUF_SIZE, "%s%d", "mhi-chan-cfg-", chan);
r = of_property_read_u32_array(
- mhi_dev_ctxt->dev_info->plat_dev->dev.of_node,
- dt_prop, (u32 *)chan_info,
+ mhi_dev_ctxt->plat_dev->dev.of_node,
+ dt_prop,
+ (u32 *)chan_info,
sizeof(struct mhi_chan_info) / sizeof(u32));
return r;
}
@@ -211,9 +244,10 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
if (cc_list == NULL || ring == NULL)
return -EINVAL;
- dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
- ring->len, ring->base,
- cc_list->mhi_trb_ring_base_addr);
+ dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
+ ring->len,
+ ring->base,
+ cc_list->mhi_trb_ring_base_addr);
mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring,
MHI_CHAN_STATE_DISABLED,
false,
@@ -221,38 +255,37 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
return 0;
}
-void free_tre_ring(struct mhi_client_handle *client_handle)
+void free_tre_ring(struct mhi_device_ctxt *mhi_dev_ctxt, int chan)
{
struct mhi_chan_ctxt *chan_ctxt;
- struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- int chan = client_handle->chan_info.chan_nr;
int r;
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
r = mhi_release_chan_ctxt(mhi_dev_ctxt, chan_ctxt,
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan]);
if (r)
- mhi_log(MHI_MSG_ERROR,
- "Failed to release chan %d ret %d\n", chan, r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to release chan %d ret %d\n", chan, r);
}
-static int populate_tre_ring(struct mhi_client_handle *client_handle)
+static int populate_tre_ring(struct mhi_client_config *client_config)
{
dma_addr_t ring_dma_addr;
void *ring_local_addr;
struct mhi_chan_ctxt *chan_ctxt;
- struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- u32 chan = client_handle->chan_info.chan_nr;
- u32 nr_desc = client_handle->chan_info.max_desc;
+ struct mhi_device_ctxt *mhi_dev_ctxt = client_config->mhi_dev_ctxt;
+ u32 chan = client_config->chan_info.chan_nr;
+ u32 nr_desc = client_config->chan_info.max_desc;
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered chan %d requested desc %d\n", chan, nr_desc);
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
- ring_local_addr = dma_alloc_coherent(
- &mhi_dev_ctxt->dev_info->pcie_device->dev,
- nr_desc * sizeof(union mhi_xfer_pkt),
- &ring_dma_addr, GFP_KERNEL);
+ ring_local_addr =
+ dma_alloc_coherent(&mhi_dev_ctxt->plat_dev->dev,
+ nr_desc * sizeof(union mhi_xfer_pkt),
+ &ring_dma_addr,
+ GFP_KERNEL);
if (ring_local_addr == NULL)
return -ENOMEM;
@@ -261,15 +294,15 @@ static int populate_tre_ring(struct mhi_client_handle *client_handle)
(uintptr_t)ring_local_addr,
nr_desc,
GET_CHAN_PROPS(CHAN_DIR,
- client_handle->chan_info.flags),
- client_handle->chan_info.ev_ring,
+ client_config->chan_info.flags),
+ client_config->chan_info.ev_ring,
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
MHI_CHAN_STATE_ENABLED,
GET_CHAN_PROPS(PRESERVE_DB_STATE,
- client_handle->chan_info.flags),
+ client_config->chan_info.flags),
GET_CHAN_PROPS(BRSTMODE,
- client_handle->chan_info.flags));
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ client_config->chan_info.flags));
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
return 0;
}
@@ -283,85 +316,64 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
struct mhi_cmd_complete_event_pkt cmd_event_pkt;
union mhi_cmd_pkt cmd_pkt;
enum MHI_EVENT_CCS ev_code;
+ struct mhi_client_config *client_config = client_handle->client_config;
- if (!client_handle || client_handle->magic != MHI_HANDLE_MAGIC)
+ if (client_config->magic != MHI_HANDLE_MAGIC)
return -EINVAL;
- mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- ret_val = get_chan_props(mhi_dev_ctxt,
- client_handle->chan_info.chan_nr,
- &client_handle->chan_info);
- if (ret_val)
- return ret_val;
+ mhi_dev_ctxt = client_config->mhi_dev_ctxt;
- chan = client_handle->chan_info.chan_nr;
+ chan = client_config->chan_info.chan_nr;
cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
chan_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
mutex_lock(&cfg->chan_lock);
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered: Client opening chan 0x%x\n", chan);
if (mhi_dev_ctxt->dev_exec_env <
GET_CHAN_PROPS(CHAN_BRINGUP_STAGE,
- client_handle->chan_info.flags)) {
- mhi_log(MHI_MSG_INFO,
+ client_config->chan_info.flags)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Chan %d, MHI exec_env %d, not ready!\n",
- chan,
- mhi_dev_ctxt->dev_exec_env);
+ chan, mhi_dev_ctxt->dev_exec_env);
mutex_unlock(&cfg->chan_lock);
return -ENOTCONN;
}
- ret_val = populate_tre_ring(client_handle);
+ ret_val = populate_tre_ring(client_config);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to initialize tre ring chan %d ret %d\n",
- chan,
- ret_val);
- mutex_unlock(&cfg->chan_lock);
- return ret_val;
- }
- client_handle->event_ring_index =
- mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].
- mhi_event_ring_index;
- ret_val = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
- client_handle->chan_info.max_desc);
- if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to initialize bb ctxt chan %d ret %d\n",
- chan,
- ret_val);
- mutex_unlock(&cfg->chan_lock);
- return ret_val;
+ chan, ret_val);
+ goto error_tre_ring;
}
+ client_config->event_ring_index =
+ mhi_dev_ctxt->dev_space.ring_ctxt.
+ cc_list[chan].mhi_event_ring_index;
- client_handle->msi_vec =
+ client_config->msi_vec =
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[
- client_handle->event_ring_index].mhi_msi_vector;
- client_handle->intmod_t =
+ client_config->event_ring_index].mhi_msi_vector;
+ client_config->intmod_t =
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[
- client_handle->event_ring_index].mhi_intmodt;
+ client_config->event_ring_index].mhi_intmodt;
init_completion(&cfg->cmd_complete);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"MHI State is disabled\n");
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mutex_unlock(&cfg->chan_lock);
- return -EIO;
+ ret_val = -EIO;
+ goto error_pm_state;
}
- WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
- mhi_assert_device_wake(mhi_dev_ctxt, false);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
- spin_lock_irq(&chan_ring->ring_lock);
- chan_ring->ch_state = MHI_CHAN_STATE_ENABLED;
- spin_unlock_irq(&chan_ring->ring_lock);
- ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
+ ret_val = mhi_send_cmd(client_config->mhi_dev_ctxt,
MHI_COMMAND_START_CHAN,
chan);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to send start cmd for chan %d ret %d\n",
chan, ret_val);
goto error_completion;
@@ -369,9 +381,9 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
if (!ret_val) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to receive cmd completion for %d\n",
- chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to receive cmd completion for %d\n", chan);
+ ret_val = -EIO;
goto error_completion;
} else {
ret_val = 0;
@@ -385,76 +397,159 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
((union mhi_event_pkt *)&cmd_event_pkt));
if (ev_code != MHI_EVENT_CC_SUCCESS) {
- mhi_log(MHI_MSG_ERROR,
- "Error to receive event completion ev_code:0x%x\n",
- ev_code);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error to receive event comp. ev_code:0x%x\n", ev_code);
ret_val = -EIO;
goto error_completion;
}
- client_handle->chan_status = 1;
-
-error_completion:
+ spin_lock_irq(&chan_ring->ring_lock);
+ chan_ring->ch_state = MHI_CHAN_STATE_ENABLED;
+ spin_unlock_irq(&chan_ring->ring_lock);
+ client_config->chan_status = 1;
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
mutex_unlock(&cfg->chan_lock);
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "chan:%d opened successfully\n", chan);
+ return 0;
+
+error_completion:
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
+error_pm_state:
+ free_tre_ring(mhi_dev_ctxt, chan);
+error_tre_ring:
+ mutex_unlock(&cfg->chan_lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Exited chan 0x%x ret:%d\n", chan, ret_val);
return ret_val;
}
EXPORT_SYMBOL(mhi_open_channel);
+bool mhi_is_device_ready(const struct device * const dev,
+ const char *node_name)
+{
+ struct mhi_device_ctxt *itr;
+ const struct device_node *of_node;
+ bool match_found = false;
+
+ if (!mhi_device_drv)
+ return false;
+ if (dev->of_node == NULL)
+ return false;
+
+ of_node = of_parse_phandle(dev->of_node, node_name, 0);
+ if (!of_node)
+ return false;
+
+ mutex_lock(&mhi_device_drv->lock);
+ list_for_each_entry(itr, &mhi_device_drv->head, node) {
+ struct platform_device *pdev = itr->plat_dev;
+
+ if (pdev->dev.of_node == of_node) {
+ match_found = true;
+ break;
+ }
+ }
+ mutex_unlock(&mhi_device_drv->lock);
+ return match_found;
+}
+EXPORT_SYMBOL(mhi_is_device_ready);
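
mhi_is_device_ready() lets clients order their own probe against the MHI core now that contexts live on a list. A hypothetical client-side use, deferring probe until the phandle target has registered; the "qcom,mhi" property name and the probe shape are illustrative assumptions, not mandated by the patch:

#include <linux/errno.h>
#include <linux/platform_device.h>

/* Assumes the mhi_is_device_ready() prototype is in scope. */
static int demo_client_probe(struct platform_device *pdev)
{
	/* Defer until the MHI context our phandle points at is up */
	if (!mhi_is_device_ready(&pdev->dev, "qcom,mhi"))
		return -EPROBE_DEFER;

	/* Safe to call mhi_register_channel() from here on */
	return 0;
}
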
+
int mhi_register_channel(struct mhi_client_handle **client_handle,
- enum MHI_CLIENT_CHANNEL chan, s32 device_index,
- struct mhi_client_info_t *client_info, void *user_data)
+ struct mhi_client_info_t *client_info)
{
- struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
+ struct mhi_device_ctxt *mhi_dev_ctxt = NULL, *itr;
+ const struct device_node *of_node;
+ struct mhi_client_config *client_config;
+ const char *node_name;
+ enum MHI_CLIENT_CHANNEL chan;
+ int ret;
- if (!VALID_CHAN_NR(chan))
+ if (!client_info || client_info->dev->of_node == NULL)
return -EINVAL;
- if (NULL == client_handle || device_index < 0)
+ node_name = client_info->node_name;
+ chan = client_info->chan;
+ of_node = of_parse_phandle(client_info->dev->of_node, node_name, 0);
+ if (!of_node || !mhi_device_drv || chan >= MHI_MAX_CHANNELS)
return -EINVAL;
- mhi_dev_ctxt = &(mhi_devices.device_list[device_index].mhi_ctxt);
+ /* Traverse through the list */
+ mutex_lock(&mhi_device_drv->lock);
+ list_for_each_entry(itr, &mhi_device_drv->head, node) {
+ struct platform_device *pdev = itr->plat_dev;
+
+ if (pdev->dev.of_node == of_node) {
+ mhi_dev_ctxt = itr;
+ break;
+ }
+ }
+ mutex_unlock(&mhi_device_drv->lock);
- if (NULL != mhi_dev_ctxt->client_handle_list[chan])
- return -EISCONN;
+ if (!mhi_dev_ctxt)
+ return -EINVAL;
- mhi_log(MHI_MSG_INFO,
- "Opened channel 0x%x for client\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Registering channel 0x%x for client\n", chan);
*client_handle = kzalloc(sizeof(struct mhi_client_handle), GFP_KERNEL);
if (NULL == *client_handle)
return -ENOMEM;
+ (*client_handle)->client_config =
+ kzalloc(sizeof(*(*client_handle)->client_config), GFP_KERNEL);
+ if ((*client_handle)->client_config == NULL) {
+ kfree(*client_handle);
+ *client_handle = NULL;
+ return -ENOMEM;
+ }
mhi_dev_ctxt->client_handle_list[chan] = *client_handle;
- (*client_handle)->mhi_dev_ctxt = mhi_dev_ctxt;
- (*client_handle)->user_data = user_data;
- (*client_handle)->magic = MHI_HANDLE_MAGIC;
- (*client_handle)->chan_info.chan_nr = chan;
+ (*client_handle)->dev_id = mhi_dev_ctxt->core.dev_id;
+ (*client_handle)->domain = mhi_dev_ctxt->core.domain;
+ (*client_handle)->bus = mhi_dev_ctxt->core.bus;
+ (*client_handle)->slot = mhi_dev_ctxt->core.slot;
+ client_config = (*client_handle)->client_config;
+ client_config->mhi_dev_ctxt = mhi_dev_ctxt;
+ client_config->user_data = client_info->user_data;
+ client_config->magic = MHI_HANDLE_MAGIC;
+ client_config->chan_info.chan_nr = chan;
if (NULL != client_info)
- (*client_handle)->client_info = *client_info;
+ client_config->client_info = *client_info;
if (MHI_CLIENT_IP_HW_0_OUT == chan)
- (*client_handle)->intmod_t = 10;
+ client_config->intmod_t = 10;
if (MHI_CLIENT_IP_HW_0_IN == chan)
- (*client_handle)->intmod_t = 10;
+ client_config->intmod_t = 10;
+
+ get_chan_props(mhi_dev_ctxt, chan, &client_config->chan_info);
+ ret = enable_bb_ctxt(mhi_dev_ctxt, &mhi_dev_ctxt->chan_bb_list[chan],
+ client_config->chan_info.max_desc, chan,
+ client_config->client_info.max_payload);
+ if (ret) {
+ kfree(mhi_dev_ctxt->client_handle_list[chan]->client_config);
+ kfree(mhi_dev_ctxt->client_handle_list[chan]);
+ mhi_dev_ctxt->client_handle_list[chan] = NULL;
+ *client_handle = NULL;
+ return -ENOMEM;
+ }
- if (mhi_dev_ctxt->dev_exec_env == MHI_EXEC_ENV_AMSS) {
- mhi_log(MHI_MSG_INFO,
- "Exec env is AMSS notifing client now chan: 0x%x\n",
- chan);
+ if (mhi_dev_ctxt->dev_exec_env == MHI_EXEC_ENV_AMSS &&
+ mhi_dev_ctxt->flags.mhi_initialized) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Exec env is AMSS notify client now chan:%u\n", chan);
mhi_notify_client(*client_handle, MHI_CB_MHI_ENABLED);
}
- mhi_log(MHI_MSG_VERBOSE,
- "Successfuly registered chan 0x%x\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Successfuly registered chan:%u\n", chan);
return 0;
}
EXPORT_SYMBOL(mhi_register_channel);
@@ -469,46 +564,54 @@ void mhi_close_channel(struct mhi_client_handle *client_handle)
union mhi_cmd_pkt cmd_pkt;
struct mhi_ring *chan_ring;
enum MHI_EVENT_CCS ev_code;
+ struct mhi_client_config *client_config =
+ client_handle->client_config;
- if (!client_handle ||
- client_handle->magic != MHI_HANDLE_MAGIC ||
- !client_handle->chan_status)
+ if (client_config->magic != MHI_HANDLE_MAGIC ||
+ !client_config->chan_status)
return;
- mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- chan = client_handle->chan_info.chan_nr;
+ mhi_dev_ctxt = client_config->mhi_dev_ctxt;
+ chan = client_config->chan_info.chan_nr;
cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
- mhi_log(MHI_MSG_INFO, "Client attempting to close chan 0x%x\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Client attempting to close chan 0x%x\n", chan);
+
chan_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
mutex_lock(&cfg->chan_lock);
/* No more processing events for this channel */
spin_lock_irq(&chan_ring->ring_lock);
+ if (chan_ring->ch_state != MHI_CHAN_STATE_ENABLED) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Chan %d is not enabled, cur state:0x%x\n",
+ chan, chan_ring->ch_state);
+ spin_unlock_irq(&chan_ring->ring_lock);
+ mutex_unlock(&cfg->chan_lock);
+ return;
+ }
chan_ring->ch_state = MHI_CHAN_STATE_DISABLED;
spin_unlock_irq(&chan_ring->ring_lock);
init_completion(&cfg->cmd_complete);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
- mhi_assert_device_wake(mhi_dev_ctxt, false);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
- MHI_COMMAND_RESET_CHAN,
- chan);
+ mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
+ ret_val = mhi_send_cmd(mhi_dev_ctxt,
+ MHI_COMMAND_RESET_CHAN, chan);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to send reset cmd for chan %d ret %d\n",
- chan,
- ret_val);
+ chan, ret_val);
goto error_completion;
}
ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
if (!ret_val) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to receive cmd completion for %d\n",
- chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to receive cmd completion for %d\n", chan);
goto error_completion;
}
@@ -519,28 +622,36 @@ void mhi_close_channel(struct mhi_client_handle *client_handle)
ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
((union mhi_event_pkt *)&cmd_event_pkt));
if (ev_code != MHI_EVENT_CC_SUCCESS) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Error to receive event completion ev_cod:0x%x\n",
ev_code);
- goto error_completion;
}
+error_completion:
ret_val = reset_chan_cmd(mhi_dev_ctxt, &cmd_pkt);
if (ret_val)
- mhi_log(MHI_MSG_ERROR,
- "Error resetting cmd ret:%d\n",
- ret_val);
-
-error_completion:
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error resetting cmd ret:%d\n", ret_val);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Freeing ring for chan 0x%x\n", chan);
- free_tre_ring(client_handle);
- mhi_log(MHI_MSG_INFO, "Chan 0x%x confirmed closed.\n", chan);
- client_handle->chan_status = 0;
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "resetting bb_ring for chan 0x%x\n", chan);
+ mhi_dev_ctxt->chan_bb_list[chan].rp =
+ mhi_dev_ctxt->chan_bb_list[chan].base;
+ mhi_dev_ctxt->chan_bb_list[chan].wp =
+ mhi_dev_ctxt->chan_bb_list[chan].base;
+ mhi_dev_ctxt->chan_bb_list[chan].ack_rp =
+ mhi_dev_ctxt->chan_bb_list[chan].base;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Freeing ring for chan 0x%x\n", chan);
+ free_tre_ring(mhi_dev_ctxt, chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Chan 0x%x confirmed closed.\n", chan);
+ client_config->chan_status = 0;
mutex_unlock(&cfg->chan_lock);
}
EXPORT_SYMBOL(mhi_close_channel);
@@ -596,6 +707,7 @@ static inline int mhi_queue_tre(struct mhi_device_ctxt
}
return 0;
}
+
static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
int chan, void *buf, size_t buf_len,
enum dma_data_direction dir, struct mhi_buf_info **bb)
@@ -606,7 +718,8 @@ static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
int r;
uintptr_t bb_index, ctxt_index_wp, ctxt_index_rp;
- mhi_log(MHI_MSG_RAW, "Entered chan %d\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW,
+ "Entered chan %d\n", chan);
get_element_index(bb_ctxt, bb_ctxt->wp, &bb_index);
get_element_index(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp,
@@ -615,9 +728,9 @@ static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp,
&ctxt_index_rp);
BUG_ON(bb_index != ctxt_index_wp);
- mhi_log(MHI_MSG_VERBOSE,
- "Chan RP index %ld Chan WP index %ld, chan %d\n",
- ctxt_index_rp, ctxt_index_wp, chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Chan RP index %ld Chan WP index %ld, chan %d\n",
+ ctxt_index_rp, ctxt_index_wp, chan);
r = ctxt_add_element(bb_ctxt, (void **)&bb_info);
if (r)
return r;
@@ -626,88 +739,76 @@ static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
bb_info->client_buf = buf;
bb_info->dir = dir;
bb_info->bb_p_addr = dma_map_single(
- &mhi_dev_ctxt->dev_info->plat_dev->dev,
+ &mhi_dev_ctxt->plat_dev->dev,
bb_info->client_buf,
bb_info->buf_len,
bb_info->dir);
+ bb_info->bb_active = 0;
if (!VALID_BUF(bb_info->bb_p_addr, bb_info->buf_len, mhi_dev_ctxt)) {
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Buffer outside DMA range 0x%lx, size 0x%zx\n",
- (uintptr_t)bb_info->bb_p_addr, buf_len);
- dma_unmap_single(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+ (uintptr_t)bb_info->bb_p_addr, buf_len);
+ dma_unmap_single(&mhi_dev_ctxt->plat_dev->dev,
bb_info->bb_p_addr,
bb_info->buf_len,
bb_info->dir);
- mhi_log(MHI_MSG_RAW, "Allocating BB, chan %d\n", chan);
- bb_info->bb_v_addr = dma_alloc_coherent(
- &mhi_dev_ctxt->dev_info->pcie_device->dev,
- bb_info->buf_len,
- &bb_info->bb_p_addr,
- GFP_ATOMIC);
- if (!bb_info->bb_v_addr)
- return -ENOMEM;
- mhi_dev_ctxt->counters.bb_used[chan]++;
- if (dir == DMA_TO_DEVICE) {
- mhi_log(MHI_MSG_INFO, "Copying client buf into BB.\n");
- memcpy(bb_info->bb_v_addr, buf, bb_info->buf_len);
- /* Flush out data to bounce buffer */
- wmb();
- }
- bb_info->bb_active = 1;
+
+		if (likely(mhi_dev_ctxt->flags.bb_required &&
+			   bb_info->pre_alloc_len >= bb_info->buf_len)) {
+ bb_info->bb_p_addr = bb_info->pre_alloc_p_addr;
+ bb_info->bb_v_addr = bb_info->pre_alloc_v_addr;
+ mhi_dev_ctxt->counters.bb_used[chan]++;
+ if (dir == DMA_TO_DEVICE) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Copying client buf into BB.\n");
+ memcpy(bb_info->bb_v_addr, buf,
+ bb_info->buf_len);
+ }
+ bb_info->bb_active = 1;
+ } else
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "No BB allocated\n");
}
*bb = bb_info;
- mhi_log(MHI_MSG_RAW, "Exited chan %d\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Exited chan %d\n", chan);
return 0;
}
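
Note on the rewritten create_bb() above: it no longer allocates coherent memory on demand. When dma_map_single() produces an address the device cannot reach, it falls back to the bounce buffer pre-allocated at channel open, and gives up if that buffer is too small. A minimal standalone sketch of that decision; dev_can_reach() and struct bb_slot are hypothetical stand-ins for the driver's VALID_BUF() check and mhi_buf_info bookkeeping:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    struct bb_slot {                   /* stand-in for struct mhi_buf_info */
            void   *pre_alloc_v_addr;  /* bounce memory reserved at open   */
            size_t  pre_alloc_len;
            bool    bb_active;         /* true when the copy path is used  */
    };

    /* Hypothetical stand-in for VALID_BUF(): can the device reach addr? */
    static bool dev_can_reach(const void *addr, size_t len)
    {
            (void)addr;
            (void)len;
            return false;      /* pretend the mapping failed, force bounce */
    }

    static int stage_buf(struct bb_slot *bb, void *buf, size_t len,
                         bool to_device)
    {
            bb->bb_active = false;
            if (dev_can_reach(buf, len))
                    return 0;          /* direct mapping, no copy needed  */
            if (bb->pre_alloc_len < len)
                    return -1;         /* no bounce buffer large enough   */
            if (to_device)             /* outbound: copy before the DMA   */
                    memcpy(bb->pre_alloc_v_addr, buf, len);
            bb->bb_active = true;
            return 0;
    }
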
+static void disable_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
+ struct mhi_ring *bb_ctxt)
+{
+ if (mhi_dev_ctxt->flags.bb_required) {
+ struct mhi_buf_info *bb =
+ (struct mhi_buf_info *)bb_ctxt->base;
+ int nr_el = bb_ctxt->len / bb_ctxt->el_size;
+ int i = 0;
+
+ for (i = 0; i < nr_el; i++, bb++)
+ dma_pool_free(bb_ctxt->dma_pool, bb->pre_alloc_v_addr,
+ bb->pre_alloc_p_addr);
+ dma_pool_destroy(bb_ctxt->dma_pool);
+ bb_ctxt->dma_pool = NULL;
+ }
+
+ kfree(bb_ctxt->base);
+ bb_ctxt->base = NULL;
+}
+
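
disable_bb_ctxt() above returns one pre-allocated bounce buffer per ring slot to a DMA pool and destroys the pool. The allocation side is not in this hunk, but presumably mirrors the teardown. A hedged sketch of that assumed setup using the standard dma_pool API; the function name is illustrative, the struct fields are the ones this patch uses:

    #include <linux/dmapool.h>
    #include <linux/errno.h>

    /* Hypothetical counterpart to disable_bb_ctxt(): one buffer per slot */
    static int enable_bb_ctxt_sketch(struct device *dev, struct mhi_ring *ring,
                                     size_t buf_len, int nr_el)
    {
            struct mhi_buf_info *bb = (struct mhi_buf_info *)ring->base;
            int i;

            ring->dma_pool = dma_pool_create("mhi_bb", dev, buf_len, 0, 0);
            if (!ring->dma_pool)
                    return -ENOMEM;

            for (i = 0; i < nr_el; i++, bb++) {
                    bb->pre_alloc_len = buf_len;
                    bb->pre_alloc_v_addr = dma_pool_alloc(ring->dma_pool,
                                                          GFP_KERNEL,
                                                          &bb->pre_alloc_p_addr);
                    if (!bb->pre_alloc_v_addr)
                            return -ENOMEM; /* caller unwinds via teardown */
            }
            return 0;
    }
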
static void free_bounce_buffer(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_buf_info *bb)
{
- mhi_log(MHI_MSG_RAW, "Entered\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Entered\n");
if (!bb->bb_active)
		/* This buffer was mapped directly to the device */
- dma_unmap_single(&mhi_dev_ctxt->dev_info->plat_dev->dev,
+ dma_unmap_single(&mhi_dev_ctxt->plat_dev->dev,
bb->bb_p_addr, bb->buf_len, bb->dir);
- else
- /* This buffer was bounced */
- dma_free_coherent(&mhi_dev_ctxt->dev_info->pcie_device->dev,
- bb->buf_len,
- bb->bb_v_addr,
- bb->bb_p_addr);
- bb->bb_active = 0;
- mhi_log(MHI_MSG_RAW, "Exited\n");
-}
-
-void reset_bb_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
- struct mhi_ring *bb_ctxt)
-{
- int r = 0;
- struct mhi_buf_info *bb = NULL;
- mhi_log(MHI_MSG_VERBOSE, "Entered\n");
- /*
- Assumption: No events are expected during or after
- this operation is occurring for this channel.
- If a bounce buffer was allocated, the coherent memory is
- expected to be already freed.
- If the user's bounce buffer was mapped, it is expected to be
- already unmapped.
- Failure of any of the above conditions will result in
- a memory leak or subtle memory corruption.
- */
- while (!r) {
- r = ctxt_del_element(bb_ctxt, (void **)&bb);
- if (bb)
- free_bounce_buffer(mhi_dev_ctxt, bb);
- }
- bb_ctxt->ack_rp = bb_ctxt->base;
- bb_ctxt->rp = bb_ctxt->base;
- bb_ctxt->wp = bb_ctxt->base;
- mhi_log(MHI_MSG_VERBOSE, "Exited\n");
+ bb->bb_active = 0;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Exited\n");
}
static int mhi_queue_dma_xfer(
- struct mhi_client_handle *client_handle,
+ struct mhi_client_config *client_config,
dma_addr_t buf, size_t buf_len, enum MHI_FLAGS mhi_flags)
{
union mhi_xfer_pkt *pkt_loc;
@@ -715,17 +816,17 @@ static int mhi_queue_dma_xfer(
enum MHI_CLIENT_CHANNEL chan;
struct mhi_device_ctxt *mhi_dev_ctxt;
- mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+ mhi_dev_ctxt = client_config->mhi_dev_ctxt;
MHI_ASSERT(VALID_BUF(buf, buf_len, mhi_dev_ctxt),
"Client buffer is of invalid length\n");
- chan = client_handle->chan_info.chan_nr;
+ chan = client_config->chan_info.chan_nr;
pkt_loc = mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp;
pkt_loc->data_tx_pkt.buffer_ptr = buf;
pkt_loc->type.info = mhi_flags;
trace_mhi_tre(pkt_loc, chan, 0);
- if (likely(0 != client_handle->intmod_t))
+ if (likely(client_config->intmod_t))
MHI_TRB_SET_INFO(TX_TRB_BEI, pkt_loc, 1);
else
MHI_TRB_SET_INFO(TX_TRB_BEI, pkt_loc, 0);
@@ -736,21 +837,21 @@ static int mhi_queue_dma_xfer(
/* Ensure writes to descriptor are flushed */
wmb();
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Channel %d Has buf size of %zd and buf addr %lx, flags 0x%x\n",
- chan, buf_len, (uintptr_t)buf, mhi_flags);
+ chan, buf_len, (uintptr_t)buf, mhi_flags);
/* Add the TRB to the correct transfer ring */
ret_val = ctxt_add_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
(void *)&pkt_loc);
if (unlikely(0 != ret_val)) {
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Failed to insert trb in xfer ring\n");
return ret_val;
}
if (MHI_OUT ==
- GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
+ GET_CHAN_PROPS(CHAN_DIR, client_config->chan_info.flags))
atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
return ret_val;
@@ -765,56 +866,55 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
struct mhi_device_ctxt *mhi_dev_ctxt;
u32 chan;
unsigned long flags;
+ struct mhi_client_config *client_config;
if (!client_handle || !buf || !buf_len)
return -EINVAL;
- mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- chan = client_handle->chan_info.chan_nr;
+ client_config = client_handle->client_config;
+ mhi_dev_ctxt = client_config->mhi_dev_ctxt;
+ chan = client_config->chan_info.chan_nr;
read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE) {
read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"MHI is not in active state\n");
return -EINVAL;
}
-
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_assert_device_wake(mhi_dev_ctxt, false);
+ mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
- if (MHI_OUT == GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
+ if (GET_CHAN_PROPS(CHAN_DIR, client_config->chan_info.flags) == MHI_OUT)
dma_dir = DMA_TO_DEVICE;
else
dma_dir = DMA_FROM_DEVICE;
- r = create_bb(client_handle->mhi_dev_ctxt,
- client_handle->chan_info.chan_nr,
- buf, buf_len, dma_dir, &bb);
+ r = create_bb(client_config->mhi_dev_ctxt,
+ client_config->chan_info.chan_nr,
+ buf,
+ buf_len,
+ dma_dir,
+ &bb);
if (r) {
- mhi_log(MHI_MSG_VERBOSE,
- "Failed to create BB, chan %d ret %d\n",
- chan,
- r);
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->
- pcie_device->dev);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Failed to create BB, chan %d ret %d\n", chan, r);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
return r;
}
- mhi_log(MHI_MSG_VERBOSE,
- "Queueing to HW: Client Buf 0x%p, size 0x%zx, DMA %llx, chan %d\n",
- buf, buf_len, (u64)bb->bb_p_addr,
- client_handle->chan_info.chan_nr);
- r = mhi_queue_dma_xfer(client_handle,
- bb->bb_p_addr,
- bb->buf_len,
- mhi_flags);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Queueing to HW: Client Buf 0x%p, size 0x%zx, DMA %llx, chan %d\n",
+ buf, buf_len, (u64)bb->bb_p_addr,
+ client_config->chan_info.chan_nr);
+ r = mhi_queue_dma_xfer(client_config,
+ bb->bb_p_addr,
+ bb->buf_len,
+ mhi_flags);
/*
* Assumption: If create_bounce_buffer did not fail, we do not
@@ -826,11 +926,8 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING);
if (dma_dir == DMA_FROM_DEVICE) {
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- pm_runtime_put_noidle(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
}
read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
return 0;
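
The get/put pairing in mhi_queue_xfer() is deliberately asymmetric: inbound submissions drop their runtime and wake votes as soon as the doorbell rings, while outbound submissions hold both until parse_outbound() processes the completion. A condensed, illustrative sketch of the pairing, using the shadow ops this patch introduces:

    /* Illustrative vote pairing; mirrors mhi_queue_xfer()/parse_outbound() */
    static void queue_one_sketch(struct mhi_device_ctxt *ctxt, bool outbound)
    {
            ctxt->runtime_get(ctxt);        /* bus vote for the doorbell */
            ctxt->assert_wake(ctxt, false); /* keep the device in M0     */

            /* ... build the TRE and ring the channel doorbell ... */

            if (!outbound) {
                    /* inbound: the device wakes the host when data lands */
                    ctxt->runtime_put(ctxt);
                    ctxt->deassert_wake(ctxt);
            }
            /* outbound: votes released per completion in parse_outbound() */
    }
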
@@ -848,13 +945,12 @@ int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
if (chan >= MHI_MAX_CHANNELS || cmd >= MHI_COMMAND_MAX_NR) {
- mhi_log(MHI_MSG_ERROR,
- "Invalid channel id, received id: 0x%x",
- chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Invalid channel id, received id: 0x%x", chan);
return -EINVAL;
}
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered, MHI state %s dev_exec_env %d chan %d cmd %d\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
mhi_dev_ctxt->dev_exec_env, chan, cmd);
@@ -868,14 +964,16 @@ int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
ring_el_type = MHI_PKT_TYPE_START_CHAN_CMD;
break;
default:
- mhi_log(MHI_MSG_ERROR, "Bad command received\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Bad command received\n");
return -EINVAL;
}
spin_lock_irqsave(&mhi_ring->ring_lock, flags);
ret_val = ctxt_add_element(mhi_ring, (void *)&cmd_pkt);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR, "Failed to insert element\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to insert element\n");
spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
return ret_val;
}
@@ -886,13 +984,10 @@ int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_queue_tre(mhi_dev_ctxt, 0, MHI_RING_TYPE_CMD_RING);
read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags2);
spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Sent command 0x%x for chan %d\n", cmd, chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited ret %d.\n", ret_val);
- mhi_log(MHI_MSG_VERBOSE,
- "Sent command 0x%x for chan %d\n",
- cmd,
- chan);
-
- mhi_log(MHI_MSG_INFO, "Exited ret %d.\n", ret_val);
return ret_val;
}
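
mhi_send_cmd() nests the PM transfer read-lock inside the command ring spinlock, so a command TRE and its doorbell are atomic with respect to other ring users and PM state changes. The locking order, condensed into an illustrative sketch (not a drop-in; types are the driver's):

    static int submit_cmd_sketch(struct mhi_device_ctxt *ctxt,
                                 struct mhi_ring *ring)
    {
            union mhi_cmd_pkt *cmd_pkt;
            unsigned long flags, flags2;
            int ret;

            spin_lock_irqsave(&ring->ring_lock, flags);     /* own the ring */
            ret = ctxt_add_element(ring, (void *)&cmd_pkt); /* reserve TRE  */
            if (!ret) {
                    read_lock_irqsave(&ctxt->pm_xfer_lock, flags2);
                    mhi_queue_tre(ctxt, 0, MHI_RING_TYPE_CMD_RING); /* db */
                    read_unlock_irqrestore(&ctxt->pm_xfer_lock, flags2);
            }
            spin_unlock_irqrestore(&ring->ring_lock, flags);
            return ret;
    }
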
@@ -904,7 +999,7 @@ static void parse_inbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_buf_info *bb;
- mhi_log(MHI_MSG_INFO, "Entered\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
bb = bb_ctxt->rp;
bb->filled_size = bounced_data_size;
@@ -915,7 +1010,7 @@ static void parse_inbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
if (bb->bb_active) {
/* This is coherent memory, no cache management is needed */
memcpy(bb->client_buf, bb->bb_v_addr, bb->filled_size);
- mhi_log(MHI_MSG_RAW,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW,
"Bounce from BB:0x%p to Client Buf: 0x%p Len 0x%zx\n",
bb->client_buf, bb->bb_v_addr, bb->filled_size);
}
@@ -929,7 +1024,7 @@ static void parse_inbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
* rp, since it can be moved async by mhi_poll_inbound
*/
free_bounce_buffer(mhi_dev_ctxt, bb);
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
}
static void parse_outbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -940,7 +1035,7 @@ static void parse_outbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_buf_info *bb;
bb = bb_ctxt->rp;
- mhi_log(MHI_MSG_RAW, "Entered\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Entered\n");
BUG_ON(bb->dir != DMA_TO_DEVICE);
bb->filled_size = bounced_data_size;
BUG_ON(bb->filled_size != bb->buf_len);
@@ -948,7 +1043,7 @@ static void parse_outbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
result->bytes_xferd = bb->filled_size;
result->transaction_status = 0;
free_bounce_buffer(mhi_dev_ctxt, bb);
- mhi_log(MHI_MSG_RAW, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW, "Exited\n");
}
static int parse_outbound(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -957,42 +1052,44 @@ static int parse_outbound(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_result *result = NULL;
int ret_val = 0;
struct mhi_client_handle *client_handle = NULL;
+ struct mhi_client_config *client_config;
struct mhi_ring *local_chan_ctxt = NULL;
struct mhi_cb_info cb_info;
struct mhi_ring *bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
client_handle = mhi_dev_ctxt->client_handle_list[chan];
+ client_config = client_handle->client_config;
/* If ring is empty */
MHI_ASSERT(!unlikely(mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp ==
mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp), "Empty Event Ring\n");
parse_outbound_bb(mhi_dev_ctxt, bb_ctxt,
- &client_handle->result, xfer_len);
+ &client_config->result, xfer_len);
- mhi_log(MHI_MSG_RAW, "Removing BB from head, chan %d\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW,
+ "Removing BB from head, chan %d\n", chan);
atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
- mhi_deassert_device_wake(mhi_dev_ctxt);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
ret_val = ctxt_del_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
NULL);
BUG_ON(ret_val);
ret_val = ctxt_del_element(bb_ctxt, NULL);
BUG_ON(ret_val);
- if (NULL != client_handle) {
- result = &mhi_dev_ctxt->client_handle_list[chan]->result;
- if (NULL != (&client_handle->client_info.mhi_client_cb)) {
- client_handle->result.user_data =
- client_handle->user_data;
- cb_info.cb_reason = MHI_CB_XFER;
- cb_info.result = &client_handle->result;
- cb_info.chan = chan;
- client_handle->client_info.mhi_client_cb(&cb_info);
- }
+
+ result = &client_config->result;
+	if (client_config->client_info.mhi_client_cb) {
+ client_config->result.user_data =
+ client_config->user_data;
+ cb_info.cb_reason = MHI_CB_XFER;
+ cb_info.result = result;
+ cb_info.chan = chan;
+ client_config->client_info.mhi_client_cb(&cb_info);
}
- mhi_log(MHI_MSG_RAW,
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW,
"Processed outbound ack chan %d Pending acks %d.\n",
chan, atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
return 0;
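
Completions are delivered to clients through struct mhi_cb_info with reason MHI_CB_XFER, carrying the per-channel mhi_result filled in above. A hypothetical client callback showing the expected shape; the client name is assumed, the struct fields are the ones visible in this patch:

    /* Hypothetical client callback; shape follows struct mhi_cb_info use */
    static void my_client_xfer_cb(struct mhi_cb_info *cb_info)
    {
            struct mhi_result *result = cb_info->result;

            if (cb_info->cb_reason != MHI_CB_XFER)
                    return;
            pr_debug("chan %u: %zu bytes, status %d\n", cb_info->chan,
                     result->bytes_xferd, result->transaction_status);
            /* hand result->buf_addr back to the protocol layer here */
    }
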
@@ -1002,6 +1099,7 @@ static int parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 chan, union mhi_xfer_pkt *local_ev_trb_loc, u16 xfer_len)
{
struct mhi_client_handle *client_handle;
+ struct mhi_client_config *client_config;
struct mhi_ring *local_chan_ctxt;
struct mhi_result *result;
struct mhi_cb_info cb_info;
@@ -1010,16 +1108,14 @@ static int parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
uintptr_t bb_index, ctxt_index_rp, ctxt_index_wp;
client_handle = mhi_dev_ctxt->client_handle_list[chan];
+ client_config = client_handle->client_config;
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
MHI_ASSERT(!unlikely(mhi_dev_ctxt->mhi_local_chan_ctxt[chan].rp ==
mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp), "Empty Event Ring\n");
- if (NULL != mhi_dev_ctxt->client_handle_list[chan])
- result = &mhi_dev_ctxt->client_handle_list[chan]->result;
-
- parse_inbound_bb(mhi_dev_ctxt, bb_ctxt,
- &client_handle->result, xfer_len);
+ result = &client_config->result;
+ parse_inbound_bb(mhi_dev_ctxt, bb_ctxt, result, xfer_len);
if (unlikely(IS_SOFTWARE_CHANNEL(chan))) {
MHI_TX_TRB_SET_LEN(TX_TRB_LEN, local_ev_trb_loc, xfer_len);
@@ -1034,19 +1130,19 @@ static int parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
get_element_index(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp,
&ctxt_index_wp);
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Chan RP index %ld Chan WP index %ld chan %d\n",
ctxt_index_rp, ctxt_index_wp, chan);
BUG_ON(bb_index != ctxt_index_rp);
- if (NULL != client_handle->client_info.mhi_client_cb) {
- client_handle->result.user_data =
- client_handle->user_data;
+ if (client_config->client_info.mhi_client_cb) {
+ client_config->result.user_data =
+ client_config->user_data;
cb_info.cb_reason = MHI_CB_XFER;
- cb_info.result = &client_handle->result;
+ cb_info.result = &client_config->result;
cb_info.chan = chan;
- client_handle->client_info.mhi_client_cb(&cb_info);
+ client_config->client_info.mhi_client_cb(&cb_info);
} else {
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"No client registered chan %d\n", chan);
}
} else {
@@ -1067,7 +1163,7 @@ static int parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp,
&ctxt_index_wp);
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Chan RP index %ld Chan WP index %ld chan %d\n",
ctxt_index_rp, ctxt_index_wp, chan);
BUG_ON(bb_index != ctxt_index_rp);
@@ -1088,27 +1184,32 @@ static int validate_xfer_el_addr(struct mhi_chan_ctxt *ring,
-ERANGE : 0;
}
-static void print_tre(int chan, struct mhi_ring *ring, struct mhi_tx_pkt *tre)
+static void print_tre(struct mhi_device_ctxt *mhi_dev_ctxt,
+ int chan,
+ struct mhi_ring *ring,
+ struct mhi_tx_pkt *tre)
{
uintptr_t el_index;
get_element_index(ring, tre, &el_index);
- mhi_log(MHI_MSG_ERROR, "Printing TRE 0x%p index %lx for channel %d:\n",
- tre, el_index, chan);
- mhi_log(MHI_MSG_ERROR, "Buffer Pointer 0x%llx, len 0x%x, info 0x%x\n",
- tre->buffer_ptr, tre->buf_len, tre->info);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Printing TRE 0x%p index %lx for channel %d:\n",
+ tre, el_index, chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Buffer Pointer 0x%llx, len 0x%x, info 0x%x\n",
+ tre->buffer_ptr, tre->buf_len, tre->info);
}
-int parse_xfer_event(struct mhi_device_ctxt *ctxt,
+int parse_xfer_event(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_event_pkt *event, u32 event_id)
{
- struct mhi_device_ctxt *mhi_dev_ctxt = (struct mhi_device_ctxt *)ctxt;
struct mhi_result *result;
u32 chan = MHI_MAX_CHANNELS;
u16 xfer_len;
uintptr_t phy_ev_trb_loc;
union mhi_xfer_pkt *local_ev_trb_loc;
struct mhi_client_handle *client_handle;
+ struct mhi_client_config *client_config;
union mhi_xfer_pkt *local_trb_loc;
struct mhi_chan_ctxt *chan_ctxt;
u32 nr_trb_to_parse;
@@ -1121,11 +1222,11 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
ev_code = MHI_EV_READ_CODE(EV_TRB_CODE, event);
client_handle = mhi_dev_ctxt->client_handle_list[chan];
- client_handle->pkt_count++;
- result = &client_handle->result;
- mhi_log(MHI_MSG_VERBOSE,
- "Event Received, chan %d, cc_code %d\n",
- chan, ev_code);
+ client_config = client_handle->client_config;
+ client_config->pkt_count++;
+ result = &client_config->result;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "Event Received, chan %d, cc_code %d\n", chan, ev_code);
if (ev_code == MHI_EVENT_CC_OVERFLOW)
result->transaction_status = -EOVERFLOW;
else
@@ -1158,10 +1259,9 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
local_ev_trb_loc,
&nr_trb_to_parse);
if (unlikely(ret_val)) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to get nr available trbs ret: %d.\n",
ret_val);
- panic("critical error");
return ret_val;
}
do {
@@ -1177,9 +1277,8 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
local_trb_loc);
if (!VALID_BUF(trb_data_loc, xfer_len, mhi_dev_ctxt)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Bad buffer ptr: %lx.\n",
- (uintptr_t)trb_data_loc);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Bad buf ptr: %llx.\n", trb_data_loc);
return -EINVAL;
}
if (local_chan_ctxt->dir == MHI_IN) {
@@ -1192,7 +1291,7 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
mhi_dev_ctxt->counters.chan_pkts_xferd[chan]++;
if (local_trb_loc ==
(union mhi_xfer_pkt *)local_chan_ctxt->rp) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Done. Processed until: %lx.\n",
(uintptr_t)trb_data_loc);
break;
@@ -1208,7 +1307,8 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
{
u64 db_value = 0;
- mhi_log(MHI_MSG_INFO, "DB_MODE/OOB Detected chan %d.\n", chan);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "DB_MODE/OOB Detected chan %d.\n", chan);
local_chan_ctxt->db_mode.db_mode = 1;
if (local_chan_ctxt->wp != local_chan_ctxt->rp) {
@@ -1219,9 +1319,6 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
mhi_dev_ctxt->mmio_info.chan_db_addr, chan,
db_value);
}
- client_handle = mhi_dev_ctxt->client_handle_list[chan];
- if (client_handle)
- result->transaction_status = -ENOTCONN;
break;
}
case MHI_EVENT_CC_BAD_TRE:
@@ -1229,15 +1326,16 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
local_ev_trb_loc = (void *)mhi_p2v_addr(mhi_dev_ctxt,
MHI_RING_TYPE_EVENT_RING, event_id,
phy_ev_trb_loc);
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Received BAD TRE event for ring %d, pointer 0x%p\n",
chan, local_ev_trb_loc);
- print_tre(chan, &mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
+ print_tre(mhi_dev_ctxt, chan,
+ &mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
(struct mhi_tx_pkt *)local_ev_trb_loc);
BUG();
break;
default:
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Unknown TX completion.\n");
break;
@@ -1261,12 +1359,14 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
ret_val = ctxt_del_element(ring, &removed_element);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR, "Could not remove element from ring\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Could not remove element from ring\n");
return ret_val;
}
ret_val = ctxt_add_element(ring, &added_element);
if (ret_val) {
- mhi_log(MHI_MSG_ERROR, "Could not add element to ring\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Could not add element to ring\n");
return ret_val;
}
@@ -1296,9 +1396,7 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
struct mhi_ring *ev_ring;
struct mhi_chan_ctxt *chan_ctxt;
struct mhi_event_ctxt *ev_ctxt = NULL;
- struct mhi_client_handle *client_handle = NULL;
int pending_el = 0, i;
- struct mhi_ring *bb_ctxt;
unsigned long flags;
union mhi_event_pkt *local_rp = NULL;
union mhi_event_pkt *device_rp = NULL;
@@ -1306,20 +1404,19 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
if (!VALID_CHAN_NR(chan)) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Bad channel number for CCE\n");
return -EINVAL;
}
- bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
- client_handle = mhi_dev_ctxt->client_handle_list[chan];
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
ev_ring = &mhi_dev_ctxt->
mhi_local_event_ctxt[chan_ctxt->mhi_event_ring_index];
ev_ctxt = &mhi_dev_ctxt->
dev_space.ring_ctxt.ec_list[chan_ctxt->mhi_event_ring_index];
- mhi_log(MHI_MSG_INFO, "Processed cmd reset event\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Processed cmd reset event\n");
/* Clear all stale events related to Channel */
spin_lock_irqsave(&ev_ring->ring_lock, flags);
@@ -1357,21 +1454,18 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
local_chan_ctxt->rp,
local_chan_ctxt->wp,
&pending_el);
- mhi_log(MHI_MSG_INFO, "Decrementing chan %d out acks by %d.\n",
- chan, pending_el);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Decrementing chan %d out acks by %d.\n", chan, pending_el);
atomic_sub(pending_el, &mhi_dev_ctxt->counters.outbound_acks);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
for (i = 0; i < pending_el; i++)
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
for (i = 0; i < pending_el; i++) {
- pm_runtime_put_noidle(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
}
/* Reset the local channel context */
@@ -1384,10 +1478,7 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
- mhi_log(MHI_MSG_INFO, "Cleaning up BB list\n");
- reset_bb_ctxt(mhi_dev_ctxt, bb_ctxt);
-
- mhi_log(MHI_MSG_INFO, "Reset complete.\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Reset complete.\n");
return ret_val;
}
@@ -1418,15 +1509,17 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
struct mhi_chan_cfg *cfg;
struct mhi_ring *bb_ctxt = NULL;
struct mhi_buf_info *bb = NULL;
+ struct mhi_client_config *client_config;
int chan = 0, r = 0;
- if (!client_handle || !result || !client_handle->mhi_dev_ctxt)
+ if (!client_handle || !result)
return -EINVAL;
+ client_config = client_handle->client_config;
+ mhi_dev_ctxt = client_config->mhi_dev_ctxt;
- mhi_log(MHI_MSG_VERBOSE, "Entered\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Entered\n");
- mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- chan = client_handle->chan_info.chan_nr;
+ chan = client_config->chan_info.chan_nr;
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
@@ -1437,7 +1530,7 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
result->flags = pending_trb->info;
bb = bb_ctxt->ack_rp;
if (bb->bb_active) {
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Bounce buffer active chan %d, copying data\n",
chan);
}
@@ -1458,7 +1551,7 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
r = -ENODATA;
}
mutex_unlock(&cfg->chan_lock);
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Exited Result: Buf addr: 0x%p Bytes xfed 0x%zx chan %d\n",
result->buf_addr, result->bytes_xferd, chan);
return r;
@@ -1488,11 +1581,11 @@ int mhi_wait_for_mdm(struct mhi_device_ctxt *mhi_dev_ctxt)
while (mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr, MHIREGLEN)
== 0xFFFFFFFF
&& j <= MHI_MAX_LINK_RETRIES) {
- mhi_log(MHI_MSG_CRITICAL,
- "Could not access device retry %d\n", j);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Could not access device retry %d\n", j);
msleep(MHI_LINK_STABILITY_WAIT_MS);
if (MHI_MAX_LINK_RETRIES == j) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Could not access device, FAILING!\n");
return -ETIME;
}
@@ -1503,9 +1596,12 @@ int mhi_wait_for_mdm(struct mhi_device_ctxt *mhi_dev_ctxt)
int mhi_get_max_desc(struct mhi_client_handle *client_handle)
{
+ struct mhi_client_config *client_config;
+
if (!client_handle)
return -EINVAL;
- return client_handle->chan_info.max_desc - 1;
+ client_config = client_handle->client_config;
+ return client_config->chan_info.max_desc - 1;
}
EXPORT_SYMBOL(mhi_get_max_desc);
@@ -1514,6 +1610,27 @@ int mhi_get_epid(struct mhi_client_handle *client_handle)
return MHI_EPID;
}
+void mhi_master_mode_runtime_get(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ pm_runtime_get(&mhi_dev_ctxt->pcie_device->dev);
+}
+
+void mhi_master_mode_runtime_put(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->pcie_device->dev);
+}
+
+void mhi_slave_mode_runtime_get(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ mhi_dev_ctxt->bus_master_rt_get(mhi_dev_ctxt->pcie_device);
+}
+
+void mhi_slave_mode_runtime_put(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ mhi_dev_ctxt->bus_master_rt_put(mhi_dev_ctxt->pcie_device);
+}
+
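
These four helpers are the two flavors behind the new runtime_get/runtime_put shadow ops: master mode votes on the PCIe device directly, slave mode forwards to the bus master's callbacks. This hunk only shows the slave-mode wiring in mhi_register_device(); the selection presumably looks roughly like this sketch, keyed off the core.pci_master flag the patch sets:

    /* Assumed selection of the shadow PM ops (illustrative) */
    if (mhi_dev_ctxt->core.pci_master) {
            mhi_dev_ctxt->runtime_get = mhi_master_mode_runtime_get;
            mhi_dev_ctxt->runtime_put = mhi_master_mode_runtime_put;
    } else {
            /* defer votes to whoever owns the PCIe device */
            mhi_dev_ctxt->runtime_get = mhi_slave_mode_runtime_get;
            mhi_dev_ctxt->runtime_put = mhi_slave_mode_runtime_put;
    }
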
/*
* mhi_assert_device_wake - Set WAKE_DB register
* force_set - if true, will set bit regardless of counts
@@ -1572,16 +1689,17 @@ void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
int mhi_set_lpm(struct mhi_client_handle *client_handle, bool enable_lpm)
{
- struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+ struct mhi_client_config *client_config = client_handle->client_config;
+ struct mhi_device_ctxt *mhi_dev_ctxt = client_config->mhi_dev_ctxt;
unsigned long flags;
read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
/* Disable low power mode by asserting Wake */
if (enable_lpm == false)
- mhi_assert_device_wake(mhi_dev_ctxt, false);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
else
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
@@ -1592,26 +1710,147 @@ EXPORT_SYMBOL(mhi_set_lpm);
int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
int index)
{
- mhi_log(MHI_MSG_INFO, "Setting bus request to index %d\n", index);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Setting bus request to index %d\n", index);
return msm_bus_scale_client_update_request(mhi_dev_ctxt->bus_client,
index);
}
-int mhi_deregister_channel(struct mhi_client_handle
- *client_handle) {
+int mhi_deregister_channel(struct mhi_client_handle *client_handle)
+{
int ret_val = 0;
int chan;
+ struct mhi_client_config *client_config;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
- if (!client_handle || client_handle->magic != MHI_HANDLE_MAGIC)
+ if (!client_handle)
return -EINVAL;
- chan = client_handle->chan_info.chan_nr;
- client_handle->magic = 0;
- client_handle->mhi_dev_ctxt->client_handle_list[chan] = NULL;
+
+ client_config = client_handle->client_config;
+ mhi_dev_ctxt = client_config->mhi_dev_ctxt;
+ chan = client_config->chan_info.chan_nr;
+ client_config->magic = 0;
+ mhi_dev_ctxt->client_handle_list[chan] = NULL;
+ disable_bb_ctxt(mhi_dev_ctxt, &mhi_dev_ctxt->chan_bb_list[chan]);
+ kfree(client_config);
kfree(client_handle);
return ret_val;
}
EXPORT_SYMBOL(mhi_deregister_channel);
+int mhi_register_device(struct mhi_device *mhi_device,
+ const char *node_name,
+ unsigned long user_data)
+{
+ const struct device_node *of_node;
+ struct mhi_device_ctxt *mhi_dev_ctxt = NULL, *itr;
+ struct pcie_core_info *core_info;
+ struct pci_dev *pci_dev = mhi_device->pci_dev;
+ u32 domain = pci_domain_nr(pci_dev->bus);
+ u32 bus = pci_dev->bus->number;
+ u32 dev_id = pci_dev->device;
+ u32 slot = PCI_SLOT(pci_dev->devfn);
+ int ret, i;
+
+ of_node = of_parse_phandle(mhi_device->dev->of_node, node_name, 0);
+ if (!of_node)
+ return -EINVAL;
+
+ if (!mhi_device_drv)
+ return -EPROBE_DEFER;
+
+	/* Traverse the list for a context matching this PCIe device */
+ mutex_lock(&mhi_device_drv->lock);
+ list_for_each_entry(itr, &mhi_device_drv->head, node) {
+ struct platform_device *pdev = itr->plat_dev;
+ struct pcie_core_info *core = &itr->core;
+
+ if (pdev->dev.of_node == of_node &&
+ core->domain == domain &&
+ core->bus == bus &&
+ core->dev_id == dev_id &&
+ core->slot == slot) {
+ mhi_dev_ctxt = itr;
+ break;
+ }
+ }
+ mutex_unlock(&mhi_device_drv->lock);
+
+ /* perhaps we've not probed yet */
+ if (!mhi_dev_ctxt)
+ return -EPROBE_DEFER;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Registering Domain:%02u Bus:%04u dev:0x%04x slot:%04u\n",
+ domain, bus, dev_id, slot);
+
+ /* Set up pcie dev info */
+ mhi_dev_ctxt->pcie_device = pci_dev;
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
+ INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
+ INIT_WORK(&mhi_dev_ctxt->st_thread_worker, mhi_state_change_worker);
+ mutex_init(&mhi_dev_ctxt->pm_lock);
+ rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
+ spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
+ init_completion(&mhi_dev_ctxt->cmd_complete);
+ mhi_dev_ctxt->flags.link_up = 1;
+ core_info = &mhi_dev_ctxt->core;
+ core_info->manufact_id = pci_dev->vendor;
+ core_info->pci_master = false;
+
+ /* Go thru resources and set up */
+ for (i = 0; i < ARRAY_SIZE(mhi_device->resources); i++) {
+ const struct resource *res = &mhi_device->resources[i];
+
+ switch (resource_type(res)) {
+ case IORESOURCE_MEM:
+ /* bus master already mapped it */
+ core_info->bar0_base = (void __iomem *)res->start;
+ core_info->bar0_end = (void __iomem *)res->end;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "bar mapped to:0x%llx - 0x%llx (virtual)\n",
+ res->start, res->end);
+ break;
+ case IORESOURCE_IRQ:
+ core_info->irq_base = (u32)res->start;
+ core_info->max_nr_msis = (u32)resource_size(res);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "irq mapped to: %u size:%u\n",
+ core_info->irq_base,
+ core_info->max_nr_msis);
+ break;
+		}
+ }
+
+ if (!core_info->bar0_base || !core_info->irq_base)
+ return -EINVAL;
+
+ mhi_dev_ctxt->bus_master_rt_get = mhi_device->pm_runtime_get;
+ mhi_dev_ctxt->bus_master_rt_put = mhi_device->pm_runtime_noidle;
+ if (!mhi_dev_ctxt->bus_master_rt_get ||
+ !mhi_dev_ctxt->bus_master_rt_put)
+ return -EINVAL;
+
+ ret = mhi_ctxt_init(mhi_dev_ctxt);
+ if (ret) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "MHI Initialization failed, ret %d\n", ret);
+ return ret;
+ }
+ mhi_init_debugfs(mhi_dev_ctxt);
+
+ /* setup shadow pm functions */
+ mhi_dev_ctxt->assert_wake = mhi_assert_device_wake;
+ mhi_dev_ctxt->deassert_wake = mhi_deassert_device_wake;
+ mhi_dev_ctxt->runtime_get = mhi_slave_mode_runtime_get;
+ mhi_dev_ctxt->runtime_put = mhi_slave_mode_runtime_put;
+ mhi_device->mhi_dev_ctxt = mhi_dev_ctxt;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit success\n");
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_register_device);
+
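
mhi_register_device() expects a bus-master driver to hand over its pci_dev, runtime-PM callbacks, and a phandle naming the matching MHI platform node, deferring until the platform side has probed. A hypothetical caller; every name here is illustrative, and the callback signatures are inferred from how bus_master_rt_get/put are invoked:

    struct my_bus {                        /* hypothetical bus driver state */
            struct platform_device *pdev;
            struct pci_dev *pci_dev;
            struct mhi_device mhi_dev;
    };

    static void my_rt_get(struct pci_dev *pci_dev)
    {
            pm_runtime_get(&pci_dev->dev);
    }

    static void my_rt_put_noidle(struct pci_dev *pci_dev)
    {
            pm_runtime_put_noidle(&pci_dev->dev);
    }

    static int bus_master_attach(struct my_bus *bus)
    {
            struct mhi_device *mdev = &bus->mhi_dev;
            int ret;

            mdev->dev = &bus->pdev->dev;        /* carries the of_node   */
            mdev->pci_dev = bus->pci_dev;
            mdev->pm_runtime_get = my_rt_get;   /* slave-mode vote hooks */
            mdev->pm_runtime_noidle = my_rt_put_noidle;

            ret = mhi_register_device(mdev, "qcom,mhi", 0);
            if (ret)    /* -EPROBE_DEFER until the MHI platform probe ran */
                    return ret;

            return mhi_pm_control_device(mdev, MHI_DEV_CTRL_INIT);
    }
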
void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
void __iomem *io_addr,
uintptr_t chan,
@@ -1627,9 +1866,9 @@ void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
ring_ctxt = &mhi_dev_ctxt->
mhi_local_event_ctxt[chan];
- mhi_log(MHI_MSG_VERBOSE,
- "db.set addr: %p io_offset 0x%lx val:0x%x\n",
- io_addr, chan, val);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ io_addr, chan, val);
mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
@@ -1637,10 +1876,9 @@ void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
ring_ctxt->db_mode.db_mode = 0;
} else {
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
- chan,
- ring_ctxt->db_mode.brstmode,
+ chan, ring_ctxt->db_mode.brstmode,
ring_ctxt->db_mode.db_mode);
}
}
@@ -1650,10 +1888,9 @@ void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
uintptr_t chan,
u32 val)
{
- mhi_log(MHI_MSG_VERBOSE,
- "db.set addr: %p io_offset 0x%lx val:0x%x\n",
- io_addr, chan, val);
-
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ io_addr, chan, val);
mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
}
@@ -1663,9 +1900,9 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
uintptr_t chan, u32 val)
{
- mhi_log(MHI_MSG_VERBOSE,
- "db.set addr: %p io_offset 0x%lx val:0x%x\n",
- io_addr, chan, val);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
+ "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ io_addr, chan, val);
mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
@@ -1678,7 +1915,7 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
chan_ctxt->db_mode.db_mode = 0;
} else {
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
chan, chan_ctxt->db_mode.brstmode,
chan_ctxt->db_mode.db_mode);
@@ -1714,10 +1951,9 @@ void mhi_reg_write(struct mhi_device_ctxt *mhi_dev_ctxt,
void __iomem *io_addr,
uintptr_t io_offset, u32 val)
{
- mhi_log(MHI_MSG_RAW, "d.s 0x%p off: 0x%lx 0x%x\n",
- io_addr, io_offset, val);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_RAW,
+ "d.s 0x%p off: 0x%lx 0x%x\n", io_addr, io_offset, val);
iowrite32(val, io_addr + io_offset);
-
/* Flush write to device */
wmb();
}
diff --git a/drivers/platform/msm/mhi/mhi_mmio_ops.c b/drivers/platform/msm/mhi/mhi_mmio_ops.c
index b4447378683e..a991a2e68b34 100644
--- a/drivers/platform/msm/mhi/mhi_mmio_ops.c
+++ b/drivers/platform/msm/mhi/mhi_mmio_ops.c
@@ -29,93 +29,79 @@
int mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
- u32 expiry_counter;
unsigned long flags;
rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+ unsigned long timeout;
- mhi_log(MHI_MSG_INFO, "Waiting for MMIO RESET bit to be cleared.\n");
- read_lock_irqsave(pm_xfer_lock, flags);
- if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
- read_unlock_irqrestore(pm_xfer_lock, flags);
- return -EIO;
- }
- pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
- MHISTATUS);
- MHI_READ_FIELD(pcie_word_val,
- MHICTRL_RESET_MASK,
- MHICTRL_RESET_SHIFT);
- read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
- if (pcie_word_val == 0xFFFFFFFF)
- return -ENOTCONN;
-
- while (MHI_STATE_RESET != pcie_word_val && expiry_counter < 100) {
- expiry_counter++;
- mhi_log(MHI_MSG_ERROR,
- "Device is not RESET, sleeping and retrying.\n");
- msleep(MHI_READY_STATUS_TIMEOUT_MS);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Waiting for MMIO RESET bit to be cleared.\n");
+
+ timeout = jiffies +
+ msecs_to_jiffies(mhi_dev_ctxt->poll_reset_timeout_ms);
+ while (time_before(jiffies, timeout)) {
read_lock_irqsave(pm_xfer_lock, flags);
if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
read_unlock_irqrestore(pm_xfer_lock, flags);
return -EIO;
}
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
- MHICTRL);
+ MHICTRL);
+		read_unlock_irqrestore(pm_xfer_lock, flags);
+ if (pcie_word_val == 0xFFFFFFFF)
+ return -ENOTCONN;
MHI_READ_FIELD(pcie_word_val,
- MHICTRL_RESET_MASK,
- MHICTRL_RESET_SHIFT);
- read_unlock_irqrestore(pm_xfer_lock, flags);
- }
+ MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT);
- if (MHI_STATE_READY != pcie_word_val)
- return -ENOTCONN;
- return 0;
+ if (!pcie_word_val)
+ return 0;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+			"MHI still in RESET, sleeping\n");
+ msleep(MHI_THREAD_SLEEP_TIMEOUT_MS);
+ }
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Timeout waiting for reset to be cleared\n");
+ return -ETIMEDOUT;
}
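
This rewrite fixes the old loop, which compared an uninitialized expiry_counter and polled a fixed number of retries; the new version bounds the wait with a jiffies deadline instead. The underlying idiom, as a hedged standalone sketch:

    #include <linux/jiffies.h>
    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Generic deadline poll, as used by the rewritten reset/ready waits */
    static int poll_until(bool (*done)(void *), void *arg, unsigned int ms)
    {
            unsigned long timeout = jiffies + msecs_to_jiffies(ms);

            while (time_before(jiffies, timeout)) {
                    if (done(arg))
                            return 0;
                    msleep(20);     /* sleep, don't spin, between reads */
            }
            return -ETIMEDOUT;
    }
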
int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
- u32 expiry_counter;
unsigned long flags;
rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+ unsigned long timeout;
- mhi_log(MHI_MSG_INFO, "Waiting for MMIO Ready bit to be set\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Waiting for MMIO Ready bit to be set\n");
- read_lock_irqsave(pm_xfer_lock, flags);
- if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
- read_unlock_irqrestore(pm_xfer_lock, flags);
- return -EIO;
- }
- /* Read MMIO and poll for READY bit to be set */
- pcie_word_val = mhi_reg_read(
- mhi_dev_ctxt->mmio_info.mmio_addr, MHISTATUS);
- MHI_READ_FIELD(pcie_word_val,
- MHISTATUS_READY_MASK,
- MHISTATUS_READY_SHIFT);
- read_unlock_irqrestore(pm_xfer_lock, flags);
-
- if (pcie_word_val == 0xFFFFFFFF)
- return -ENOTCONN;
- expiry_counter = 0;
- while (MHI_STATE_READY != pcie_word_val && expiry_counter < 50) {
- expiry_counter++;
- mhi_log(MHI_MSG_ERROR,
- "Device is not ready, sleeping and retrying.\n");
- msleep(MHI_READY_STATUS_TIMEOUT_MS);
+ timeout = jiffies +
+ msecs_to_jiffies(mhi_dev_ctxt->poll_reset_timeout_ms);
+ while (time_before(jiffies, timeout)) {
+ /* Read MMIO and poll for READY bit to be set */
read_lock_irqsave(pm_xfer_lock, flags);
if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
read_unlock_irqrestore(pm_xfer_lock, flags);
return -EIO;
}
+
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
MHISTATUS);
- MHI_READ_FIELD(pcie_word_val,
- MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT);
read_unlock_irqrestore(pm_xfer_lock, flags);
+ if (pcie_word_val == 0xFFFFFFFF)
+ return -ENOTCONN;
+ MHI_READ_FIELD(pcie_word_val,
+ MHISTATUS_READY_MASK,
+ MHISTATUS_READY_SHIFT);
+ if (pcie_word_val == MHI_STATE_READY)
+ return 0;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Device is not ready, sleeping and retrying.\n");
+ msleep(MHI_THREAD_SLEEP_TIMEOUT_MS);
}
-
- if (pcie_word_val != MHI_STATE_READY)
- return -ETIMEDOUT;
- return 0;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+		"Timed out waiting for device ready\n");
+ return -ETIMEDOUT;
}
int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -125,28 +111,26 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
u32 i = 0;
int ret_val;
- mhi_log(MHI_MSG_INFO, "~~~ Initializing MMIO ~~~\n");
- mhi_dev_ctxt->mmio_info.mmio_addr = mhi_dev_ctxt->dev_props->bar0_base;
-
- mhi_log(MHI_MSG_INFO, "Bar 0 address is at: 0x%p\n",
- mhi_dev_ctxt->mmio_info.mmio_addr);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "~~~ Initializing MMIO ~~~\n");
+ mhi_dev_ctxt->mmio_info.mmio_addr = mhi_dev_ctxt->core.bar0_base;
mhi_dev_ctxt->mmio_info.mmio_len = mhi_reg_read(
mhi_dev_ctxt->mmio_info.mmio_addr,
MHIREGLEN);
if (0 == mhi_dev_ctxt->mmio_info.mmio_len) {
- mhi_log(MHI_MSG_ERROR, "Received mmio length as zero\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Received mmio length as zero\n");
return -EIO;
}
- mhi_log(MHI_MSG_INFO, "Testing MHI Ver\n");
- mhi_dev_ctxt->dev_props->mhi_ver = mhi_reg_read(
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Testing MHI Ver\n");
+ mhi_dev_ctxt->core.mhi_ver = mhi_reg_read(
mhi_dev_ctxt->mmio_info.mmio_addr, MHIVER);
- if (MHI_VERSION != mhi_dev_ctxt->dev_props->mhi_ver) {
- mhi_log(MHI_MSG_CRITICAL,
- "Bad MMIO version, 0x%x\n",
- mhi_dev_ctxt->dev_props->mhi_ver);
+ if (mhi_dev_ctxt->core.mhi_ver != MHI_VERSION) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Bad MMIO version, 0x%x\n", mhi_dev_ctxt->core.mhi_ver);
		return -EIO;
}
@@ -159,9 +143,10 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
else
chan_ctxt->chstate = MHI_CHAN_STATE_DISABLED;
}
- mhi_log(MHI_MSG_INFO,
- "Read back MMIO Ready bit successfully. Moving on..\n");
- mhi_log(MHI_MSG_INFO, "Reading channel doorbell offset\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Read back MMIO Ready bit successfully. Moving on..\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Reading channel doorbell offset\n");
mhi_dev_ctxt->mmio_info.chan_db_addr =
mhi_dev_ctxt->mmio_info.mmio_addr;
@@ -173,13 +158,15 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
CHDBOFF, CHDBOFF_CHDBOFF_MASK,
CHDBOFF_CHDBOFF_SHIFT);
- mhi_log(MHI_MSG_INFO, "Reading event doorbell offset\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Reading event doorbell offset\n");
mhi_dev_ctxt->mmio_info.event_db_addr += mhi_reg_read_field(
mhi_dev_ctxt->mmio_info.mmio_addr,
ERDBOFF, ERDBOFF_ERDBOFF_MASK,
ERDBOFF_ERDBOFF_SHIFT);
- mhi_log(MHI_MSG_INFO, "Setting all MMIO values.\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Setting all MMIO values.\n");
mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr,
MHICFG,
@@ -290,7 +277,7 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK,
MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT,
pcie_word_val);
- mhi_log(MHI_MSG_INFO, "Done..\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Done..\n");
return 0;
}
diff --git a/drivers/platform/msm/mhi/mhi_pm.c b/drivers/platform/msm/mhi/mhi_pm.c
index 2f44601e225e..d7a4f7aa93ef 100644
--- a/drivers/platform/msm/mhi/mhi_pm.c
+++ b/drivers/platform/msm/mhi/mhi_pm.c
@@ -20,14 +20,17 @@
#include "mhi_sys.h"
#include "mhi.h"
#include "mhi_hwio.h"
+#include "mhi_bhi.h"
/* Write only sysfs attributes */
static DEVICE_ATTR(MHI_M0, S_IWUSR, NULL, sysfs_init_m0);
+static DEVICE_ATTR(MHI_M3, S_IWUSR, NULL, sysfs_init_m3);
/* Read only sysfs attributes */
static struct attribute *mhi_attributes[] = {
&dev_attr_MHI_M0.attr,
+ &dev_attr_MHI_M3.attr,
NULL,
};
@@ -38,9 +41,9 @@ static struct attribute_group mhi_attribute_group = {
int mhi_pci_suspend(struct device *dev)
{
int r = 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
- mhi_log(MHI_MSG_INFO, "Entered\n");
-
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
/* if rpm status still active then force suspend */
if (!pm_runtime_status_suspended(dev)) {
r = mhi_runtime_suspend(dev);
@@ -51,86 +54,134 @@ int mhi_pci_suspend(struct device *dev)
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
- mhi_log(MHI_MSG_INFO, "Exit\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit\n");
return r;
}
-int mhi_runtime_suspend(struct device *dev)
+static int mhi_pm_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt,
+ bool force_m3)
{
int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
- mutex_lock(&mhi_dev_ctxt->pm_lock);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
-
- mhi_log(MHI_MSG_INFO, "Entered with State:0x%x %s\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered with State:0x%x %s\n",
mhi_dev_ctxt->mhi_pm_state,
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
/* Link is already disabled */
if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE ||
mhi_dev_ctxt->mhi_pm_state == MHI_PM_M3) {
- mhi_log(MHI_MSG_INFO, "Already in active state, exiting\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Already in M3 State\n");
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mutex_unlock(&mhi_dev_ctxt->pm_lock);
return 0;
}
- if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
- mhi_log(MHI_MSG_INFO, "Busy, Aborting Runtime Suspend\n");
+	if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake) &&
+		     !force_m3)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Busy, Aborting M3\n");
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mutex_unlock(&mhi_dev_ctxt->pm_lock);
return -EBUSY;
}
- mhi_assert_device_wake(mhi_dev_ctxt, false);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
if (!r) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to get M0||M1 event, timeout, current state:%s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
- r = -EIO;
- goto rpm_suspend_exit;
+ return -EIO;
}
- mhi_log(MHI_MSG_INFO, "Allowing M3 State\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Allowing M3 State\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_ENTER;
mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_log(MHI_MSG_INFO,
- "Waiting for M3 completion.\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Waiting for M3 completion.\n");
r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
if (!r) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to get M3 event, timeout, current state:%s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
- r = -EIO;
- goto rpm_suspend_exit;
+ return -EIO;
}
+ return 0;
+}
+
+static int mhi_pm_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ int r;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered with State:0x%x %s\n",
+ mhi_dev_ctxt->mhi_pm_state,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_EXIT;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+ /* Set and wait for M0 Event */
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+ msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+ if (!r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to get M0 event, timeout\n");
+ r = -EIO;
+ } else
+ r = 0;
+
+ return r;
+}
+
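
Both new helpers follow the same handshake: flip mhi_pm_state under the writer side of pm_xfer_lock, write the target state to the device, then sleep on a wait queue that the interrupt path completes when the state-change event arrives. Condensed shape, illustrative only (shown for the M3 direction):

    /* Condensed shape of mhi_pm_initiate_m3()/_m0() (illustrative) */
    static int initiate_state_sketch(struct mhi_device_ctxt *ctxt)
    {
            write_lock_irq(&ctxt->pm_xfer_lock);
            ctxt->mhi_pm_state = MHI_PM_M3_ENTER;   /* or MHI_PM_M3_EXIT */
            mhi_set_m_state(ctxt, MHI_STATE_M3);    /* write MHICTRL     */
            write_unlock_irq(&ctxt->pm_xfer_lock);

            /* the ISR completes this wait once the state event lands */
            if (!wait_event_timeout(*ctxt->mhi_ev_wq.m3_event,
                            ctxt->mhi_state == MHI_STATE_M3,
                            msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT)))
                    return -EIO;
            return 0;
    }
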
+int mhi_runtime_suspend(struct device *dev)
+{
+ int r = 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Enter\n");
+
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ r = mhi_pm_initiate_m3(mhi_dev_ctxt, false);
+ if (r) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "abort due to ret:%d\n", r);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return r;
+ }
r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to Turn off link ret:%d\n", r);
}
-rpm_suspend_exit:
- mhi_log(MHI_MSG_INFO, "Exited\n");
mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited with ret:%d\n", r);
+
return r;
}
int mhi_runtime_idle(struct device *dev)
{
- mhi_log(MHI_MSG_INFO, "Entered returning -EBUSY\n");
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered returning -EBUSY\n");
/*
* RPM framework during runtime resume always calls
@@ -150,7 +201,7 @@ int mhi_runtime_idle(struct device *dev)
int mhi_runtime_resume(struct device *dev)
{
int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
mutex_lock(&mhi_dev_ctxt->pm_lock);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
@@ -159,45 +210,24 @@ int mhi_runtime_resume(struct device *dev)
/* turn on link */
r = mhi_turn_on_pcie_link(mhi_dev_ctxt);
- if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to resume link\n");
- goto rpm_resume_exit;
- }
-
- write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_EXIT;
- write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
-
- /* Set and wait for M0 Event */
- write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
- write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
- if (!r) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to get M0 event, timeout\n");
- r = -EIO;
+ if (r)
goto rpm_resume_exit;
- }
- r = 0; /* no errors */
+ r = mhi_pm_initiate_m0(mhi_dev_ctxt);
rpm_resume_exit:
mutex_unlock(&mhi_dev_ctxt->pm_lock);
- mhi_log(MHI_MSG_INFO, "Exited with :%d\n", r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited with :%d\n", r);
return r;
}
int mhi_pci_resume(struct device *dev)
{
int r = 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
r = mhi_runtime_resume(dev);
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to resume link\n");
} else {
pm_runtime_set_active(dev);
@@ -207,6 +237,97 @@ int mhi_pci_resume(struct device *dev)
return r;
}
+static int mhi_pm_slave_mode_power_on(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ int ret_val;
+ u32 timeout = mhi_dev_ctxt->poll_reset_timeout_ms;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_POR;
+ ret_val = set_mhi_base_state(mhi_dev_ctxt);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+ if (ret_val) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Error Setting MHI Base State %d\n", ret_val);
+ goto unlock_pm_lock;
+ }
+
+ if (mhi_dev_ctxt->base_state != STATE_TRANSITION_BHI) {
+ ret_val = -EIO;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Invalid Base State, cur_state:%s\n",
+ state_transition_str(mhi_dev_ctxt->base_state));
+ goto unlock_pm_lock;
+ }
+
+ reinit_completion(&mhi_dev_ctxt->cmd_complete);
+ init_mhi_base_state(mhi_dev_ctxt);
+
+	/*
+	 * Keep wake asserted until the device reaches AMSS;
+	 * at AMSS the counts are decremented.
+	 */
+ read_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, false);
+ read_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+ ret_val = wait_for_completion_timeout(&mhi_dev_ctxt->cmd_complete,
+ msecs_to_jiffies(timeout));
+ if (!ret_val || mhi_dev_ctxt->dev_exec_env != MHI_EXEC_ENV_AMSS)
+ ret_val = -EIO;
+ else
+ ret_val = 0;
+
+ if (ret_val) {
+ read_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
+ read_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ }
+
+unlock_pm_lock:
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit with ret:%d\n", ret_val);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return ret_val;
+}
+
+static int mhi_pm_slave_mode_suspend(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ int r;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+
+ r = mhi_pm_initiate_m3(mhi_dev_ctxt, false);
+ if (r)
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "abort due to ret:%d\n", r);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit with ret:%d\n", r);
+
+ return r;
+}
+
+static int mhi_pm_slave_mode_resume(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ int r;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+
+ r = mhi_pm_initiate_m0(mhi_dev_ctxt);
+ if (r)
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "M3 exit failed ret:%d\n", r);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exit with ret:%d\n", r);
+
+ return r;
+}
+
int mhi_init_pm_sysfs(struct device *dev)
{
return sysfs_create_group(&dev->kobj, &mhi_attribute_group);
@@ -220,12 +341,29 @@ void mhi_rem_pm_sysfs(struct device *dev)
ssize_t sysfs_init_m0(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
+
+ pm_runtime_get(&mhi_dev_ctxt->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->pcie_device->dev);
+
+ return count;
+}
+
+ssize_t sysfs_init_m3(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mhi_device_ctxt *mhi_dev_ctxt = dev_get_drvdata(dev);
+
+ if (atomic_read(&mhi_dev_ctxt->counters.device_wake) == 0) {
+		mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+			"Scheduling RPM suspend\n");
+		pm_runtime_mark_last_busy(&mhi_dev_ctxt->pcie_device->dev);
+		pm_request_autosuspend(&mhi_dev_ctxt->pcie_device->dev);
+ }
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
return count;
}
@@ -234,45 +372,45 @@ int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
struct pci_dev *pcie_dev;
int r = 0;
- mhi_log(MHI_MSG_INFO, "Entered...\n");
- pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered...\n");
+ pcie_dev = mhi_dev_ctxt->pcie_device;
if (0 == mhi_dev_ctxt->flags.link_up) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Link already marked as down, nothing to do\n");
goto exit;
}
r = pci_save_state(pcie_dev);
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to save pcie state ret: %d\n",
- r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to save pcie state ret: %d\n", r);
}
- mhi_dev_ctxt->dev_props->pcie_state = pci_store_saved_state(pcie_dev);
+ mhi_dev_ctxt->core.pcie_state = pci_store_saved_state(pcie_dev);
pci_disable_device(pcie_dev);
r = pci_set_power_state(pcie_dev, PCI_D3hot);
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to set pcie power state to D3 hot ret: %d\n",
- r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to set pcie power state to D3hot ret:%d\n", r);
}
r = msm_pcie_pm_control(MSM_PCIE_SUSPEND,
pcie_dev->bus->number,
pcie_dev,
- NULL,
- 0);
+ NULL,
+ 0);
if (r)
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Failed to suspend pcie bus ret 0x%x\n", r);
r = mhi_set_bus_request(mhi_dev_ctxt, 0);
if (r)
- mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Failed to set bus freq ret %d\n", r);
mhi_dev_ctxt->flags.link_up = 0;
exit:
- mhi_log(MHI_MSG_INFO, "Exited...\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited...\n");
+
return 0;
}
@@ -281,17 +419,16 @@ int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
int r = 0;
struct pci_dev *pcie_dev;
- pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
+ pcie_dev = mhi_dev_ctxt->pcie_device;
- mhi_log(MHI_MSG_INFO, "Entered...\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered...\n");
if (mhi_dev_ctxt->flags.link_up)
goto exit;
r = mhi_set_bus_request(mhi_dev_ctxt, 1);
if (r)
- mhi_log(MHI_MSG_CRITICAL,
- "Could not set bus frequency ret: %d\n",
- r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Could not set bus frequency ret: %d\n", r);
r = msm_pcie_pm_control(MSM_PCIE_RESUME,
pcie_dev->bus->number,
@@ -299,24 +436,50 @@ int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
NULL,
0);
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to resume pcie bus ret %d\n", r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to resume pcie bus ret %d\n", r);
goto exit;
}
r = pci_enable_device(pcie_dev);
if (r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to enable device ret:%d\n",
- r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to enable device ret:%d\n", r);
pci_load_and_free_saved_state(pcie_dev,
- &mhi_dev_ctxt->dev_props->pcie_state);
+ &mhi_dev_ctxt->core.pcie_state);
pci_restore_state(pcie_dev);
pci_set_master(pcie_dev);
mhi_dev_ctxt->flags.link_up = 1;
exit:
- mhi_log(MHI_MSG_INFO, "Exited...\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited...\n");
return r;
}
+
+int mhi_pm_control_device(struct mhi_device *mhi_device,
+ enum mhi_dev_ctrl ctrl)
+{
+ struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->mhi_dev_ctxt;
+
+ if (!mhi_dev_ctxt)
+ return -EINVAL;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered with cmd:%d\n", ctrl);
+
+ switch (ctrl) {
+ case MHI_DEV_CTRL_INIT:
+ return bhi_probe(mhi_dev_ctxt);
+ case MHI_DEV_CTRL_POWER_ON:
+ return mhi_pm_slave_mode_power_on(mhi_dev_ctxt);
+ case MHI_DEV_CTRL_SUSPEND:
+ return mhi_pm_slave_mode_suspend(mhi_dev_ctxt);
+ case MHI_DEV_CTRL_RESUME:
+ return mhi_pm_slave_mode_resume(mhi_dev_ctxt);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL(mhi_pm_control_device);
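
mhi_pm_control_device() is the slave-mode lifecycle entry point for the bus master registered earlier. A hedged sketch of the expected call sequence; the wrapper name is illustrative and the error meanings follow the helpers above:

    /* Illustrative slave-mode power cycle driven by the bus master */
    static int bus_master_power_cycle(struct mhi_device *mdev)
    {
            int ret;

            ret = mhi_pm_control_device(mdev, MHI_DEV_CTRL_POWER_ON);
            if (ret)
                    return ret;     /* device never reached AMSS */

            ret = mhi_pm_control_device(mdev, MHI_DEV_CTRL_SUSPEND);
            if (ret == -EBUSY)
                    return ret;     /* clients still hold wake votes */

            return mhi_pm_control_device(mdev, MHI_DEV_CTRL_RESUME);
    }
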
diff --git a/drivers/platform/msm/mhi/mhi_ring_ops.c b/drivers/platform/msm/mhi/mhi_ring_ops.c
index 07d0098a1b61..e15055f7db9c 100644
--- a/drivers/platform/msm/mhi/mhi_ring_ops.c
+++ b/drivers/platform/msm/mhi/mhi_ring_ops.c
@@ -21,7 +21,6 @@ static int add_element(struct mhi_ring *ring, void **rp,
if (NULL == ring || 0 == ring->el_size
|| NULL == ring->base || 0 == ring->len) {
- mhi_log(MHI_MSG_ERROR, "Bad input parameters, quitting.\n");
return -EINVAL;
}
@@ -39,8 +38,6 @@ static int add_element(struct mhi_ring *ring, void **rp,
if (ring->overwrite_en) {
ctxt_del_element(ring, NULL);
} else {
- mhi_log(MHI_MSG_INFO, "Ring 0x%lX is full\n",
- (uintptr_t)ring->base);
return -ENOSPC;
}
}
@@ -92,8 +89,6 @@ int delete_element(struct mhi_ring *ring, void **rp,
if (r)
return r;
if (d_wp == d_rp) {
- mhi_log(MHI_MSG_VERBOSE, "Ring 0x%lx is empty\n",
- (uintptr_t)ring->base);
if (NULL != assigned_addr)
*assigned_addr = NULL;
return -ENODATA;
@@ -113,23 +108,26 @@ int delete_element(struct mhi_ring *ring, void **rp,
int mhi_get_free_desc(struct mhi_client_handle *client_handle)
{
u32 chan;
+ struct mhi_client_config *client_config;
struct mhi_device_ctxt *ctxt;
int bb_ring, ch_ring;
- if (!client_handle || MHI_HANDLE_MAGIC != client_handle->magic ||
- !client_handle->mhi_dev_ctxt)
+ if (!client_handle)
return -EINVAL;
- ctxt = client_handle->mhi_dev_ctxt;
- chan = client_handle->chan_info.chan_nr;
+ client_config = client_handle->client_config;
+ ctxt = client_config->mhi_dev_ctxt;
+ chan = client_config->chan_info.chan_nr;
- bb_ring = get_nr_avail_ring_elements(&ctxt->chan_bb_list[chan]);
- ch_ring = get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
+ bb_ring = get_nr_avail_ring_elements(ctxt, &ctxt->chan_bb_list[chan]);
+ ch_ring = get_nr_avail_ring_elements(ctxt,
+ &ctxt->mhi_local_chan_ctxt[chan]);
return min(bb_ring, ch_ring);
}
EXPORT_SYMBOL(mhi_get_free_desc);
-int get_nr_avail_ring_elements(struct mhi_ring *ring)
+int get_nr_avail_ring_elements(struct mhi_device_ctxt *mhi_dev_ctxt,
+ struct mhi_ring *ring)
{
u32 nr_el = 0;
uintptr_t ring_size = 0;
@@ -138,7 +136,7 @@ int get_nr_avail_ring_elements(struct mhi_ring *ring)
ring_size = ring->len / ring->el_size;
ret_val = get_nr_enclosed_el(ring, ring->rp, ring->wp, &nr_el);
if (ret_val != 0) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to get enclosed el ret %d.\n", ret_val);
return 0;
}
@@ -155,19 +153,14 @@ int get_nr_enclosed_el(struct mhi_ring *ring, void *rp,
if (NULL == ring || 0 == ring->el_size ||
NULL == ring->base || 0 == ring->len) {
- mhi_log(MHI_MSG_ERROR, "Bad input parameters, quitting.\n");
return -EINVAL;
}
r = get_element_index(ring, rp, &index_rp);
- if (r) {
- mhi_log(MHI_MSG_CRITICAL, "Bad element index rp 0x%p.\n", rp);
+ if (r)
return r;
- }
r = get_element_index(ring, wp, &index_wp);
- if (r) {
- mhi_log(MHI_MSG_CRITICAL, "Bad element index wp 0x%p.\n", wp);
+ if (r)
return r;
- }
ring_size = ring->len / ring->el_size;
if (index_rp < index_wp)
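
For reference, the occupancy arithmetic that get_nr_enclosed_el() performs on element indices can be modeled standalone; the wraparound branch below is an assumption based on standard circular-buffer math, since the hunk is truncated at this point:

#include <stdio.h>

/* Standalone model of the ring-occupancy math: index_rp/index_wp
 * are element indices; when the writer has wrapped, the count spans
 * the end of the ring. */
static unsigned int nr_enclosed(unsigned int rp, unsigned int wp,
				unsigned int ring_size)
{
	if (rp <= wp)
		return wp - rp;
	return ring_size - rp + wp;
}

int main(void)
{
	/* 8-element ring, writer wrapped past the end */
	printf("%u\n", nr_enclosed(6, 2, 8)); /* prints 4 */
	return 0;
}
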
diff --git a/drivers/platform/msm/mhi/mhi_ssr.c b/drivers/platform/msm/mhi/mhi_ssr.c
index defd6f4fd137..22481dede21a 100644
--- a/drivers/platform/msm/mhi/mhi_ssr.c
+++ b/drivers/platform/msm/mhi/mhi_ssr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,40 +24,35 @@
static int mhi_ssr_notify_cb(struct notifier_block *nb,
unsigned long action, void *data)
{
-
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ container_of(nb, struct mhi_device_ctxt, mhi_ssr_nb);
switch (action) {
case SUBSYS_BEFORE_POWERUP:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received Subsystem event BEFORE_POWERUP\n");
break;
case SUBSYS_AFTER_POWERUP:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received Subsystem event AFTER_POWERUP\n");
break;
case SUBSYS_POWERUP_FAILURE:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received Subsystem event POWERUP_FAILURE\n");
break;
case SUBSYS_BEFORE_SHUTDOWN:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received Subsystem event BEFORE_SHUTDOWN\n");
- mhi_log(MHI_MSG_INFO,
- "Not notifying clients\n");
break;
case SUBSYS_AFTER_SHUTDOWN:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received Subsystem event AFTER_SHUTDOWN\n");
- mhi_log(MHI_MSG_INFO,
- "Not notifying clients\n");
break;
case SUBSYS_RAMDUMP_NOTIFICATION:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received Subsystem event RAMDUMP\n");
- mhi_log(MHI_MSG_INFO,
- "Not notifying clients\n");
break;
default:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received ESOC notifcation %d, NOT handling\n",
(int)action);
break;
@@ -65,36 +60,30 @@ static int mhi_ssr_notify_cb(struct notifier_block *nb,
return NOTIFY_OK;
}
-static struct notifier_block mhi_ssr_nb = {
- .notifier_call = mhi_ssr_notify_cb,
-};
-
int mhi_esoc_register(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int ret_val = 0;
struct device_node *np;
- struct pci_driver *mhi_driver;
- struct device *dev = &mhi_dev_ctxt->dev_info->pcie_device->dev;
+ struct device *dev = &mhi_dev_ctxt->pcie_device->dev;
- mhi_driver = mhi_dev_ctxt->dev_info->mhi_pcie_driver;
np = dev->of_node;
mhi_dev_ctxt->esoc_handle = devm_register_esoc_client(dev, "mdm");
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Of table of pcie struct device property is dev->of_node %p\n",
np);
if (IS_ERR_OR_NULL(mhi_dev_ctxt->esoc_handle)) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to register for SSR, ret %lx\n",
(uintptr_t)mhi_dev_ctxt->esoc_handle);
return -EIO;
}
-
+ mhi_dev_ctxt->mhi_ssr_nb.notifier_call = mhi_ssr_notify_cb;
mhi_dev_ctxt->esoc_ssr_handle = subsys_notif_register_notifier(
mhi_dev_ctxt->esoc_handle->name,
- &mhi_ssr_nb);
+ &mhi_dev_ctxt->mhi_ssr_nb);
if (IS_ERR_OR_NULL(mhi_dev_ctxt->esoc_ssr_handle)) {
ret_val = PTR_RET(mhi_dev_ctxt->esoc_ssr_handle);
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Can't find esoc desc ret 0x%lx\n",
(uintptr_t)mhi_dev_ctxt->esoc_ssr_handle);
}
@@ -107,18 +96,25 @@ void mhi_notify_client(struct mhi_client_handle *client_handle,
{
struct mhi_cb_info cb_info = {0};
struct mhi_result result = {0};
+ struct mhi_client_config *client_config;
cb_info.result = NULL;
cb_info.cb_reason = reason;
- if (NULL != client_handle &&
- NULL != client_handle->client_info.mhi_client_cb) {
- result.user_data = client_handle->user_data;
- cb_info.chan = client_handle->chan_info.chan_nr;
+ if (client_handle == NULL)
+ return;
+
+ client_config = client_handle->client_config;
+
+ if (client_config->client_info.mhi_client_cb) {
+ result.user_data = client_config->user_data;
+ cb_info.chan = client_config->chan_info.chan_nr;
cb_info.result = &result;
- mhi_log(MHI_MSG_INFO, "Calling back for chan %d, reason %d\n",
- cb_info.chan, reason);
- client_handle->client_info.mhi_client_cb(&cb_info);
+ mhi_log(client_config->mhi_dev_ctxt, MHI_MSG_INFO,
+ "Calling back for chan %d, reason %d\n",
+ cb_info.chan,
+ reason);
+ client_config->client_info.mhi_client_cb(&cb_info);
}
}
@@ -136,16 +132,22 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
}
}
-int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
+int set_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
- mhi_pcie_dev->bhi_ctxt.bhi_base = mhi_pcie_dev->core.bar0_base;
- pcie_word_val = mhi_reg_read(mhi_pcie_dev->bhi_ctxt.bhi_base, BHIOFF);
- mhi_pcie_dev->bhi_ctxt.bhi_base += pcie_word_val;
- pcie_word_val = mhi_reg_read(mhi_pcie_dev->bhi_ctxt.bhi_base,
+ mhi_dev_ctxt->bhi_ctxt.bhi_base = mhi_dev_ctxt->core.bar0_base;
+ pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base, BHIOFF);
+
+ /* confirm it's a valid reading */
+ if (unlikely(pcie_word_val == U32_MAX)) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Invalid BHI Offset:0x%x\n", pcie_word_val);
+ return -EIO;
+ }
+ mhi_dev_ctxt->bhi_ctxt.bhi_base += pcie_word_val;
+ pcie_word_val = mhi_reg_read(mhi_dev_ctxt->bhi_ctxt.bhi_base,
BHI_EXECENV);
mhi_dev_ctxt->dev_exec_env = pcie_word_val;
if (pcie_word_val == MHI_EXEC_ENV_AMSS) {
@@ -153,55 +155,50 @@ int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
} else if (pcie_word_val == MHI_EXEC_ENV_PBL) {
mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI;
} else {
- mhi_log(MHI_MSG_ERROR, "Invalid EXEC_ENV: 0x%x\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Invalid EXEC_ENV: 0x%x\n",
pcie_word_val);
r = -EIO;
}
- mhi_log(MHI_MSG_INFO, "EXEC_ENV: %d Base state %d\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "EXEC_ENV: %d Base state %d\n",
pcie_word_val, mhi_dev_ctxt->base_state);
return r;
}
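
The new U32_MAX check above exists because MMIO reads across a dead PCIe link return all-ones. A trivial standalone model of the validity test, with assumed names:

#include <stdint.h>
#include <stdio.h>

/* A dead link reads back all-ones, so U32_MAX means the register
 * value cannot be trusted and the probe bails out with -EIO. */
static int check_bhi_offset(uint32_t bhioff)
{
	if (bhioff == UINT32_MAX)
		return -5; /* -EIO in kernel terms */
	return 0;
}

int main(void)
{
	printf("%d\n", check_bhi_offset(0xFFFFFFFFu)); /* -5 */
	printf("%d\n", check_bhi_offset(0x100));       /* 0  */
	return 0;
}
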
void mhi_link_state_cb(struct msm_pcie_notify *notify)
{
-
- struct mhi_pcie_dev_info *mhi_pcie_dev;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
- if (NULL == notify || NULL == notify->data) {
- mhi_log(MHI_MSG_CRITICAL,
- "Incomplete handle received\n");
+ if (!notify || !notify->data) {
+ pr_err("%s: incomplete handle received\n", __func__);
return;
}
- mhi_pcie_dev = notify->data;
- mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
+ mhi_dev_ctxt = notify->data;
switch (notify->event) {
case MSM_PCIE_EVENT_LINKDOWN:
- mhi_log(MHI_MSG_INFO, "Received MSM_PCIE_EVENT_LINKDOWN\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Received MSM_PCIE_EVENT_LINKDOWN\n");
break;
case MSM_PCIE_EVENT_LINKUP:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received MSM_PCIE_EVENT_LINKUP\n");
- mhi_pcie_dev->link_up_cntr++;
+ mhi_dev_ctxt->counters.link_up_cntr++;
break;
case MSM_PCIE_EVENT_WAKEUP:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received MSM_PCIE_EVENT_WAKE\n");
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
if (mhi_dev_ctxt->flags.mhi_initialized) {
- pm_runtime_get(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- pm_runtime_put_noidle(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
+ mhi_dev_ctxt->runtime_get(mhi_dev_ctxt);
+ mhi_dev_ctxt->runtime_put(mhi_dev_ctxt);
}
break;
default:
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Received bad link event\n");
return;
}
@@ -213,9 +210,9 @@ int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
r = mhi_init_state_transition(mhi_dev_ctxt, mhi_dev_ctxt->base_state);
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to start state change event, to %d\n",
- mhi_dev_ctxt->base_state);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+ "Failed to start state change event, to %d\n",
+ mhi_dev_ctxt->base_state);
}
return r;
}
diff --git a/drivers/platform/msm/mhi/mhi_states.c b/drivers/platform/msm/mhi/mhi_states.c
index 1021a56d1b3d..a4da6c21b50d 100644
--- a/drivers/platform/msm/mhi/mhi_states.c
+++ b/drivers/platform/msm/mhi/mhi_states.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,24 +19,24 @@
const char *state_transition_str(enum STATE_TRANSITION state)
{
- static const char * const mhi_states_transition_str[] = {
- "RESET",
- "READY",
- "M0",
- "M1",
- "M2",
- "M3",
- "BHI",
- "SBL",
- "AMSS",
- "LINK_DOWN",
- "WAKE"
+ static const char * const
+ mhi_states_transition_str[STATE_TRANSITION_MAX] = {
+ [STATE_TRANSITION_RESET] = "RESET",
+ [STATE_TRANSITION_READY] = "READY",
+ [STATE_TRANSITION_M0] = "M0",
+ [STATE_TRANSITION_M1] = "M1",
+ [STATE_TRANSITION_M2] = "M2",
+ [STATE_TRANSITION_M3] = "M3",
+ [STATE_TRANSITION_BHI] = "BHI",
+ [STATE_TRANSITION_SBL] = "SBL",
+ [STATE_TRANSITION_AMSS] = "AMSS",
+ [STATE_TRANSITION_LINK_DOWN] = "LINK_DOWN",
+ [STATE_TRANSITION_WAKE] = "WAKE",
+ [STATE_TRANSITION_BHIE] = "BHIE",
+ [STATE_TRANSITION_SYS_ERR] = "SYS_ERR",
};
- if (state == STATE_TRANSITION_SYS_ERR)
- return "SYS_ERR";
-
- return (state <= STATE_TRANSITION_WAKE) ?
+ return (state < STATE_TRANSITION_MAX) ?
mhi_states_transition_str[state] : "Invalid";
}
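
The table rework swaps positional initializers for designated ones, so each string stays tied to its enum value, and a single bounds check against the _MAX sentinel replaces the old SYS_ERR special case. A compilable model of the pattern (enum names abbreviated):

#include <stdio.h>

enum state { ST_RESET, ST_READY, ST_SYS_ERR, ST_MAX };

/* Designated initializers: reordering the enum cannot silently
 * misalign the strings. */
static const char *const state_str[ST_MAX] = {
	[ST_RESET]   = "RESET",
	[ST_READY]   = "READY",
	[ST_SYS_ERR] = "SYS_ERR",
};

static const char *to_str(enum state s)
{
	return (s < ST_MAX) ? state_str[s] : "Invalid";
}

int main(void)
{
	printf("%s %s\n", to_str(ST_SYS_ERR), to_str(ST_MAX));
	/* prints: SYS_ERR Invalid */
	return 0;
}
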
@@ -94,7 +94,7 @@ static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 i = 0;
struct mhi_ring *local_ctxt = NULL;
- mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Ringing chan dbs\n");
for (i = 0; i < MHI_MAX_CHANNELS; ++i)
if (VALID_CHAN_NR(i)) {
local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
@@ -115,7 +115,7 @@ static void ring_all_cmd_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
u64 rp = 0;
struct mhi_ring *local_ctxt = NULL;
- mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE, "Ringing chan dbs\n");
local_ctxt = &mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
rp = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
@@ -158,12 +158,23 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
}
}
-static int process_m0_transition(
- struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
+static int process_bhie_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
+ enum STATE_TRANSITION cur_work_item)
{
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
+ mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_BHIE;
+ wake_up(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
+
+ return 0;
+}
- mhi_log(MHI_MSG_INFO, "Entered With State %s\n",
+int process_m0_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ unsigned long flags;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered With State %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
switch (mhi_dev_ctxt->mhi_state) {
@@ -177,12 +188,12 @@ static int process_m0_transition(
break;
}
- write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
mhi_dev_ctxt->mhi_pm_state = MHI_PM_M0;
- write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_assert_device_wake(mhi_dev_ctxt, true);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, true);
if (mhi_dev_ctxt->flags.mhi_initialized) {
ring_all_ev_dbs(mhi_dev_ctxt);
@@ -190,10 +201,11 @@ static int process_m0_transition(
ring_all_cmd_dbs(mhi_dev_ctxt);
}
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
wake_up(mhi_dev_ctxt->mhi_ev_wq.m0_event);
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
+
return 0;
}
@@ -207,7 +219,7 @@ void process_m1_transition(struct work_struct *work)
mutex_lock(&mhi_dev_ctxt->pm_lock);
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Processing M1 state transition from state %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
@@ -218,7 +230,8 @@ void process_m1_transition(struct work_struct *work)
return;
}
- mhi_log(MHI_MSG_INFO, "Transitioning to M2 Transition\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Transitioning to M2 Transition\n");
mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1_M2_TRANSITION;
mhi_dev_ctxt->counters.m1_m2++;
mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
@@ -230,34 +243,32 @@ void process_m1_transition(struct work_struct *work)
/* During DEBOUNCE Time We could be receiving M0 Event */
if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_M1_M2_TRANSITION) {
- mhi_log(MHI_MSG_INFO, "Entered M2 State\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Entered M2 State\n");
mhi_dev_ctxt->mhi_pm_state = MHI_PM_M2;
}
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
- mhi_log(MHI_MSG_INFO, "Exiting M2 Immediately, count:%d\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Exiting M2 Immediately, count:%d\n",
atomic_read(&mhi_dev_ctxt->counters.device_wake));
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_assert_device_wake(mhi_dev_ctxt, true);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->assert_wake(mhi_dev_ctxt, true);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- } else {
- mhi_log(MHI_MSG_INFO, "Schedule RPM suspend");
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
- pm_request_autosuspend(&mhi_dev_ctxt->
- dev_info->pcie_device->dev);
+ } else if (mhi_dev_ctxt->core.pci_master) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Schedule RPM suspend");
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->pcie_device->dev);
+ pm_request_autosuspend(&mhi_dev_ctxt->pcie_device->dev);
}
mutex_unlock(&mhi_dev_ctxt->pm_lock);
}
-static int process_m3_transition(
- struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
+int process_m3_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
{
-
- mhi_log(MHI_MSG_INFO,
+	unsigned long flags;
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Entered with State %s\n",
TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
@@ -272,45 +283,24 @@ static int process_m3_transition(
break;
}
- write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3;
- write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ write_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
wake_up(mhi_dev_ctxt->mhi_ev_wq.m3_event);
return 0;
}
-static int process_link_down_transition(
- struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
-{
- mhi_log(MHI_MSG_INFO,
- "Entered with State %s\n",
- TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
- return -EIO;
-}
-
-static int process_wake_transition(
- struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
-{
- mhi_log(MHI_MSG_INFO,
- "Entered with State %s\n",
- TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
- return -EIO;
-
-}
-
static int process_bhi_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- mhi_log(MHI_MSG_INFO, "Entered\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
return 0;
}
@@ -320,11 +310,12 @@ static int process_ready_transition(
{
int r = 0;
- mhi_log(MHI_MSG_INFO, "Processing READY state transition\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Processing READY state transition\n");
r = mhi_reset_all_thread_queues(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failed to reset thread queues\n");
return r;
}
@@ -335,7 +326,7 @@ static int process_ready_transition(
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
/* Initialize MMIO */
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failure during MMIO initialization\n");
return r;
}
@@ -344,13 +335,12 @@ static int process_ready_transition(
cur_work_item);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
if (r) {
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
"Failure during event ring init\n");
return r;
}
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_dev_ctxt->flags.stop_threads = 0;
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
MHICTRL_MHISTATE_MASK,
@@ -379,7 +369,8 @@ static int process_reset_transition(
enum STATE_TRANSITION cur_work_item)
{
int r = 0, i = 0;
- mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Processing RESET state transition\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
@@ -387,11 +378,12 @@ static int process_reset_transition(
mhi_dev_ctxt->counters.mhi_reset_cntr++;
r = mhi_test_for_device_reset(mhi_dev_ctxt);
if (r)
- mhi_log(MHI_MSG_INFO, "Device not RESET ret %d\n", r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Device not RESET ret %d\n", r);
r = mhi_test_for_device_ready(mhi_dev_ctxt);
if (r) {
- mhi_log(MHI_MSG_ERROR, "timed out waiting for ready ret:%d\n",
- r);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "timed out waiting for ready ret:%d\n", r);
return r;
}
@@ -417,22 +409,12 @@ static int process_reset_transition(
r = mhi_init_state_transition(mhi_dev_ctxt,
STATE_TRANSITION_READY);
if (0 != r)
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to initiate %s state trans\n",
state_transition_str(STATE_TRANSITION_READY));
return r;
}
-static int process_syserr_transition(
- struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
-{
- mhi_log(MHI_MSG_INFO,
- "Entered with State %s\n",
- TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
- return -EIO;
-}
-
static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_EXEC_ENV exec_env)
{
@@ -443,7 +425,8 @@ static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
cb_info.cb_reason = MHI_CB_MHI_ENABLED;
- mhi_log(MHI_MSG_INFO, "Enabling Clients, exec env %d.\n", exec_env);
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Enabling Clients, exec env %d.\n", exec_env);
for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
if (!VALID_CHAN_NR(i))
continue;
@@ -455,14 +438,15 @@ static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_notify_client(client_handle, MHI_CB_MHI_ENABLED);
}
- mhi_log(MHI_MSG_INFO, "Done.\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Done.\n");
}
static int process_sbl_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- mhi_log(MHI_MSG_INFO, "Enabled\n");
+
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Enabled\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
@@ -476,7 +460,8 @@ static int process_amss_transition(
{
int r = 0;
- mhi_log(MHI_MSG_INFO, "Processing AMSS state transition\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Processing AMSS state transition\n");
write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_AMSS;
write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
@@ -486,41 +471,40 @@ static int process_amss_transition(
cur_work_item);
mhi_dev_ctxt->flags.mhi_initialized = 1;
if (r) {
- mhi_log(MHI_MSG_CRITICAL,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Failed to set local chan state ret %d\n", r);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
return r;
}
- read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- ring_all_chan_dbs(mhi_dev_ctxt, true);
- read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_log(MHI_MSG_INFO,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
"Notifying clients that MHI is enabled\n");
enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
} else {
- mhi_log(MHI_MSG_INFO, "MHI is initialized\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "MHI is initialized\n");
}
- read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- ring_all_ev_dbs(mhi_dev_ctxt);
- read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ complete(&mhi_dev_ctxt->cmd_complete);
/*
* runtime_allow will decrement usage_count, counts were
* incremented by pci fw pci_pm_init() or by
* mhi shutdown/ssr apis.
*/
- mhi_log(MHI_MSG_INFO, "Allow runtime suspend\n");
+ if (mhi_dev_ctxt->core.pci_master) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Allow runtime suspend\n");
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- pm_runtime_allow(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->pcie_device->dev);
+ pm_runtime_allow(&mhi_dev_ctxt->pcie_device->dev);
+ }
/* During probe we incremented, releasing that count */
read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->deassert_wake(mhi_dev_ctxt);
read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
return 0;
}
@@ -530,7 +514,8 @@ static int process_stt_work_item(
{
int r = 0;
- mhi_log(MHI_MSG_INFO, "Transitioning to %s\n",
+ mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+ "Transitioning to %s\n",
state_transition_str(cur_work_item));
trace_mhi_state(cur_work_item);
switch (cur_work_item) {
@@ -549,25 +534,11 @@ static int process_stt_work_item(
case STATE_TRANSITION_AMSS:
r = process_amss_transition(mhi_dev_ctxt, cur_work_item);
break;
- case STATE_TRANSITION_M0:
- r = process_m0_transition(mhi_dev_ctxt, cur_work_item);
- break;
- case STATE_TRANSITION_M3:
- r = process_m3_transition(mhi_dev_ctxt, cur_work_item);
- break;
- case STATE_TRANSITION_SYS_ERR:
- r = process_syserr_transition(mhi_dev_ctxt,
- cur_work_item);
- break;
- case STATE_TRANSITION_LINK_DOWN:
- r = process_link_down_transition(mhi_dev_ctxt,
- cur_work_item);
- break;
- case STATE_TRANSITION_WAKE:
- r = process_wake_transition(mhi_dev_ctxt, cur_work_item);
+ case STATE_TRANSITION_BHIE:
+ r = process_bhie_transition(mhi_dev_ctxt, cur_work_item);
break;
default:
- mhi_log(MHI_MSG_ERROR,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
"Unrecongized state: %s\n",
state_transition_str(cur_work_item));
break;
@@ -575,46 +546,26 @@ static int process_stt_work_item(
return r;
}
-int mhi_state_change_thread(void *ctxt)
+void mhi_state_change_worker(struct work_struct *work)
{
int r = 0;
- unsigned long flags = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = (struct mhi_device_ctxt *)ctxt;
+ struct mhi_device_ctxt *mhi_dev_ctxt = container_of(work,
+ struct mhi_device_ctxt,
+ st_thread_worker);
enum STATE_TRANSITION cur_work_item;
struct mhi_state_work_queue *work_q =
&mhi_dev_ctxt->state_change_work_item_list;
struct mhi_ring *state_change_q = &work_q->q_info;
- if (NULL == mhi_dev_ctxt) {
- mhi_log(MHI_MSG_ERROR, "Got bad context, quitting\n");
- return -EIO;
- }
- for (;;) {
- r = wait_event_interruptible(
- *mhi_dev_ctxt->mhi_ev_wq.state_change_event,
- ((work_q->q_info.rp != work_q->q_info.wp) &&
- !mhi_dev_ctxt->flags.st_thread_stopped));
- if (r) {
- mhi_log(MHI_MSG_INFO,
- "Caught signal %d, quitting\n", r);
- return 0;
- }
-
- if (mhi_dev_ctxt->flags.kill_threads) {
- mhi_log(MHI_MSG_INFO,
- "Caught exit signal, quitting\n");
- return 0;
- }
- mhi_dev_ctxt->flags.st_thread_stopped = 0;
- spin_lock_irqsave(work_q->q_lock, flags);
+ while (work_q->q_info.rp != work_q->q_info.wp) {
+ spin_lock_irq(work_q->q_lock);
cur_work_item = *(enum STATE_TRANSITION *)(state_change_q->rp);
r = ctxt_del_element(&work_q->q_info, NULL);
MHI_ASSERT(r == 0,
"Failed to delete element from STT workqueue\n");
- spin_unlock_irqrestore(work_q->q_lock, flags);
+ spin_unlock_irq(work_q->q_lock);
r = process_stt_work_item(mhi_dev_ctxt, cur_work_item);
}
- return 0;
}
/**
@@ -638,16 +589,17 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
&mhi_dev_ctxt->state_change_work_item_list;
spin_lock_irqsave(work_q->q_lock, flags);
- nr_avail_work_items = get_nr_avail_ring_elements(stt_ring);
+ nr_avail_work_items =
+ get_nr_avail_ring_elements(mhi_dev_ctxt, stt_ring);
BUG_ON(nr_avail_work_items <= 0);
- mhi_log(MHI_MSG_VERBOSE,
+ mhi_log(mhi_dev_ctxt, MHI_MSG_VERBOSE,
"Processing state transition %s\n",
state_transition_str(new_state));
*(enum STATE_TRANSITION *)stt_ring->wp = new_state;
r = ctxt_add_element(stt_ring, (void **)&cur_work_item);
BUG_ON(r);
spin_unlock_irqrestore(work_q->q_lock, flags);
- wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
+ schedule_work(&mhi_dev_ctxt->st_thread_worker);
return r;
}
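
The kthread with its wait_event/kill-flag plumbing becomes a work item that simply drains the pending-transition ring until the read pointer meets the write pointer. A single-threaded model of that drain loop (locking elided, queue shape assumed):

#include <stdio.h>

enum trans { T_RESET, T_READY, T_M0 };

static enum trans queue[8];
static unsigned int rp, wp;

/* Model of the worker body: consume one queued transition per pass
 * until rp catches up with wp. */
static void state_change_worker(void)
{
	while (rp != wp) {
		enum trans cur = queue[rp++ % 8];

		printf("processing transition %d\n", cur);
	}
}

int main(void)
{
	queue[wp++ % 8] = T_RESET;
	queue[wp++ % 8] = T_READY;
	state_change_worker(); /* drains both items */
	return 0;
}
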
diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c
index c5c025b8585a..3389de2f95b3 100644
--- a/drivers/platform/msm/mhi/mhi_sys.c
+++ b/drivers/platform/msm/mhi/mhi_sys.c
@@ -51,13 +51,13 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
{
int amnt_copied = 0;
struct mhi_chan_ctxt *chan_ctxt;
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
+ struct mhi_device_ctxt *mhi_dev_ctxt = fp->private_data;
uintptr_t v_wp_index;
uintptr_t v_rp_index;
int valid_chan = 0;
struct mhi_chan_ctxt *cc_list;
struct mhi_client_handle *client_handle;
+ struct mhi_client_config *client_config;
int pkts_queued;
if (NULL == mhi_dev_ctxt)
@@ -76,6 +76,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
continue;
}
client_handle = mhi_dev_ctxt->client_handle_list[*offp];
+ client_config = client_handle->client_config;
valid_chan = 1;
}
@@ -87,8 +88,9 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
&v_wp_index);
- pkts_queued = client_handle->chan_info.max_desc -
- get_nr_avail_ring_elements(&mhi_dev_ctxt->
+ pkts_queued = client_config->chan_info.max_desc -
+ get_nr_avail_ring_elements(mhi_dev_ctxt,
+ &mhi_dev_ctxt->
mhi_local_chan_ctxt[*offp]) - 1;
amnt_copied =
scnprintf(mhi_dev_ctxt->chan_info,
@@ -115,7 +117,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
"pkts_queued",
pkts_queued,
"/",
- client_handle->chan_info.max_desc,
+ client_config->chan_info.max_desc,
"bb_used:",
mhi_dev_ctxt->counters.bb_used[*offp]);
@@ -128,9 +130,16 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
return -ENOMEM;
}
+int mhi_dbgfs_open(struct inode *inode, struct file *fp)
+{
+ fp->private_data = inode->i_private;
+ return 0;
+}
+
static const struct file_operations mhi_dbgfs_chan_fops = {
.read = mhi_dbgfs_chan_read,
.write = NULL,
+ .open = mhi_dbgfs_open,
};
static ssize_t mhi_dbgfs_ev_read(struct file *fp, char __user *buf,
@@ -143,8 +152,7 @@ static ssize_t mhi_dbgfs_ev_read(struct file *fp, char __user *buf,
uintptr_t v_rp_index;
uintptr_t device_p_rp_index;
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
+ struct mhi_device_ctxt *mhi_dev_ctxt = fp->private_data;
if (NULL == mhi_dev_ctxt)
return -EIO;
*offp = (u32)(*offp) % mhi_dev_ctxt->mmio_info.nr_event_rings;
@@ -209,31 +217,15 @@ static ssize_t mhi_dbgfs_ev_read(struct file *fp, char __user *buf,
static const struct file_operations mhi_dbgfs_ev_fops = {
.read = mhi_dbgfs_ev_read,
.write = NULL,
-};
-
-static ssize_t mhi_dbgfs_trigger_msi(struct file *fp, const char __user *buf,
- size_t count, loff_t *offp)
-{
- u32 msi_nr = 0;
- void *irq_ctxt = &((mhi_devices.device_list[0]).pcie_device->dev);
-
- if (copy_from_user(&msi_nr, buf, sizeof(msi_nr)))
- return -ENOMEM;
- mhi_msi_handlr(msi_nr, irq_ctxt);
- return 0;
-}
-
-static const struct file_operations mhi_dbgfs_trigger_msi_fops = {
- .read = NULL,
- .write = mhi_dbgfs_trigger_msi,
+ .open = mhi_dbgfs_open,
};
static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
size_t count, loff_t *offp)
{
int amnt_copied = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
+ struct mhi_device_ctxt *mhi_dev_ctxt = fp->private_data;
+
if (NULL == mhi_dev_ctxt)
return -EIO;
msleep(100);
@@ -260,7 +252,7 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
"device_wake:",
atomic_read(&mhi_dev_ctxt->counters.device_wake),
"usage_count:",
- atomic_read(&mhi_dev_ctxt->dev_info->pcie_device->dev.
+ atomic_read(&mhi_dev_ctxt->pcie_device->dev.
power.usage_count),
"outbound_acks:",
atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
@@ -275,63 +267,61 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
static const struct file_operations mhi_dbgfs_state_fops = {
.read = mhi_dbgfs_state_read,
.write = NULL,
+ .open = mhi_dbgfs_open,
};
int mhi_init_debugfs(struct mhi_device_ctxt *mhi_dev_ctxt)
{
struct dentry *mhi_chan_stats;
struct dentry *mhi_state_stats;
- struct dentry *mhi_msi_trigger;
struct dentry *mhi_ev_stats;
-
- mhi_dev_ctxt->mhi_parent_folder =
- debugfs_create_dir("mhi", NULL);
- if (mhi_dev_ctxt->mhi_parent_folder == NULL) {
- mhi_log(MHI_MSG_INFO, "Failed to create debugfs parent dir.\n");
+ const struct pcie_core_info *core = &mhi_dev_ctxt->core;
+ char node_name[32];
+
+ snprintf(node_name,
+ sizeof(node_name),
+ "%04x_%02u.%02u.%02u",
+ core->dev_id, core->domain, core->bus, core->slot);
+
+ mhi_dev_ctxt->child =
+ debugfs_create_dir(node_name, mhi_dev_ctxt->parent);
+ if (mhi_dev_ctxt->child == NULL) {
+ mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+ "Failed to create debugfs parent dir.\n");
return -EIO;
}
mhi_chan_stats = debugfs_create_file("mhi_chan_stats",
0444,
- mhi_dev_ctxt->mhi_parent_folder,
+ mhi_dev_ctxt->child,
mhi_dev_ctxt,
&mhi_dbgfs_chan_fops);
if (mhi_chan_stats == NULL)
return -ENOMEM;
mhi_ev_stats = debugfs_create_file("mhi_ev_stats",
0444,
- mhi_dev_ctxt->mhi_parent_folder,
+ mhi_dev_ctxt->child,
mhi_dev_ctxt,
&mhi_dbgfs_ev_fops);
if (mhi_ev_stats == NULL)
goto clean_chan;
mhi_state_stats = debugfs_create_file("mhi_state_stats",
0444,
- mhi_dev_ctxt->mhi_parent_folder,
+ mhi_dev_ctxt->child,
mhi_dev_ctxt,
&mhi_dbgfs_state_fops);
if (mhi_state_stats == NULL)
goto clean_ev_stats;
- mhi_msi_trigger = debugfs_create_file("mhi_msi_trigger",
- 0444,
- mhi_dev_ctxt->mhi_parent_folder,
- mhi_dev_ctxt,
- &mhi_dbgfs_trigger_msi_fops);
- if (mhi_msi_trigger == NULL)
- goto clean_state;
mhi_dev_ctxt->chan_info = kmalloc(MHI_LOG_SIZE, GFP_KERNEL);
if (mhi_dev_ctxt->chan_info == NULL)
- goto clean_all;
+ goto clean_ev_stats;
return 0;
-clean_all:
- debugfs_remove(mhi_msi_trigger);
-clean_state:
- debugfs_remove(mhi_state_stats);
+
clean_ev_stats:
debugfs_remove(mhi_ev_stats);
clean_chan:
debugfs_remove(mhi_chan_stats);
- debugfs_remove(mhi_dev_ctxt->mhi_parent_folder);
+ debugfs_remove(mhi_dev_ctxt->child);
return -ENOMEM;
}
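
The debugfs directory is now named per PCIe function rather than a single global "mhi" node. The format string below is taken from the patch; the sample values are assumptions:

#include <stdio.h>

int main(void)
{
	unsigned int dev_id = 0x0301, domain = 0, bus = 1, slot = 0;
	char node_name[32];

	/* one debugfs directory per device id and domain.bus.slot */
	snprintf(node_name, sizeof(node_name), "%04x_%02u.%02u.%02u",
		 dev_id, domain, bus, slot);
	printf("%s\n", node_name); /* prints: 0301_00.01.00 */
	return 0;
}
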
diff --git a/drivers/platform/msm/mhi/mhi_sys.h b/drivers/platform/msm/mhi/mhi_sys.h
index a948a2354de7..712647dc9f7c 100644
--- a/drivers/platform/msm/mhi/mhi_sys.h
+++ b/drivers/platform/msm/mhi/mhi_sys.h
@@ -38,12 +38,13 @@ extern void *mhi_ipc_log;
} \
} while (0)
-#define mhi_log(_msg_lvl, _msg, ...) do { \
+#define mhi_log(mhi_dev_ctxt, _msg_lvl, _msg, ...) do { \
if ((_msg_lvl) >= mhi_msg_lvl) \
pr_alert("[%s] " _msg, __func__, ##__VA_ARGS__);\
- if (mhi_ipc_log && ((_msg_lvl) >= mhi_ipc_log_lvl)) \
- ipc_log_string(mhi_ipc_log, \
- "[%s] " _msg, __func__, ##__VA_ARGS__); \
+ if (mhi_dev_ctxt->mhi_ipc_log && \
+ ((_msg_lvl) >= mhi_ipc_log_lvl)) \
+ ipc_log_string(mhi_dev_ctxt->mhi_ipc_log, \
+ "[%s] " _msg, __func__, ##__VA_ARGS__); \
} while (0)
extern const char * const mhi_states_str[MHI_STATE_LIMIT];
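
mhi_log() now takes the device context so each controller logs to its own IPC buffer instead of one global sink. A standalone model of such a dual-sink, level-gated macro (names, levels, and the FILE-based sink are assumptions):

#include <stdio.h>

struct dev_ctxt { void *ipc_log; };

#define MSG_INFO  1
#define MSG_ERROR 2
static int console_lvl = MSG_ERROR;

/* Gate on a console level, then mirror to the per-device sink. */
#define dev_log(ctxt, lvl, fmt, ...) do {				\
	if ((lvl) >= console_lvl)					\
		printf("[%s] " fmt, __func__, ##__VA_ARGS__);		\
	if ((ctxt)->ipc_log)						\
		fprintf((FILE *)(ctxt)->ipc_log,			\
			"[%s] " fmt, __func__, ##__VA_ARGS__);		\
} while (0)

int main(void)
{
	struct dev_ctxt ctxt = { .ipc_log = stderr };

	dev_log(&ctxt, MSG_ERROR, "link down, ret %d\n", -5);
	return 0;
}
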
diff --git a/drivers/platform/msm/mhi_uci/mhi_uci.c b/drivers/platform/msm/mhi_uci/mhi_uci.c
index 96c4671f994f..0e28ebdd8fea 100644
--- a/drivers/platform/msm/mhi_uci/mhi_uci.c
+++ b/drivers/platform/msm/mhi_uci/mhi_uci.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,8 +31,6 @@
#define MHI_DEV_NODE_NAME_LEN 13
#define MHI_SOFTWARE_CLIENT_LIMIT 23
-#define TRE_TYPICAL_SIZE 0x1000
-#define TRE_MAX_SIZE 0xFFFF
#define MHI_UCI_IPC_LOG_PAGES (25)
#define MAX_NR_TRBS_PER_CHAN 10
@@ -129,9 +127,8 @@ struct uci_client {
struct mhi_uci_ctxt_t {
struct list_head node;
- struct platform_dev *pdev;
+ struct platform_device *pdev;
struct uci_client client_handles[MHI_SOFTWARE_CLIENT_LIMIT];
- struct mhi_client_info_t client_info;
dev_t dev_t;
struct mutex ctrl_mutex;
struct cdev cdev[MHI_SOFTWARE_CLIENT_LIMIT];
@@ -332,8 +329,8 @@ static int mhi_uci_send_packet(struct mhi_client_handle **client_handle,
return 0;
for (i = 0; i < nr_avail_trbs; ++i) {
- data_to_insert_now = min(data_left_to_insert,
- TRE_MAX_SIZE);
+ data_to_insert_now = min_t(size_t, data_left_to_insert,
+ uci_handle->out_attr.max_packet_size);
if (is_uspace_buf) {
data_loc = kmalloc(data_to_insert_now, GFP_KERNEL);
if (NULL == data_loc) {
@@ -1172,6 +1169,9 @@ static void uci_xfer_cb(struct mhi_cb_info *cb_info)
uci_handle = cb_info->result->user_data;
switch (cb_info->cb_reason) {
case MHI_CB_MHI_ENABLED:
+ uci_log(uci_handle->uci_ipc_log,
+ UCI_DBG_INFO,
+ "MHI enabled CB received.\n");
atomic_set(&uci_handle->mhi_disabled, 0);
break;
case MHI_CB_MHI_DISABLED:
@@ -1202,9 +1202,11 @@ static void uci_xfer_cb(struct mhi_cb_info *cb_info)
}
}
-static int mhi_register_client(struct uci_client *mhi_client)
+static int mhi_register_client(struct uci_client *mhi_client,
+ struct device *dev)
{
int ret_val = 0;
+ struct mhi_client_info_t client_info;
uci_log(mhi_client->uci_ipc_log,
UCI_DBG_INFO,
@@ -1222,11 +1224,13 @@ static int mhi_register_client(struct uci_client *mhi_client)
UCI_DBG_INFO,
"Registering chan %d\n",
mhi_client->out_chan);
- ret_val = mhi_register_channel(&mhi_client->out_handle,
- mhi_client->out_chan,
- 0,
- &mhi_client->uci_ctxt->client_info,
- mhi_client);
+ client_info.dev = dev;
+ client_info.node_name = "qcom,mhi";
+ client_info.user_data = mhi_client;
+ client_info.mhi_client_cb = uci_xfer_cb;
+ client_info.chan = mhi_client->out_chan;
+ client_info.max_payload = mhi_client->out_attr.max_packet_size;
+ ret_val = mhi_register_channel(&mhi_client->out_handle, &client_info);
if (0 != ret_val)
uci_log(mhi_client->uci_ipc_log,
UCI_DBG_ERROR,
@@ -1238,11 +1242,9 @@ static int mhi_register_client(struct uci_client *mhi_client)
UCI_DBG_INFO,
"Registering chan %d\n",
mhi_client->in_chan);
- ret_val = mhi_register_channel(&mhi_client->in_handle,
- mhi_client->in_chan,
- 0,
- &mhi_client->uci_ctxt->client_info,
- mhi_client);
+ client_info.max_payload = mhi_client->in_attr.max_packet_size;
+ client_info.chan = mhi_client->in_chan;
+ ret_val = mhi_register_channel(&mhi_client->in_handle, &client_info);
if (0 != ret_val)
uci_log(mhi_client->uci_ipc_log,
UCI_DBG_ERROR,
@@ -1266,13 +1268,16 @@ static int mhi_uci_probe(struct platform_device *pdev)
struct mhi_uci_ctxt_t *uci_ctxt;
int ret_val;
int i;
- char node_name[16];
+ char node_name[32];
uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log,
UCI_DBG_INFO,
"Entered with pdev:%p\n",
pdev);
+ if (mhi_is_device_ready(&pdev->dev, "qcom,mhi") == false)
+ return -EPROBE_DEFER;
+
if (pdev->dev.of_node == NULL)
return -ENODEV;
@@ -1286,7 +1291,7 @@ static int mhi_uci_probe(struct platform_device *pdev)
if (!uci_ctxt)
return -ENOMEM;
- uci_ctxt->client_info.mhi_client_cb = uci_xfer_cb;
+ uci_ctxt->pdev = pdev;
mutex_init(&uci_ctxt->ctrl_mutex);
uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log,
@@ -1309,7 +1314,8 @@ static int mhi_uci_probe(struct platform_device *pdev)
uci_client->uci_ctxt = uci_ctxt;
if (uci_client->in_attr.uci_ownership) {
- ret_val = mhi_register_client(uci_client);
+ ret_val = mhi_register_client(uci_client,
+ &pdev->dev);
if (ret_val) {
uci_log(mhi_uci_drv_ctxt.mhi_uci_ipc_log,
UCI_DBG_CRITICAL,
@@ -1319,7 +1325,13 @@ static int mhi_uci_probe(struct platform_device *pdev)
return -EIO;
}
- snprintf(node_name, sizeof(node_name), "mhi-uci%d",
+ snprintf(node_name,
+ sizeof(node_name),
+ "mhi_uci_%04x_%02u.%02u.%02u_%d",
+ uci_client->out_handle->dev_id,
+ uci_client->out_handle->domain,
+ uci_client->out_handle->bus,
+ uci_client->out_handle->slot,
uci_client->out_attr.chan_id);
uci_client->uci_ipc_log = ipc_log_context_create
(MHI_UCI_IPC_LOG_PAGES,
@@ -1364,11 +1376,16 @@ static int mhi_uci_probe(struct platform_device *pdev)
}
uci_client->dev =
device_create(mhi_uci_drv_ctxt.mhi_uci_class,
- NULL,
- uci_ctxt->dev_t + i,
- NULL,
- DEVICE_NAME "_pipe_%d",
- uci_client->out_chan);
+ NULL,
+ uci_ctxt->dev_t + i,
+ NULL,
+ DEVICE_NAME "_%04x_%02u.%02u.%02u%s%d",
+ uci_client->out_handle->dev_id,
+ uci_client->out_handle->domain,
+ uci_client->out_handle->bus,
+ uci_client->out_handle->slot,
+ "_pipe_",
+ uci_client->out_chan);
if (IS_ERR(uci_client->dev)) {
uci_log(uci_client->uci_ipc_log,
UCI_DBG_ERROR,
diff --git a/drivers/power/supply/qcom/pmic-voter.c b/drivers/power/supply/qcom/pmic-voter.c
index e1a92fb23912..c07e9f083204 100644
--- a/drivers/power/supply/qcom/pmic-voter.c
+++ b/drivers/power/supply/qcom/pmic-voter.c
@@ -188,6 +188,38 @@ void unlock_votable(struct votable *votable)
}
/**
+ * is_client_vote_enabled() -
+ * is_client_vote_enabled_locked() -
+ * The unlocked and locked variants of getting whether a client's
+ vote is enabled.
+ * @votable: the votable object
+ * @client_str: client of interest
+ *
+ * Returns:
+ * True if the client's vote is enabled; false otherwise.
+ */
+bool is_client_vote_enabled_locked(struct votable *votable,
+ const char *client_str)
+{
+ int client_id = get_client_id(votable, client_str);
+
+ if (client_id < 0)
+ return false;
+
+ return votable->votes[client_id].enabled;
+}
+
+bool is_client_vote_enabled(struct votable *votable, const char *client_str)
+{
+ bool enabled;
+
+ lock_votable(votable);
+ enabled = is_client_vote_enabled_locked(votable, client_str);
+ unlock_votable(votable);
+ return enabled;
+}
+
+/**
* get_client_vote() -
* get_client_vote_locked() -
* The unlocked and locked variants of getting a client's voted
diff --git a/drivers/power/supply/qcom/pmic-voter.h b/drivers/power/supply/qcom/pmic-voter.h
index 031b9a010a42..f202bf704055 100644
--- a/drivers/power/supply/qcom/pmic-voter.h
+++ b/drivers/power/supply/qcom/pmic-voter.h
@@ -24,6 +24,9 @@ enum votable_type {
NUM_VOTABLE_TYPES,
};
+bool is_client_vote_enabled(struct votable *votable, const char *client_str);
+bool is_client_vote_enabled_locked(struct votable *votable,
+ const char *client_str);
int get_client_vote(struct votable *votable, const char *client_str);
int get_client_vote_locked(struct votable *votable, const char *client_str);
int get_effective_result(struct votable *votable);
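
The new is_client_vote_enabled() helpers let callers distinguish "client voted false" from "client never voted". A toy standalone model of the voter pattern, assuming min-aggregation purely for illustration:

#include <stdbool.h>
#include <stdio.h>

struct vote { bool enabled; int val; };

static struct vote votes[4];

static bool is_client_vote_enabled(int id) { return votes[id].enabled; }

/* Aggregate enabled votes; min-wins is one common votable policy. */
static int effective_result(void)
{
	int best = -1;

	for (int i = 0; i < 4; i++)
		if (votes[i].enabled && (best < 0 || votes[i].val < best))
			best = votes[i].val;
	return best;
}

int main(void)
{
	votes[0] = (struct vote){ true, 1500000 };
	votes[2] = (struct vote){ true, 500000 };
	printf("%d %d\n", is_client_vote_enabled(1), effective_result());
	/* prints: 0 500000 */
	return 0;
}
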
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 39afc235fbc3..5dcd4c36675a 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -1696,6 +1696,9 @@ static int fg_set_recharge_soc(struct fg_chip *chip, int recharge_soc)
if (!chip->dt.auto_recharge_soc)
return 0;
+ if (recharge_soc < 0 || recharge_soc > FULL_CAPACITY)
+ return 0;
+
fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR, recharge_soc, &buf);
rc = fg_sram_write(chip,
chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
@@ -1712,46 +1715,55 @@ static int fg_set_recharge_soc(struct fg_chip *chip, int recharge_soc)
static int fg_adjust_recharge_soc(struct fg_chip *chip)
{
int rc, msoc, recharge_soc, new_recharge_soc = 0;
+ bool recharge_soc_status;
if (!chip->dt.auto_recharge_soc)
return 0;
recharge_soc = chip->dt.recharge_soc_thr;
+ recharge_soc_status = chip->recharge_soc_adjusted;
/*
* If the input is present and charging had been terminated, adjust
* the recharge SOC threshold based on the monotonic SOC at which
* the charge termination had happened.
*/
- if (is_input_present(chip) && !chip->recharge_soc_adjusted
- && chip->charge_done) {
- /* Get raw monotonic SOC for calculation */
- rc = fg_get_msoc(chip, &msoc);
- if (rc < 0) {
- pr_err("Error in getting msoc, rc=%d\n", rc);
- return rc;
- }
+ if (is_input_present(chip)) {
+ if (chip->charge_done) {
+ if (!chip->recharge_soc_adjusted) {
+ /* Get raw monotonic SOC for calculation */
+ rc = fg_get_msoc(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting msoc, rc=%d\n",
+ rc);
+ return rc;
+ }
- /* Adjust the recharge_soc threshold */
- new_recharge_soc = msoc - (FULL_CAPACITY - recharge_soc);
- } else if (chip->recharge_soc_adjusted && (!is_input_present(chip)
- || chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
+ /* Adjust the recharge_soc threshold */
+ new_recharge_soc = msoc - (FULL_CAPACITY -
+ recharge_soc);
+ chip->recharge_soc_adjusted = true;
+ } else {
+ /* adjusted already, do nothing */
+ return 0;
+ }
+ } else {
+ /* Charging, do nothing */
+ return 0;
+ }
+ } else {
/* Restore the default value */
new_recharge_soc = recharge_soc;
+ chip->recharge_soc_adjusted = false;
}
- if (new_recharge_soc > 0 && new_recharge_soc < FULL_CAPACITY) {
- rc = fg_set_recharge_soc(chip, new_recharge_soc);
- if (rc) {
- pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
- return rc;
- }
-
- chip->recharge_soc_adjusted = (new_recharge_soc !=
- recharge_soc);
- fg_dbg(chip, FG_STATUS, "resume soc set to %d\n",
- new_recharge_soc);
+ rc = fg_set_recharge_soc(chip, new_recharge_soc);
+ if (rc < 0) {
+ chip->recharge_soc_adjusted = recharge_soc_status;
+ pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
+ return rc;
}
+ fg_dbg(chip, FG_STATUS, "resume soc set to %d\n", new_recharge_soc);
return 0;
}
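
A worked example of the threshold arithmetic used above: if charge termination happened at a monotonic SOC below 100%, the recharge threshold shifts down by the same shortfall. Sample values are assumptions:

#include <stdio.h>

#define FULL_CAPACITY 100

int main(void)
{
	int recharge_soc = 95;	/* configured threshold, percent      */
	int msoc = 97;		/* monotonic SOC at charge termination */
	int adjusted = msoc - (FULL_CAPACITY - recharge_soc);

	printf("adjusted threshold: %d%%\n", adjusted); /* 92% */
	return 0;
}
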
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 4f126854728f..7d2e00dc934b 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -161,39 +161,14 @@ static int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
int smblib_icl_override(struct smb_charger *chg, bool override)
{
int rc;
- bool override_status;
- u8 stat;
- u16 reg;
-
- switch (chg->smb_version) {
- case PMI8998_SUBTYPE:
- reg = APSD_RESULT_STATUS_REG;
- break;
- case PM660_SUBTYPE:
- reg = AICL_STATUS_REG;
- break;
- default:
- smblib_dbg(chg, PR_MISC, "Unknown chip version=%x\n",
- chg->smb_version);
- return -EINVAL;
- }
- rc = smblib_read(chg, reg, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read reg=%x rc=%d\n", reg, rc);
- return rc;
- }
- override_status = (bool)(stat & ICL_OVERRIDE_LATCH_BIT);
+ rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
+ ICL_OVERRIDE_AFTER_APSD_BIT,
+ override ? ICL_OVERRIDE_AFTER_APSD_BIT : 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
- if (override != override_status) {
- rc = smblib_masked_write(chg, CMD_APSD_REG,
- ICL_OVERRIDE_BIT, ICL_OVERRIDE_BIT);
- if (rc < 0) {
- smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
- return rc;
- }
- }
- return 0;
+ return rc;
}
/********************
@@ -727,21 +702,6 @@ static void smblib_uusb_removal(struct smb_charger *chg)
rc);
}
-static bool smblib_sysok_reason_usbin(struct smb_charger *chg)
-{
- int rc;
- u8 stat;
-
- rc = smblib_read(chg, SYSOK_REASON_STATUS_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get SYSOK_REASON_STATUS rc=%d\n", rc);
- /* assuming 'not usbin' in case of read failure */
- return false;
- }
-
- return stat & SYSOK_REASON_USBIN_BIT;
-}
-
void smblib_suspend_on_debug_battery(struct smb_charger *chg)
{
int rc;
@@ -1098,16 +1058,6 @@ static int smblib_apsd_disable_vote_callback(struct votable *votable,
int rc;
if (apsd_disable) {
- /* Don't run APSD on CC debounce when APSD is disabled */
- rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
- APSD_START_ON_CC_BIT,
- 0);
- if (rc < 0) {
- smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
- rc);
- return rc;
- }
-
rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
AUTO_SRC_DETECT_BIT,
0);
@@ -1123,15 +1073,6 @@ static int smblib_apsd_disable_vote_callback(struct votable *votable,
smblib_err(chg, "Couldn't enable APSD rc=%d\n", rc);
return rc;
}
-
- rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
- APSD_START_ON_CC_BIT,
- APSD_START_ON_CC_BIT);
- if (rc < 0) {
- smblib_err(chg, "Couldn't enable APSD_START_ON_CC rc=%d\n",
- rc);
- return rc;
- }
}
return 0;
@@ -2510,10 +2451,6 @@ int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
}
chg->voltage_max_uv = max_uv;
- rc = smblib_rerun_aicl(chg);
- if (rc < 0)
- smblib_err(chg, "Couldn't re-run AICL rc=%d\n", rc);
-
return rc;
}
@@ -2568,6 +2505,9 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
return rc;
}
+ /* since PD was found the cable must be non-legacy */
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+
/* clear USB ICL vote for DCP_VOTER */
rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
if (rc < 0)
@@ -2685,12 +2625,6 @@ int smblib_reg_block_restore(struct smb_charger *chg,
static struct reg_info cc2_detach_settings[] = {
{
- .reg = TYPE_C_CFG_REG,
- .mask = APSD_START_ON_CC_BIT,
- .val = 0,
- .desc = "TYPE_C_CFG_REG",
- },
- {
.reg = TYPE_C_CFG_2_REG,
.mask = TYPE_C_UFP_MODE_BIT | EN_TRY_SOURCE_MODE_BIT,
.val = TYPE_C_UFP_MODE_BIT,
@@ -3406,6 +3340,37 @@ static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg,
rising ? "rising" : "falling");
}
+static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
+{
+ switch (pst) {
+ case POWER_SUPPLY_TYPE_USB:
+ /*
+ * USB_PSY will vote to increase the current to 500/900mA once
+ * enumeration is done. Ensure that USB_PSY has at least voted
+ * for 100mA before releasing the LEGACY_UNKNOWN vote
+ */
+ if (!is_client_vote_enabled(chg->usb_icl_votable,
+ USB_PSY_VOTER))
+ vote(chg->usb_icl_votable, USB_PSY_VOTER, true, 100000);
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+ break;
+ case POWER_SUPPLY_TYPE_USB_CDP:
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
+ break;
+ case POWER_SUPPLY_TYPE_USB_DCP:
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
+ break;
+ case POWER_SUPPLY_TYPE_USB_HVDCP:
+ case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 3000000);
+ break;
+ default:
+ smblib_err(chg, "Unknown APSD %d; forcing 500mA\n", pst);
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 500000);
+ break;
+ }
+}
+
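
The forced-ICL defaults keyed by APSD result reduce to a small lookup; the microamp values mirror smblib_force_legacy_icl() above, while the enum names here are assumptions:

#include <stdio.h>

enum pst { PST_USB, PST_CDP, PST_DCP, PST_HVDCP, PST_OTHER };

/* Model of the LEGACY_UNKNOWN defaults per detected charger type. */
static int legacy_icl_ua(enum pst pst)
{
	switch (pst) {
	case PST_USB:   return 100000;  /* floor until USB_PSY enumerates */
	case PST_CDP:
	case PST_DCP:   return 1500000;
	case PST_HVDCP: return 3000000;
	default:        return 500000;  /* unknown: conservative 500 mA */
	}
}

int main(void)
{
	printf("%d\n", legacy_icl_ua(PST_DCP)); /* prints 1500000 */
	return 0;
}
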
#define HVDCP_DET_MS 2500
static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
{
@@ -3415,6 +3380,10 @@ static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
return;
apsd_result = smblib_update_usb_type(chg);
+
+ if (!chg->pd_active)
+ smblib_force_legacy_icl(chg, apsd_result->pst);
+
switch (apsd_result->bit) {
case SDP_CHARGER_BIT:
case CDP_CHARGER_BIT:
@@ -3495,6 +3464,9 @@ static void typec_source_removal(struct smb_charger *chg)
{
int rc;
+ /* reset legacy unknown vote */
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+
/* reset both usbin current and voltage votes */
vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
@@ -3548,6 +3520,15 @@ static void typec_source_removal(struct smb_charger *chg)
static void typec_source_insertion(struct smb_charger *chg)
{
+ /*
+ * at any time we want LEGACY_UNKNOWN, PD, or USB_PSY to be voting for
+ * ICL, so vote LEGACY_UNKNOWN here if none of the above three have
+ * casted their votes
+ */
+ if (!is_client_vote_enabled(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER)
+ && !is_client_vote_enabled(chg->usb_icl_votable, PD_VOTER)
+ && !is_client_vote_enabled(chg->usb_icl_votable, USB_PSY_VOTER))
+ vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);
}
static void typec_sink_insertion(struct smb_charger *chg)
@@ -3568,10 +3549,10 @@ static void typec_sink_removal(struct smb_charger *chg)
static void smblib_handle_typec_removal(struct smb_charger *chg)
{
+ int rc;
+
vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, true, 0);
vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER, true, 0);
- vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER, true, 0);
- vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
/* reset votes from vbus_cc_short */
@@ -3590,10 +3571,13 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
chg->pulse_cnt = 0;
chg->usb_icl_delta_ua = 0;
- chg->usb_ever_removed = true;
+ /* enable APSD CC trigger for next insertion */
+ rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+ APSD_START_ON_CC_BIT, APSD_START_ON_CC_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable APSD_START_ON_CC rc=%d\n", rc);
smblib_update_usb_type(chg);
-
typec_source_removal(chg);
typec_sink_removal(chg);
}
@@ -3601,12 +3585,16 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
static void smblib_handle_typec_insertion(struct smb_charger *chg,
bool sink_attached, bool legacy_cable)
{
- int rp;
- bool vbus_cc_short = false;
- bool valid_legacy_cable;
+ int rp, rc;
vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, false, 0);
+ /* disable APSD CC trigger since CC is attached */
+ rc = smblib_masked_write(chg, TYPE_C_CFG_REG, APSD_START_ON_CC_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
+ rc);
+
if (sink_attached) {
typec_source_removal(chg);
typec_sink_insertion(chg);
@@ -3615,25 +3603,18 @@ static void smblib_handle_typec_insertion(struct smb_charger *chg,
typec_sink_removal(chg);
}
- valid_legacy_cable = legacy_cable &&
- (chg->usb_ever_removed || !smblib_sysok_reason_usbin(chg));
- vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER,
- valid_legacy_cable, 0);
-
- if (valid_legacy_cable) {
- rp = smblib_get_prop_ufp_mode(chg);
- if (rp == POWER_SUPPLY_TYPEC_SOURCE_HIGH
- || rp == POWER_SUPPLY_TYPEC_NON_COMPLIANT) {
- vbus_cc_short = true;
- smblib_err(chg, "Disabling PD and HVDCP, VBUS-CC shorted, rp = %d found\n",
- rp);
- }
+ rp = smblib_get_prop_ufp_mode(chg);
+ if (rp == POWER_SUPPLY_TYPEC_SOURCE_HIGH
+ || rp == POWER_SUPPLY_TYPEC_NON_COMPLIANT) {
+ smblib_dbg(chg, PR_MISC, "VBUS & CC could be shorted; keeping HVDCP disabled\n");
+ /* HVDCP is not going to be enabled; enable parallel */
+ vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0);
+ vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+ true, 0);
+ } else {
+ vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+ false, 0);
}
-
- vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
- vbus_cc_short, 0);
- vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER,
- vbus_cc_short, 0);
}
static void smblib_handle_typec_debounce_done(struct smb_charger *chg,
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 22ef78fd9641..de236164e6b2 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -47,7 +47,6 @@ enum print_reason {
#define PD_DISALLOWED_INDIRECT_VOTER "PD_DISALLOWED_INDIRECT_VOTER"
#define PD_HARD_RESET_VOTER "PD_HARD_RESET_VOTER"
#define VBUS_CC_SHORT_VOTER "VBUS_CC_SHORT_VOTER"
-#define LEGACY_CABLE_VOTER "LEGACY_CABLE_VOTER"
#define PD_INACTIVE_VOTER "PD_INACTIVE_VOTER"
#define BOOST_BACK_VOTER "BOOST_BACK_VOTER"
#define HVDCP_INDIRECT_VOTER "HVDCP_INDIRECT_VOTER"
@@ -58,6 +57,7 @@ enum print_reason {
#define CTM_VOTER "CTM_VOTER"
#define SW_QC3_VOTER "SW_QC3_VOTER"
#define AICL_RERUN_VOTER "AICL_RERUN_VOTER"
+#define LEGACY_UNKNOWN_VOTER "LEGACY_UNKNOWN_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
@@ -316,7 +316,6 @@ struct smb_charger {
/* extcon for VBUS / ID notification to USB for uUSB */
struct extcon_dev *extcon;
- bool usb_ever_removed;
int icl_reduction_ua;
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index 54b6b38d134b..f7c13390d477 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -628,6 +628,7 @@ enum {
#define USBIN_LOAD_CFG_REG (USBIN_BASE + 0x65)
#define USBIN_OV_CH_LOAD_OPTION_BIT BIT(7)
+#define ICL_OVERRIDE_AFTER_APSD_BIT BIT(4)
#define USBIN_ICL_OPTIONS_REG (USBIN_BASE + 0x66)
#define CFG_USB3P0_SEL_BIT BIT(2)
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 2fcd1a4c636c..3ec2c7ac01ba 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -104,13 +104,16 @@ module_param(qmi_timeout, ulong, 0600);
#ifdef CONFIG_ICNSS_DEBUG
#define ICNSS_ASSERT(_condition) do { \
if (!(_condition)) { \
- icnss_pr_err("ASSERT at line %d\n", \
- __LINE__); \
+ icnss_pr_err("ASSERT at line %d\n", __LINE__); \
BUG_ON(1); \
} \
} while (0)
+
+bool ignore_qmi_timeout;
+#define ICNSS_QMI_ASSERT() ICNSS_ASSERT(ignore_qmi_timeout)
#else
#define ICNSS_ASSERT(_condition) do { } while (0)
+#define ICNSS_QMI_ASSERT() do { } while (0)
#endif
enum icnss_debug_quirks {
@@ -349,6 +352,15 @@ static struct icnss_priv {
bool bypass_s1_smmu;
} *penv;
+#ifdef CONFIG_ICNSS_DEBUG
+static void icnss_ignore_qmi_timeout(bool ignore)
+{
+ ignore_qmi_timeout = ignore;
+}
+#else
+static void icnss_ignore_qmi_timeout(bool ignore) { }
+#endif
+
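
ICNSS_QMI_ASSERT() trips the debug BUG only when a QMI failure is not expected; setting the flag around a known modem-restart window suppresses it. A userspace model using assert(), with assumed names:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool ignore_qmi_timeout;

/* Fires when the failure is NOT inside an expected restart window. */
#define QMI_ASSERT() assert(ignore_qmi_timeout)

int main(void)
{
	ignore_qmi_timeout = true;  /* e.g. modem went down */
	QMI_ASSERT();               /* suppressed */
	printf("survived restart window\n");
	return 0;
}
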
static void icnss_pm_stay_awake(struct icnss_priv *priv)
{
if (atomic_inc_return(&priv->pm_count) != 1)
@@ -1132,7 +1144,7 @@ static int wlfw_msa_mem_info_send_sync_msg(void)
out:
penv->stats.msa_info_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1180,7 +1192,7 @@ static int wlfw_msa_ready_send_sync_msg(void)
out:
penv->stats.msa_ready_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1243,7 +1255,7 @@ static int wlfw_ind_register_send_sync_msg(void)
out:
penv->stats.ind_register_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1312,7 +1324,7 @@ static int wlfw_cap_send_sync_msg(void)
out:
penv->stats.cap_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1373,7 +1385,7 @@ static int wlfw_wlan_mode_send_sync_msg(enum wlfw_driver_mode_enum_v01 mode)
out:
penv->stats.mode_req_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1423,7 +1435,7 @@ static int wlfw_wlan_cfg_send_sync_msg(struct wlfw_wlan_cfg_req_msg_v01 *data)
out:
penv->stats.cfg_req_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1476,7 +1488,7 @@ static int wlfw_ini_send_sync_msg(uint8_t fw_log_mode)
out:
penv->stats.ini_req_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1642,7 +1654,7 @@ static int wlfw_rejuvenate_ack_send_sync_msg(struct icnss_priv *priv)
out:
priv->stats.rejuvenate_ack_err++;
- ICNSS_ASSERT(false);
+ ICNSS_QMI_ASSERT();
return ret;
}
@@ -1774,6 +1786,8 @@ static void icnss_qmi_wlfw_clnt_ind(struct qmi_handle *handle,
case QMI_WLFW_REJUVENATE_IND_V01:
icnss_pr_dbg("Received Rejuvenate Indication msg_id 0x%x, state: 0x%lx\n",
msg_id, penv->state);
+
+ icnss_ignore_qmi_timeout(true);
event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
if (event_data == NULL)
return;
@@ -2142,7 +2156,7 @@ static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
struct icnss_event_pd_service_down_data *event_data = data;
if (!test_bit(ICNSS_WLFW_EXISTS, &priv->state))
- return 0;
+ goto out;
if (test_bit(ICNSS_PD_RESTART, &priv->state)) {
icnss_pr_err("PD Down while recovery inprogress, crashed: %d, state: 0x%lx\n",
@@ -2159,6 +2173,8 @@ static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
out:
kfree(data);
+ icnss_ignore_qmi_timeout(false);
+
return ret;
}
@@ -2300,9 +2316,11 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
if (test_bit(ICNSS_PDR_ENABLED, &priv->state))
return NOTIFY_OK;
- icnss_pr_info("Modem went down, state: %lx, crashed: %d\n",
+ icnss_pr_info("Modem went down, state: 0x%lx, crashed: %d\n",
priv->state, notif->crashed);
+ icnss_ignore_qmi_timeout(true);
+
event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
if (event_data == NULL)
@@ -2409,6 +2427,8 @@ static int icnss_service_notifier_notify(struct notifier_block *nb,
}
event_post:
+ icnss_ignore_qmi_timeout(true);
+
icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
ICNSS_EVENT_SYNC, event_data);
done:
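The recurring icnss pattern above: before an anticipated disruption (modem going down, PD service down, rejuvenate), the driver raises ignore_qmi_timeout so ICNSS_QMI_ASSERT() stays quiet for QMI failures it already expects, then lowers it once the down-event has been processed. A condensed sketch of the same debug-assert gating, with hypothetical config option and names:

#ifdef CONFIG_EXAMPLE_DEBUG
static bool expect_timeouts;
#define EXAMPLE_ASSERT(cond)	do { if (!(cond)) BUG(); } while (0)
/* Only trip when a timeout was NOT expected. */
#define EXAMPLE_QMI_ASSERT()	EXAMPLE_ASSERT(expect_timeouts)
static void example_expect_qmi_timeouts(bool expect)
{
	expect_timeouts = expect;
}
#else
#define EXAMPLE_ASSERT(cond)	do { } while (0)
#define EXAMPLE_QMI_ASSERT()	do { } while (0)
static void example_expect_qmi_timeouts(bool expect) { }
#endif

/* Raise before an anticipated disruption, lower once it is handled. */
static void example_on_peer_down(void)
{
	example_expect_qmi_timeouts(true);
}

static void example_on_peer_down_handled(void)
{
	example_expect_qmi_timeouts(false);
}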
diff --git a/drivers/soc/qcom/ipc_router_mhi_xprt.c b/drivers/soc/qcom/ipc_router_mhi_xprt.c
index 9a0624804c21..f9d967fd0af6 100644
--- a/drivers/soc/qcom/ipc_router_mhi_xprt.c
+++ b/drivers/soc/qcom/ipc_router_mhi_xprt.c
@@ -792,20 +792,14 @@ static int ipc_router_mhi_driver_register(
{
int rc_status;
- rc_status = mhi_register_channel(&mhi_xprtp->ch_hndl.out_handle,
- mhi_xprtp->ch_hndl.out_chan_id, 0,
- &mhi_xprtp->ch_hndl.out_clnt_info,
- (void *)mhi_xprtp);
+ rc_status = mhi_register_channel(&mhi_xprtp->ch_hndl.out_handle, NULL);
if (rc_status) {
IPC_RTR_ERR("%s: Error %d registering out_chan for %s\n",
__func__, rc_status, mhi_xprtp->xprt_name);
return -EFAULT;
}
- rc_status = mhi_register_channel(&mhi_xprtp->ch_hndl.in_handle,
- mhi_xprtp->ch_hndl.in_chan_id, 0,
- &mhi_xprtp->ch_hndl.in_clnt_info,
- (void *)mhi_xprtp);
+ rc_status = mhi_register_channel(&mhi_xprtp->ch_hndl.in_handle, NULL);
if (rc_status) {
mhi_deregister_channel(mhi_xprtp->ch_hndl.out_handle);
IPC_RTR_ERR("%s: Error %d registering in_chan for %s\n",
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index d6853e4bea72..0e7bf13c192b 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -101,6 +101,7 @@ struct elem_info qmi_response_type_v01_ei[] = {
.ei_array = NULL,
},
};
+EXPORT_SYMBOL(qmi_response_type_v01_ei);
struct elem_info qmi_error_resp_type_v01_ei[] = {
{
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 8581ed587ead..0d6c1d62c732 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -266,10 +266,9 @@ static int service_locator_send_msg(struct pd_qmi_client_data *pd)
if (!domains_read) {
db_rev_count = pd->db_rev_count = resp->db_rev_count;
pd->total_domains = resp->total_domains;
- if (!pd->total_domains && resp->domain_list_len) {
- pr_err("total domains not set\n");
- pd->total_domains = resp->domain_list_len;
- }
+ if (!resp->total_domains)
+ pr_info("No matching domains found\n");
+
pd->domain_list = kmalloc(
sizeof(struct servreg_loc_entry_v01) *
resp->total_domains, GFP_KERNEL);
@@ -286,6 +285,10 @@ static int service_locator_send_msg(struct pd_qmi_client_data *pd)
rc = -EAGAIN;
goto out;
}
+ if (resp->domain_list_len > resp->total_domains) {
+		/* Trust total_domains from the response; never copy more entries */
+ resp->domain_list_len = resp->total_domains;
+ }
/* Copy the response*/
store_get_domain_list_response(pd, resp, domains_read);
domains_read += resp->domain_list_len;
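The service-locator fix above treats total_domains from the first response as authoritative and clamps each page's domain_list_len against it. A worked sketch of that accounting (hypothetical helper; u32 per the QMI message types):

#include <linux/types.h>

/* Clamp one page of results so accounting never passes the advertised total. */
static u32 example_account_domain_page(u32 total_domains, u32 domains_read,
				       u32 domain_list_len)
{
	/* Always trust total_domains reported in the first response. */
	if (domain_list_len > total_domains)
		domain_list_len = total_domains;

	return domains_read + domain_list_len;
}

/* e.g. total=3, read=0, page claims 5 entries -> only 3 are accounted. */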
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
index ac17d0aa46ae..45c39d3c4225 100644
--- a/drivers/usb/gadget/function/f_qc_rndis.c
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -153,6 +153,7 @@ static unsigned int rndis_qc_bitrate(struct usb_gadget *g)
/* interface descriptor: */
+/* Supports "Wireless" RNDIS; auto-detected by Windows */
static struct usb_interface_descriptor rndis_qc_control_intf = {
.bLength = sizeof(rndis_qc_control_intf),
.bDescriptorType = USB_DT_INTERFACE,
@@ -160,9 +161,9 @@ static struct usb_interface_descriptor rndis_qc_control_intf = {
/* .bInterfaceNumber = DYNAMIC */
/* status endpoint is optional; this could be patched later */
.bNumEndpoints = 1,
- .bInterfaceClass = USB_CLASS_COMM,
- .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM,
- .bInterfaceProtocol = USB_CDC_ACM_PROTO_VENDOR,
+ .bInterfaceClass = USB_CLASS_WIRELESS_CONTROLLER,
+ .bInterfaceSubClass = 0x01,
+ .bInterfaceProtocol = 0x03,
/* .iInterface = DYNAMIC */
};
@@ -214,15 +215,16 @@ static struct usb_interface_descriptor rndis_qc_data_intf = {
};
+/* Supports "Wireless" RNDIS; auto-detected by Windows */
static struct usb_interface_assoc_descriptor
rndis_qc_iad_descriptor = {
.bLength = sizeof(rndis_qc_iad_descriptor),
.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
.bFirstInterface = 0, /* XXX, hardcoded */
.bInterfaceCount = 2, /* control + data */
- .bFunctionClass = USB_CLASS_COMM,
- .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
- .bFunctionProtocol = USB_CDC_PROTO_NONE,
+ .bFunctionClass = USB_CLASS_WIRELESS_CONTROLLER,
+ .bFunctionSubClass = 0x01,
+ .bFunctionProtocol = 0x03,
/* .iFunction = DYNAMIC */
};
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 05d96fd8c07c..3f106b428dba 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -38,12 +38,19 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
+ struct device_node *node = dev->of_node;
+ struct usb_xhci_pdata *pdata = dev_get_platdata(dev);
+
/*
* As of now platform drivers don't provide MSI support so we ensure
* here that the generic code does not try to make a pci_dev from our
* dev struct in order to setup MSI
*/
xhci->quirks |= XHCI_PLAT;
+
+ if ((node && of_property_read_bool(node, "usb3-lpm-capable")) ||
+ (pdata && pdata->usb3_lpm_capable))
+ xhci->quirks |= XHCI_LPM_SUPPORT;
}
/* called during probe() after chip reset completes */
@@ -129,7 +136,6 @@ static DEVICE_ATTR(config_imod, S_IRUGO | S_IWUSR,
static int xhci_plat_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct usb_xhci_pdata *pdata = dev_get_platdata(&pdev->dev);
const struct hc_driver *driver;
struct xhci_hcd *xhci;
@@ -227,10 +233,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
- if ((node && of_property_read_bool(node, "usb3-lpm-capable")) ||
- (pdata && pdata->usb3_lpm_capable))
- xhci->quirks |= XHCI_LPM_SUPPORT;
-
if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index ada39f5f486a..3d5d046c536a 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -661,6 +661,7 @@ struct mdss_mdp_img_data {
struct dma_buf *srcp_dma_buf;
struct dma_buf_attachment *srcp_attachment;
struct sg_table *srcp_table;
+ struct ion_handle *ihandle;
};
enum mdss_mdp_data_state {
@@ -702,6 +703,8 @@ struct pp_hist_col_info {
char __iomem *base;
u32 intr_shift;
u32 disp_num;
+ u32 expect_sum;
+ u32 next_sum;
struct mdss_mdp_ctl *ctl;
};
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index bec652f7e4ba..bd70535e79f9 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -4496,11 +4496,15 @@ end:
*/
static void mdss_mdp_pipe_reset(struct mdss_mdp_mixer *mixer, bool is_recovery)
{
- unsigned long pipe_map = mixer->pipe_mapped;
+ unsigned long pipe_map;
u32 bit = 0;
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
bool sw_rst_avail = mdss_mdp_pipe_is_sw_reset_available(mdata);
+ if (!mixer)
+ return;
+
+ pipe_map = mixer->pipe_mapped;
pr_debug("pipe_map=0x%lx\n", pipe_map);
for_each_set_bit_from(bit, &pipe_map, MAX_PIPES_PER_LM) {
struct mdss_mdp_pipe *pipe;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index c5e1c966ee2f..583cfed598cd 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -3207,6 +3207,8 @@ int mdss_mdp_cmd_ctx_stop(struct mdss_mdp_ctl *ctl,
ctx->default_pp_num, NULL, NULL);
memset(ctx, 0, sizeof(*ctx));
+ /* intf stopped, no more kickoff */
+ ctx->intf_stopped = 1;
return 0;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index c9e32d69d444..fce667a2126d 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -1123,6 +1123,7 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
int ret = 0;
u32 left_lm_w = left_lm_w_from_mfd(mfd);
u64 flags;
+ bool is_right_blend = false;
struct mdss_mdp_mixer *mixer = NULL;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
@@ -1234,6 +1235,7 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
 * staging, same pipe will be staged on both layer mixers.
*/
if (mdata->has_src_split) {
+ is_right_blend = pipe->is_right_blend;
if (left_blend_pipe) {
if (__validate_pipe_priorities(left_blend_pipe, pipe)) {
pr_err("priority limitation. left:%d rect:%d, right:%d rect:%d\n",
@@ -1245,7 +1247,7 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
goto end;
} else {
pr_debug("pipe%d is a right_pipe\n", pipe->num);
- pipe->is_right_blend = true;
+ is_right_blend = true;
}
} else if (pipe->is_right_blend) {
/*
@@ -1254,7 +1256,7 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
*/
mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
- pipe->is_right_blend = false;
+ is_right_blend = false;
}
if (is_split_lm(mfd) && __layer_needs_src_split(layer)) {
@@ -1280,6 +1282,7 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
}
pipe->src_split_req = false;
}
+ pipe->is_right_blend = is_right_blend;
}
pipe->multirect.mode = vinfo->multirect.mode;
@@ -2261,6 +2264,78 @@ static int __validate_multirect(struct msm_fb_data_type *mfd,
return 0;
}
+static int __check_source_split(struct mdp_input_layer *layer_list,
+ struct mdss_mdp_pipe **pipe_list, u32 index,
+ u32 left_lm_w, struct mdss_mdp_pipe **left_blend_pipe)
+{
+ int i = index - 1;
+ struct mdp_input_layer *curr, *prev;
+ struct mdp_rect *left, *right;
+ bool match = false;
+ struct mdss_mdp_pipe *left_pipe = NULL;
+
+ /*
+	 * Check if the current layer is at the same z_order as any of the
+	 * previous layers, and fail if either is an async layer, as async
+	 * layers must have unique z_order.
+ *
+ * If it has same z_order and qualifies as a right blend,
+ * pass a pointer to the pipe representing previous overlay or
+ * in other terms left blend layer.
+ *
+ * Following logic of selecting left_blend has an inherent
+ * assumption that layer list is sorted on dst_x within a
+ * same z_order. Otherwise it will fail based on z_order checks.
+ */
+ curr = &layer_list[index];
+
+ while (i >= 0) {
+ if (layer_list[i].z_order == curr->z_order) {
+ pr_debug("z=%d found match @ %d of %d\n",
+ curr->z_order, i, index);
+ match = true;
+ break;
+ }
+ i--;
+ }
+
+ if (match) {
+ left_pipe = pipe_list[i];
+ prev = &layer_list[i];
+ left = &prev->dst_rect;
+ right = &curr->dst_rect;
+
+ if ((curr->flags & MDP_LAYER_ASYNC)
+ || (prev->flags & MDP_LAYER_ASYNC)) {
+ curr->error_code = -EINVAL;
+			pr_err("async layers should have unique z_order\n");
+ return curr->error_code;
+ }
+
+ /*
+ * check if curr is right blend by checking it's
+ * directly to the right.
+ */
+ if (((left->x + left->w) == right->x) &&
+ (left->y == right->y) && (left->h == right->h)) {
+ *left_blend_pipe = left_pipe;
+ MDSS_XLOG(curr->z_order, i, index);
+ }
+
+ /*
+		 * if curr is right at the left lm boundary and
+ * src split is not required then right blend is not
+ * required as it will lie only on the left mixer
+ */
+ if (!__layer_needs_src_split(prev) &&
+ ((left->x + left->w) == left_lm_w))
+ *left_blend_pipe = NULL;
+ }
+
+ return 0;
+}
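The right-blend test inside __check_source_split() is pure rectangle adjacency: the current layer must start exactly where the previous one ends, on the same scanlines. Isolated as a predicate (a sketch; struct mdp_rect fields per include/uapi/linux/msm_mdp.h):

#include <linux/msm_mdp.h>

/* True when 'right' sits flush against 'left' with identical y and height. */
static bool example_is_right_blend(const struct mdp_rect *left,
				   const struct mdp_rect *right)
{
	return (left->x + left->w) == right->x &&
	       left->y == right->y &&
	       left->h == right->h;
}

/* e.g. left = {0, 0, 960, 1080}, right = {960, 0, 960, 1080} -> true */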
+
/*
* __validate_layers() - validate input layers
* @mfd: Framebuffer data structure for display
@@ -2291,13 +2366,14 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
struct mdss_mdp_mixer *mixer = NULL;
- struct mdp_input_layer *layer, *prev_layer, *layer_list;
+ struct mdp_input_layer *layer, *layer_list;
struct mdss_mdp_validate_info_t *validate_info_list = NULL;
bool is_single_layer = false, force_validate;
enum layer_pipe_q pipe_q_type;
enum layer_zorder_used zorder_used[MDSS_MDP_MAX_STAGE] = {0};
enum mdss_mdp_pipe_rect rect_num;
struct mdp_destination_scaler_data *ds_data;
+ struct mdss_mdp_pipe *pipe_list[MAX_LAYER_COUNT] = {0};
ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
if (ret)
@@ -2369,49 +2445,10 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
dst_x = layer->dst_rect.x;
left_blend_pipe = NULL;
- prev_layer = (i > 0) ? &layer_list[i - 1] : NULL;
- /*
- * check if current layer is at same z_order as
- * previous one, and fail if any or both are async layers,
- * as async layers should have unique z_order.
- *
- * If it has same z_order and qualifies as a right blend,
- * pass a pointer to the pipe representing previous overlay or
- * in other terms left blend layer.
- *
- * Following logic of selecting left_blend has an inherent
- * assumption that layer list is sorted on dst_x within a
- * same z_order. Otherwise it will fail based on z_order checks.
- */
- if (prev_layer && (prev_layer->z_order == layer->z_order)) {
- struct mdp_rect *left = &prev_layer->dst_rect;
- struct mdp_rect *right = &layer->dst_rect;
-
- if ((layer->flags & MDP_LAYER_ASYNC)
- || (prev_layer->flags & MDP_LAYER_ASYNC)) {
- ret = -EINVAL;
- layer->error_code = ret;
- pr_err("async layer should have unique z_order\n");
- goto validate_exit;
- }
-
- /*
- * check if layer is right blend by checking it's
- * directly to the right.
- */
- if (((left->x + left->w) == right->x) &&
- (left->y == right->y) && (left->h == right->h))
- left_blend_pipe = pipe;
-
- /*
- * if the layer is right at the left lm boundary and
- * src split is not required then right blend is not
- * required as it will lie only on the left mixer
- */
- if (!__layer_needs_src_split(prev_layer) &&
- ((left->x + left->w) == left_lm_w))
- left_blend_pipe = NULL;
- }
+		if (i > 0) {
+			ret = __check_source_split(layer_list, pipe_list, i,
+					left_lm_w, &left_blend_pipe);
+			if (ret)
+				goto validate_exit;
+		}
if (!is_split_lm(mfd) || __layer_needs_src_split(layer))
z = LAYER_ZORDER_BOTH;
@@ -2456,6 +2493,8 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
else
left_plist[left_cnt++] = pipe;
+ pipe_list[i] = pipe;
+
if (layer->flags & MDP_LAYER_PP) {
memcpy(&pipe->pp_cfg, layer->pp_info,
sizeof(struct mdp_overlay_pp_params));
@@ -2548,6 +2587,8 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
else
left_plist[left_cnt++] = pipe;
+ pipe_list[i] = pipe;
+
pr_debug("id:0x%x flags:0x%x dst_x:%d\n",
layer->pipe_ndx, layer->flags, layer->dst_rect.x);
layer->z_order -= MDSS_MDP_STAGE_0;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index 1fe8fe6f7be8..f10d4fb60f52 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -2146,6 +2146,7 @@ static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix,
unsigned long flag;
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
u32 intr_mask;
+ u32 expected_sum = 0;
if (!mdata)
return -EPERM;
@@ -2156,6 +2157,7 @@ static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix,
block_type = DSPP;
op_flags = BIT(16);
hist_info = &mdss_pp_res->dspp_hist[mix->num];
+ expected_sum = mix->width * mix->height;
base = mdss_mdp_get_dspp_addr_off(PP_BLOCK(block));
if (IS_ERR(base)) {
ret = -EPERM;
@@ -2207,6 +2209,15 @@ static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix,
else if (hist_info->col_en)
*op |= op_flags;
+ if (hist_info->col_en) {
+ if (!hist_info->expect_sum) {
+ hist_info->expect_sum = expected_sum;
+ } else if (hist_info->expect_sum != expected_sum) {
+ hist_info->expect_sum = 0;
+ hist_info->next_sum = expected_sum;
+ }
+ }
+
spin_unlock_irqrestore(&hist_info->hist_lock, flag);
mutex_unlock(&hist_info->hist_mutex);
error:
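The expect_sum/next_sum pair added to pp_hist_col_info is a two-phase handshake between setup and collect: setup records the pixel count the histogram bins should sum to; if the mixer geometry changes while collection is armed, the stale expectation is zeroed (skipping one validation) and the new value parks in next_sum until a collection completes. Condensed, the two sides look like this (a sketch that omits the driver's locking):

/* Setup side: runs whenever the mixer is (re)configured. */
static void example_hist_setup(struct pp_hist_col_info *hi, u32 w, u32 h)
{
	u32 expected = w * h;

	if (!hi->expect_sum)
		hi->expect_sum = expected;	/* first arm */
	else if (hi->expect_sum != expected) {
		hi->expect_sum = 0;		/* geometry changed: skip one check */
		hi->next_sum = expected;	/* re-arm after the next collect */
	}
}

/* Collect side: validates the bin sum, then re-arms if a new sum is parked. */
static void example_hist_collect_done(struct pp_hist_col_info *hi, u32 sum)
{
	if (hi->expect_sum && sum != hi->expect_sum)
		pr_err_ratelimited("hist error: bin sum incorrect! (%u/%u)\n",
				   sum, hi->expect_sum);

	if (hi->next_sum) {
		hi->expect_sum = hi->next_sum;
		hi->next_sum = 0;
	}
}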
@@ -5276,8 +5287,7 @@ exit:
static int pp_hist_collect(struct mdp_histogram_data *hist,
struct pp_hist_col_info *hist_info,
- char __iomem *ctl_base, u32 expect_sum,
- u32 block)
+ char __iomem *ctl_base, u32 block)
{
int ret = 0;
int sum = 0;
@@ -5318,11 +5328,16 @@ static int pp_hist_collect(struct mdp_histogram_data *hist,
if (sum < 0) {
pr_err("failed to get the hist data, sum = %d\n", sum);
ret = sum;
- } else if (expect_sum && sum != expect_sum) {
+ } else if (hist_info->expect_sum && sum != hist_info->expect_sum) {
pr_err_ratelimited("hist error: bin sum incorrect! (%d/%d)\n",
- sum, expect_sum);
+ sum, hist_info->expect_sum);
ret = -EINVAL;
}
+
+ if (hist_info->next_sum) {
+ hist_info->expect_sum = hist_info->next_sum;
+ hist_info->next_sum = 0;
+ }
hist_collect_exit:
mutex_unlock(&hist_info->hist_mutex);
return ret;
@@ -5387,8 +5402,7 @@ int mdss_mdp_hist_collect(struct mdp_histogram_data *hist)
mdata->mixer_intf[dspp_num].height);
if (ret)
temp_ret = ret;
- ret = pp_hist_collect(hist, hists[i], ctl_base,
- exp_sum, DSPP);
+ ret = pp_hist_collect(hist, hists[i], ctl_base, DSPP);
if (ret)
pr_err_ratelimited("hist error: dspp[%d] collect %d\n",
dspp_num, ret);
@@ -5487,7 +5501,7 @@ int mdss_mdp_hist_collect(struct mdp_histogram_data *hist)
if (ret)
temp_ret = ret;
ret = pp_hist_collect(hist, hist_info, ctl_base,
- exp_sum, SSPP_VIG);
+ SSPP_VIG);
if (ret)
pr_debug("hist error: pipe[%d] collect: %d\n",
pipe->num, ret);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_util.c b/drivers/video/fbdev/msm/mdss_mdp_util.c
index c14840ffd08d..d0bf61679f61 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_util.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -974,7 +974,9 @@ static int mdss_mdp_put_img(struct mdss_mdp_img_data *data, bool rotator,
* be filled due to map call which will be unmapped above.
*
*/
- pr_debug("skip memory unmapping for secure display/camera content\n");
+ if (data->ihandle)
+ ion_free(iclient, data->ihandle);
+ pr_debug("free memory handle for secure display/camera content\n");
} else {
return -ENOMEM;
}
@@ -1053,19 +1055,18 @@ static int mdss_mdp_get_img(struct msmfb_data *img,
ret = 0;
goto done;
} else {
- struct ion_handle *ihandle = NULL;
struct sg_table *sg_ptr = NULL;
+ data->ihandle = ion_import_dma_buf(iclient,
+ img->memory_id);
+ if (IS_ERR_OR_NULL(data->ihandle)) {
+ ret = -EINVAL;
+ pr_err("ion import buffer failed\n");
+ data->ihandle = NULL;
+ goto done;
+ }
do {
- ihandle = ion_import_dma_buf(iclient,
- img->memory_id);
- if (IS_ERR_OR_NULL(ihandle)) {
- ret = -EINVAL;
- pr_err("ion import buffer failed\n");
- break;
- }
-
- sg_ptr = ion_sg_table(iclient, ihandle);
+ sg_ptr = ion_sg_table(iclient, data->ihandle);
if (sg_ptr == NULL) {
pr_err("ion sg table get failed\n");
ret = -EINVAL;
@@ -1091,8 +1092,6 @@ static int mdss_mdp_get_img(struct msmfb_data *img,
ret = 0;
} while (0);
- if (!IS_ERR_OR_NULL(ihandle))
- ion_free(iclient, ihandle);
return ret;
}
}
diff --git a/include/linux/msm_mhi.h b/include/linux/msm_mhi.h
index f8ba31ea7573..b9fd610f92da 100644
--- a/include/linux/msm_mhi.h
+++ b/include/linux/msm_mhi.h
@@ -12,12 +12,14 @@
#ifndef MSM_MHI_H
#define MSM_MHI_H
#include <linux/types.h>
-
-struct mhi_client_handle;
+#include <linux/device.h>
#define MHI_DMA_MASK 0xFFFFFFFFFFULL
#define MHI_MAX_MTU 0xFFFF
+struct mhi_client_config;
+struct mhi_device_ctxt;
+
enum MHI_CLIENT_CHANNEL {
MHI_CLIENT_LOOPBACK_OUT = 0,
MHI_CLIENT_LOOPBACK_IN = 1,
@@ -70,11 +72,11 @@ enum MHI_CLIENT_CHANNEL {
};
enum MHI_CB_REASON {
- MHI_CB_XFER = 0x0,
- MHI_CB_MHI_DISABLED = 0x4,
- MHI_CB_MHI_ENABLED = 0x8,
- MHI_CB_CHAN_RESET_COMPLETE = 0x10,
- MHI_CB_reserved = 0x80000000,
+ MHI_CB_XFER,
+ MHI_CB_MHI_DISABLED,
+ MHI_CB_MHI_ENABLED,
+ MHI_CB_MHI_SHUTDOWN,
+ MHI_CB_SYS_ERROR,
};
enum MHI_FLAGS {
@@ -99,10 +101,90 @@ struct mhi_cb_info {
};
struct mhi_client_info_t {
+ enum MHI_CLIENT_CHANNEL chan;
+ const struct device *dev;
+ const char *node_name;
void (*mhi_client_cb)(struct mhi_cb_info *);
+ bool pre_allocate;
+ size_t max_payload;
+ void *user_data;
+};
+
+struct mhi_client_handle {
+ u32 dev_id;
+ u32 domain;
+ u32 bus;
+ u32 slot;
+ struct mhi_client_config *client_config;
+};
+
+struct __packed bhi_vec_entry {
+ u64 phys_addr;
+ u64 size;
+};
+
+/**
+ * struct mhi_device - I/O resources for MHI
+ * @dev: device node; points to the DT of_node
+ * @pci_dev: PCI device node
+ * @resources: BAR memory space and IRQ resources
+ * @pm_runtime_get: function pointer for the bus master's runtime PM pm_runtime_get
+ * @pm_runtime_noidle: function pointer for the bus master's runtime PM pm_runtime_noidle
+ * @mhi_dev_ctxt: private data for host
+ */
+struct mhi_device {
+ struct device *dev;
+ struct pci_dev *pci_dev;
+ struct resource resources[2];
+ int (*pm_runtime_get)(struct pci_dev *pci_dev);
+ void (*pm_runtime_noidle)(struct pci_dev *pci_dev);
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+};
+
+enum mhi_dev_ctrl {
+ MHI_DEV_CTRL_INIT,
+ MHI_DEV_CTRL_DE_INIT,
+ MHI_DEV_CTRL_SUSPEND,
+ MHI_DEV_CTRL_RESUME,
+ MHI_DEV_CTRL_POWER_OFF,
+ MHI_DEV_CTRL_POWER_ON,
+ MHI_DEV_CTRL_RAM_DUMP,
+ MHI_DEV_CTRL_NOTIFY_LINK_ERROR,
};
/**
+ * mhi_is_device_ready - Check if MHI is ready to register clients
+ *
+ * @dev: device node that points to DT node
+ * @node_name: name of the device tree node that links to the MHI node
+ *
+ * @Return true if ready
+ */
+bool mhi_is_device_ready(const struct device * const dev,
+ const char *node_name);
+
+/**
+ * mhi_register_device - register hardware resources with MHI
+ *
+ * @mhi_device: resources to be used
+ * @node_name: DT node name
+ * @user_data: cb data for client
+ * @Return 0 on success
+ */
+int mhi_register_device(struct mhi_device *mhi_device,
+ const char *node_name,
+ unsigned long user_data);
+
+/**
+ * mhi_pm_control_device - power management control api
+ * @mhi_device: registered device structure
+ * @ctrl: specific command
+ * @Return 0 on success
+ */
+int mhi_pm_control_device(struct mhi_device *mhi_device,
+ enum mhi_dev_ctrl ctrl);
+
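Putting the new bus-master API together: a PCI driver registers its resources once with mhi_register_device() and then drives state through mhi_pm_control_device(). A hedged usage sketch (probe flow, wrapper names, and the "qcom,mhi" node name are illustrative; BAR/IRQ resource setup is trimmed):

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/msm_mhi.h>

static int example_rpm_get(struct pci_dev *pci_dev)
{
	return pm_runtime_get(&pci_dev->dev);
}

static void example_rpm_noidle(struct pci_dev *pci_dev)
{
	pm_runtime_put_noidle(&pci_dev->dev);
}

static struct mhi_device example_mhi_dev;

static int example_register_and_power_on(struct pci_dev *pci_dev)
{
	int ret;

	example_mhi_dev.dev = &pci_dev->dev;
	example_mhi_dev.pci_dev = pci_dev;
	example_mhi_dev.pm_runtime_get = example_rpm_get;
	example_mhi_dev.pm_runtime_noidle = example_rpm_noidle;

	ret = mhi_register_device(&example_mhi_dev, "qcom,mhi", 0);
	if (ret)
		return ret;

	return mhi_pm_control_device(&example_mhi_dev, MHI_DEV_CTRL_POWER_ON);
}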
+/**
* mhi_deregister_channel - de-register callbacks from MHI
*
* @client_handle: Handle populated by MHI, opaque to client
@@ -116,21 +198,13 @@ int mhi_deregister_channel(struct mhi_client_handle *client_handle);
* any MHI operations
*
* @client_handle: Handle populated by MHI, opaque to client
- * @chan: Channel provided by client to which the handle
- * maps to.
- * @device_index: MHI device for which client wishes to register, if
- * there are multiple devices supporting MHI. Client
- * should specify 0 for the first device 1 for second etc.
- * @info: Client provided callbacks which MHI will invoke on events
- * @user_data: Client provided context to be returned to client upon
- * callback invocation.
- * Not thread safe, caller must ensure concurrency protection.
+ * @client_info: Channel/device information provided by the client,
+ *		to which the handle maps.
*
* @Return errno
*/
int mhi_register_channel(struct mhi_client_handle **client_handle,
- enum MHI_CLIENT_CHANNEL chan, s32 device_index,
- struct mhi_client_info_t *client_info, void *user_data);
+ struct mhi_client_info_t *client_info);
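Under the new signature every channel parameter travels inside mhi_client_info_t, so registration collapses to filling the struct and passing it in. A minimal client sketch (callback body and the "qcom,mhi" node name are illustrative):

#include <linux/errno.h>
#include <linux/msm_mhi.h>

static void example_mhi_cb(struct mhi_cb_info *cb_info)
{
	/* Handle MHI_CB_XFER, MHI_CB_MHI_ENABLED, MHI_CB_SYS_ERROR, ... */
}

static int example_register_loopback(struct device *dev,
				     struct mhi_client_handle **handle)
{
	struct mhi_client_info_t info = {
		.chan = MHI_CLIENT_LOOPBACK_OUT,
		.dev = dev,
		.node_name = "qcom,mhi",
		.mhi_client_cb = example_mhi_cb,
		.max_payload = MHI_MAX_MTU,
		.user_data = NULL,
	};

	if (!mhi_is_device_ready(dev, "qcom,mhi"))
		return -EPROBE_DEFER;

	return mhi_register_channel(handle, &info);
}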
/**
* mhi_open_channel - Client must call this function to open a channel
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index d2f19ac6f536..99fe34d25fc5 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -263,10 +263,10 @@ struct drm_msm_event_resp {
#define DRM_MSM_GEM_CPU_FINI 0x05
#define DRM_MSM_GEM_SUBMIT 0x06
#define DRM_MSM_WAIT_FENCE 0x07
-#define DRM_SDE_WB_CONFIG 0x08
-#define DRM_MSM_REGISTER_EVENT 0x09
-#define DRM_MSM_DEREGISTER_EVENT 0x0A
-#define DRM_MSM_NUM_IOCTLS 0x0B
+
+#define DRM_SDE_WB_CONFIG 0x40
+#define DRM_MSM_REGISTER_EVENT 0x41
+#define DRM_MSM_DEREGISTER_EVENT 0x42
/**
* Currently DRM framework supports only VSYNC event.
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index bc81fb2e1f0e..47d6342b1c48 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -56,6 +56,7 @@
#define EPOLL_PACKED
#endif
+#ifdef __KERNEL__
struct epoll_event {
__u32 events;
__u64 data;
@@ -73,4 +74,5 @@ static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
epev->events &= ~EPOLLWAKEUP;
}
#endif
+#endif /* __KERNEL__ */
#endif /* _UAPI_LINUX_EVENTPOLL_H */
diff --git a/include/uapi/media/msmb_isp.h b/include/uapi/media/msmb_isp.h
index 21fcb3401298..d84bb30d56fa 100644
--- a/include/uapi/media/msmb_isp.h
+++ b/include/uapi/media/msmb_isp.h
@@ -24,6 +24,8 @@
#define ISP_STATS_STREAM_BIT 0x80000000
+#define VFE_HW_LIMIT 1
+
struct msm_vfe_cfg_cmd_list;
enum ISP_START_PIXEL_PATTERN {
@@ -456,6 +458,7 @@ enum msm_vfe_reg_cfg_type {
VFE_HW_UPDATE_UNLOCK,
SET_WM_UB_SIZE,
SET_UB_POLICY,
+ GET_VFE_HW_LIMIT,
};
struct msm_vfe_cfg_cmd2 {
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index e18a756b0eda..e5e71939f529 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -4046,6 +4046,7 @@ EXPORT_SYMBOL(msm_anlg_codec_info_create_codec_entry);
static int msm_anlg_cdc_soc_probe(struct snd_soc_codec *codec)
{
struct sdm660_cdc_priv *sdm660_cdc;
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
int ret;
sdm660_cdc = dev_get_drvdata(codec->dev);
@@ -4154,6 +4155,9 @@ static int msm_anlg_cdc_soc_probe(struct snd_soc_codec *codec)
/* Set initial cap mode */
msm_anlg_cdc_configure_cap(codec, false, false);
+ snd_soc_dapm_ignore_suspend(dapm, "PDM Playback");
+ snd_soc_dapm_ignore_suspend(dapm, "PDM Capture");
+
return 0;
}
@@ -4229,24 +4233,10 @@ static int msm_anlg_cdc_disable_static_supplies_to_optimum(
static int msm_anlg_cdc_suspend(struct snd_soc_codec *codec)
{
- struct msm_asoc_mach_data *pdata = NULL;
struct sdm660_cdc_priv *sdm660_cdc = snd_soc_codec_get_drvdata(codec);
struct sdm660_cdc_pdata *sdm660_cdc_pdata =
sdm660_cdc->dev->platform_data;
- pdata = snd_soc_card_get_drvdata(codec->component.card);
- pr_debug("%s: mclk cnt = %d, mclk_enabled = %d\n",
- __func__, atomic_read(&pdata->int_mclk0_rsc_ref),
- atomic_read(&pdata->int_mclk0_enabled));
- if (atomic_read(&pdata->int_mclk0_enabled) == true) {
- cancel_delayed_work_sync(&pdata->disable_int_mclk0_work);
- mutex_lock(&pdata->cdc_int_mclk0_mutex);
- pdata->digital_cdc_core_clk.enable = 0;
- afe_set_lpass_clock_v2(AFE_PORT_ID_INT0_MI2S_RX,
- &pdata->digital_cdc_core_clk);
- atomic_set(&pdata->int_mclk0_enabled, false);
- mutex_unlock(&pdata->cdc_int_mclk0_mutex);
- }
msm_anlg_cdc_disable_static_supplies_to_optimum(sdm660_cdc,
sdm660_cdc_pdata);
return 0;
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
index d8828a1e36b7..91faee1ffd32 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -79,7 +79,7 @@ static int msm_digcdc_clock_control(bool flag)
if (atomic_read(&pdata->int_mclk0_enabled) == false) {
pdata->digital_cdc_core_clk.enable = 1;
ret = afe_set_lpass_clock_v2(
- AFE_PORT_ID_PRIMARY_MI2S_RX,
+ AFE_PORT_ID_INT0_MI2S_RX,
&pdata->digital_cdc_core_clk);
if (ret < 0) {
pr_err("%s:failed to enable the MCLK\n",
@@ -1166,6 +1166,7 @@ EXPORT_SYMBOL(msm_dig_codec_info_create_codec_entry);
static int msm_dig_cdc_soc_probe(struct snd_soc_codec *codec)
{
struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(codec->dev);
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
int i, ret;
msm_dig_cdc->codec = codec;
@@ -1197,6 +1198,15 @@ static int msm_dig_cdc_soc_probe(struct snd_soc_codec *codec)
}
registered_digcodec = codec;
+ snd_soc_dapm_ignore_suspend(dapm, "AIF1 Playback");
+ snd_soc_dapm_ignore_suspend(dapm, "AIF1 Capture");
+ snd_soc_dapm_ignore_suspend(dapm, "ADC1_IN");
+ snd_soc_dapm_ignore_suspend(dapm, "ADC2_IN");
+ snd_soc_dapm_ignore_suspend(dapm, "ADC3_IN");
+ snd_soc_dapm_ignore_suspend(dapm, "PDM_OUT_RX1");
+ snd_soc_dapm_ignore_suspend(dapm, "PDM_OUT_RX2");
+ snd_soc_dapm_ignore_suspend(dapm, "PDM_OUT_RX3");
+
return 0;
}
@@ -1969,9 +1979,27 @@ static struct regmap *msm_digital_get_regmap(struct device *dev)
return msm_dig_cdc->regmap;
}
+static int msm_dig_cdc_suspend(struct snd_soc_codec *codec)
+{
+ struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(codec->dev);
+
+ msm_dig_cdc->dapm_bias_off = 1;
+ return 0;
+}
+
+static int msm_dig_cdc_resume(struct snd_soc_codec *codec)
+{
+ struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(codec->dev);
+
+ msm_dig_cdc->dapm_bias_off = 0;
+ return 0;
+}
+
static struct snd_soc_codec_driver soc_msm_dig_codec = {
.probe = msm_dig_cdc_soc_probe,
.remove = msm_dig_cdc_soc_remove,
+ .suspend = msm_dig_cdc_suspend,
+ .resume = msm_dig_cdc_resume,
.controls = msm_dig_snd_controls,
.num_controls = ARRAY_SIZE(msm_dig_snd_controls),
.dapm_widgets = msm_dig_dapm_widgets,
@@ -2054,6 +2082,44 @@ static int msm_dig_cdc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int msm_dig_suspend(struct device *dev)
+{
+ struct msm_asoc_mach_data *pdata =
+ snd_soc_card_get_drvdata(registered_digcodec->component.card);
+ struct msm_dig_priv *msm_dig_cdc = dev_get_drvdata(dev);
+
+ if (msm_dig_cdc->dapm_bias_off) {
+ pr_debug("%s: mclk cnt = %d, mclk_enabled = %d\n",
+ __func__, atomic_read(&pdata->int_mclk0_rsc_ref),
+ atomic_read(&pdata->int_mclk0_enabled));
+
+ if (atomic_read(&pdata->int_mclk0_enabled) == true) {
+ cancel_delayed_work_sync(
+ &pdata->disable_int_mclk0_work);
+ mutex_lock(&pdata->cdc_int_mclk0_mutex);
+ pdata->digital_cdc_core_clk.enable = 0;
+ afe_set_lpass_clock_v2(AFE_PORT_ID_INT0_MI2S_RX,
+ &pdata->digital_cdc_core_clk);
+ atomic_set(&pdata->int_mclk0_enabled, false);
+ mutex_unlock(&pdata->cdc_int_mclk0_mutex);
+ }
+ }
+
+ return 0;
+}
+
+static int msm_dig_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops msm_dig_pm_ops = {
+ .suspend = msm_dig_suspend,
+ .resume = msm_dig_resume,
+};
+#endif
+
static const struct of_device_id msm_dig_cdc_of_match[] = {
{.compatible = "qcom,msm-digital-codec"},
{},
@@ -2064,6 +2130,9 @@ static struct platform_driver msm_digcodec_driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = msm_dig_cdc_of_match,
+#ifdef CONFIG_PM
+ .pm = &msm_dig_pm_ops,
+#endif
},
.probe = msm_dig_cdc_probe,
.remove = msm_dig_cdc_remove,
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h
index b401a4082cbb..f0e7a9cf9228 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.h
@@ -47,6 +47,7 @@ struct msm_dig_priv {
struct regmap *regmap;
struct notifier_block nblock;
u32 mute_mask;
+ int dapm_bias_off;
void *handle;
void (*update_clkdiv)(void *handle, int val);
int (*get_cdc_version)(void *handle);
diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
index 0b27f3f62b02..27b96799be2b 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,7 @@ struct wcd934x_mbhc {
bool is_hph_recover;
};
+#ifdef CONFIG_SND_SOC_WCD934X_MBHC
extern int tavil_mbhc_init(struct wcd934x_mbhc **mbhc,
struct snd_soc_codec *codec,
struct fw_info *fw_data);
@@ -46,6 +47,40 @@ extern int tavil_mbhc_post_ssr_init(struct wcd934x_mbhc *mbhc,
struct snd_soc_codec *codec);
extern int tavil_mbhc_get_impedance(struct wcd934x_mbhc *wcd934x_mbhc,
uint32_t *zl, uint32_t *zr);
+#else
+static inline int tavil_mbhc_init(struct wcd934x_mbhc **mbhc,
+ struct snd_soc_codec *codec,
+ struct fw_info *fw_data)
+{
+ return 0;
+}
+static inline void tavil_mbhc_hs_detect_exit(struct snd_soc_codec *codec)
+{
+}
+static inline int tavil_mbhc_hs_detect(struct snd_soc_codec *codec,
+ struct wcd_mbhc_config *mbhc_cfg)
+{
+ return 0;
+}
+static inline void tavil_mbhc_deinit(struct snd_soc_codec *codec)
+{
+}
+static inline int tavil_mbhc_post_ssr_init(struct wcd934x_mbhc *mbhc,
+ struct snd_soc_codec *codec)
+{
+ return 0;
+}
+static inline int tavil_mbhc_get_impedance(struct wcd934x_mbhc *wcd934x_mbhc,
+ uint32_t *zl, uint32_t *zr)
+{
+ if (zl)
+ *zl = 0;
+ if (zr)
+ *zr = 0;
+ return -EINVAL;
+}
+#endif
+
#endif /* __WCD934X_MBHC_H__ */
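The MBHC header change above is the standard Kconfig stub pattern: real prototypes when the option is enabled, static inline no-ops otherwise, so callers compile unchanged without sprinkling #ifdefs. In general form (hypothetical feature and names):

struct example_ctx;

#ifdef CONFIG_EXAMPLE_FEATURE
int example_init(struct example_ctx *ctx);
void example_exit(struct example_ctx *ctx);
#else
static inline int example_init(struct example_ctx *ctx)
{
	return 0;	/* feature compiled out: succeed as a no-op */
}
static inline void example_exit(struct example_ctx *ctx)
{
}
#endif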
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 4fa80c679b46..8e986a74ffff 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -56,8 +56,8 @@
#define FLAC_BLK_SIZE_LIMIT 65535
/* Timestamp mode payload offsets */
-#define TS_LSW_OFFSET 6
-#define TS_MSW_OFFSET 7
+#define CAPTURE_META_DATA_TS_OFFSET_LSW 6
+#define CAPTURE_META_DATA_TS_OFFSET_MSW 7
/* decoder parameter length */
#define DDP_DEC_MAX_NUM_PARAM 18
@@ -410,6 +410,7 @@ static int msm_compr_send_buffer(struct msm_compr_audio *prtd)
int buffer_length;
uint64_t bytes_available;
struct audio_aio_write_param param;
+ struct snd_codec_metadata *buff_addr;
if (!atomic_read(&prtd->start)) {
pr_err("%s: stream is not in started state\n", __func__);
@@ -442,23 +443,34 @@ static int msm_compr_send_buffer(struct msm_compr_audio *prtd)
}
if (buffer_length) {
- param.paddr = prtd->buffer_paddr + prtd->byte_offset;
+ param.paddr = prtd->buffer_paddr + prtd->byte_offset;
WARN(prtd->byte_offset % 32 != 0, "offset %x not multiple of 32",
prtd->byte_offset);
}
else
- param.paddr = prtd->buffer_paddr;
-
+ param.paddr = prtd->buffer_paddr;
param.len = buffer_length;
- param.msw_ts = 0;
- param.lsw_ts = 0;
- param.flags = NO_TIMESTAMP;
+ if (prtd->ts_header_offset) {
+ buff_addr = (struct snd_codec_metadata *)
+ (prtd->buffer + prtd->byte_offset);
+ param.len = buff_addr->length;
+ param.msw_ts = (uint32_t)
+ ((buff_addr->timestamp & 0xFFFFFFFF00000000LL) >> 32);
+ param.lsw_ts = (uint32_t) (buff_addr->timestamp & 0xFFFFFFFFLL);
+ param.paddr += prtd->ts_header_offset;
+ param.flags = SET_TIMESTAMP;
+ param.metadata_len = prtd->ts_header_offset;
+ } else {
+ param.msw_ts = 0;
+ param.lsw_ts = 0;
+ param.flags = NO_TIMESTAMP;
+ param.metadata_len = 0;
+ }
param.uid = buffer_length;
- param.metadata_len = 0;
param.last_buffer = prtd->last_buffer;
pr_debug("%s: sending %d bytes to DSP byte_offset = %d\n",
- __func__, buffer_length, prtd->byte_offset);
+ __func__, param.len, prtd->byte_offset);
if (q6asm_async_write(prtd->audio_client, &param) < 0) {
pr_err("%s:q6asm_async_write failed\n", __func__);
} else {
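The timestamp path above prepends a struct snd_codec_metadata header to each fragment and splits its 64-bit timestamp into the 32-bit MSW/LSW pair carried by the write param. The extraction in isolation (a minimal sketch):

#include <linux/types.h>

/* Split a 64-bit timestamp into the MSW/LSW halves used by the write param. */
static void example_split_ts(u64 timestamp, u32 *msw, u32 *lsw)
{
	*msw = (u32)((timestamp & 0xFFFFFFFF00000000LL) >> 32);
	*lsw = (u32)(timestamp & 0xFFFFFFFFLL);
}

/* e.g. 0x0000000100000002ULL -> msw = 0x1, lsw = 0x2 */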
@@ -577,9 +589,21 @@ static void compr_event_handler(uint32_t opcode,
* written to ADSP in the last write, update offset and
* total copied data accordingly.
*/
-
- prtd->byte_offset += token;
- prtd->copied_total += token;
+ if (prtd->ts_header_offset) {
+ /* Always assume that the data will be sent to DSP on
+ * frame boundary.
+ * i.e, one frame of userspace write will result in
+ * one kernel write to DSP. This is needed as
+ * timestamp will be sent per frame.
+ */
+ prtd->byte_offset +=
+ prtd->codec_param.buffer.fragment_size;
+ prtd->copied_total +=
+ prtd->codec_param.buffer.fragment_size;
+ } else {
+ prtd->byte_offset += token;
+ prtd->copied_total += token;
+ }
if (prtd->byte_offset >= prtd->buffer_size)
prtd->byte_offset -= prtd->buffer_size;
@@ -634,10 +658,10 @@ static void compr_event_handler(uint32_t opcode,
*buff_addr = prtd->ts_header_offset;
buff_addr++;
/* Write the TS LSW */
- *buff_addr = payload[TS_LSW_OFFSET];
+ *buff_addr = payload[CAPTURE_META_DATA_TS_OFFSET_LSW];
buff_addr++;
/* Write the TS MSW */
- *buff_addr = payload[TS_MSW_OFFSET];
+ *buff_addr = payload[CAPTURE_META_DATA_TS_OFFSET_MSW];
}
/* Always assume read_size is same as fragment_size */
read_size = prtd->codec_param.buffer.fragment_size;
@@ -1320,6 +1344,12 @@ static int msm_compr_configure_dsp_for_playback
prtd->buffer_paddr = ac->port[dir].buf[0].phys;
prtd->buffer_size = runtime->fragments * runtime->fragment_size;
+ /* Bit-0 of flags represent timestamp mode */
+ if (prtd->codec_param.codec.flags & COMPRESSED_TIMESTAMP_FLAG)
+ prtd->ts_header_offset = sizeof(struct snd_codec_metadata);
+ else
+ prtd->ts_header_offset = 0;
+
ret = msm_compr_send_media_format_block(cstream, ac->stream_id, false);
if (ret < 0) {
pr_err("%s, failed to send media format block\n", __func__);
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index c3a4719542ef..f38108258306 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -7466,9 +7466,13 @@ int q6asm_async_write(struct audio_client *ac,
else if (ac->io_mode == io_compressed ||
ac->io_mode == io_compressed_stream)
lbuf_phys_addr = (param->paddr - param->metadata_len);
- else
- lbuf_phys_addr = param->paddr;
-
+ else {
+ if (param->flags & SET_TIMESTAMP)
+ lbuf_phys_addr = param->paddr -
+ sizeof(struct snd_codec_metadata);
+ else
+ lbuf_phys_addr = param->paddr;
+ }
dev_vdbg(ac->dev, "%s: token[0x%x], buf_addr[%pK], buf_size[0x%x], ts_msw[0x%x], ts_lsw[0x%x], lbuf_phys_addr: 0x[%pK]\n",
__func__,
write.hdr.token, &param->paddr,