 Documentation/devicetree/bindings/display/msm/sde.txt | 55
 arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi | 7
 arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi | 92
 arch/arm/boot/dts/qcom/msm8998.dtsi | 58
 arch/arm/boot/dts/qcom/sdm660-common.dtsi | 4
 arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi | 11
 arch/arm/boot/dts/qcom/sdm660-qrd.dtsi | 37
 arch/arm/boot/dts/qcom/sdm660.dtsi | 23
 arch/arm/configs/msmcortex_defconfig | 4
 arch/arm64/configs/msmcortex-perf_defconfig | 4
 arch/arm64/configs/msmcortex_defconfig | 4
 drivers/clk/qcom/clk-smd-rpm.c | 29
 drivers/cpufreq/qcom-cpufreq.c | 6
 drivers/gpu/drm/msm/Makefile | 22
 drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c | 167
 drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h | 125
 drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h | 558
 drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c | 1321
 drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h | 192
 drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h | 164
 drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c | 858
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 5
 drivers/gpu/drm/msm/msm_drv.c | 21
 drivers/gpu/drm/msm/msm_drv.h | 17
 drivers/gpu/drm/msm/msm_gem.c | 24
 drivers/gpu/drm/msm/msm_gem.h | 3
 drivers/gpu/drm/msm/msm_kms.h | 1
 drivers/gpu/drm/msm/msm_mmu.h | 18
 drivers/gpu/drm/msm/msm_smmu.c | 432
 drivers/gpu/drm/msm/sde/sde_crtc.c | 610
 drivers/gpu/drm/msm/sde/sde_encoder.c | 523
 drivers/gpu/drm/msm/sde/sde_encoder_phys.h | 88
 drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c | 25
 drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c | 498
 drivers/gpu/drm/msm/sde/sde_hw_catalog.c | 37
 drivers/gpu/drm/msm/sde/sde_hw_catalog.h | 479
 drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c | 303
 drivers/gpu/drm/msm/sde/sde_hw_cdm.c | 296
 drivers/gpu/drm/msm/sde/sde_hw_cdm.h | 115
 drivers/gpu/drm/msm/sde/sde_hw_dspp.c | 105
 drivers/gpu/drm/msm/sde/sde_hw_dspp.h | 127
 drivers/gpu/drm/msm/sde/sde_hw_interrupts.c | 969
 drivers/gpu/drm/msm/sde/sde_hw_interrupts.h | 245
 drivers/gpu/drm/msm/sde/sde_hw_intf.c | 381
 drivers/gpu/drm/msm/sde/sde_hw_intf.h | 106
 drivers/gpu/drm/msm/sde/sde_hw_lm.c | 192
 drivers/gpu/drm/msm/sde/sde_hw_lm.h | 96
 drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c | 364
 drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h | 137
 drivers/gpu/drm/msm/sde/sde_hw_mdp_hwio.h | 0
 drivers/gpu/drm/msm/sde/sde_hw_mdp_util.c | 73
 drivers/gpu/drm/msm/sde/sde_hw_mdp_util.h | 56
 drivers/gpu/drm/msm/sde/sde_hw_mdss.h | 358
 drivers/gpu/drm/msm/sde/sde_hw_pingpong.c | 159
 drivers/gpu/drm/msm/sde/sde_hw_pingpong.h | 115
 drivers/gpu/drm/msm/sde/sde_hw_sspp.c | 589
 drivers/gpu/drm/msm/sde/sde_hw_sspp.h | 282
 drivers/gpu/drm/msm/sde/sde_hw_wb.c | 120
 drivers/gpu/drm/msm/sde/sde_hw_wb.h | 85
 drivers/gpu/drm/msm/sde/sde_hwio.h | 59
 drivers/gpu/drm/msm/sde/sde_irq.c | 350
 drivers/gpu/drm/msm/sde/sde_kms.c | 496
 drivers/gpu/drm/msm/sde/sde_kms.h | 230
 drivers/gpu/drm/msm/sde/sde_mdp_formats.c | 134
 drivers/gpu/drm/msm/sde/sde_mdp_formats.h | 104
 drivers/gpu/drm/msm/sde/sde_plane.c | 767
 drivers/input/misc/qpnp-power-on.c | 5
 drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c | 5
 drivers/net/wireless/ath/ath10k/Kconfig | 7
 drivers/net/wireless/ath/ath10k/Makefile | 2
 drivers/net/wireless/ath/ath10k/ahb.c | 875
 drivers/net/wireless/ath/ath10k/ahb.h | 87
 drivers/net/wireless/ath/ath10k/bmi.c | 4
 drivers/net/wireless/ath/ath10k/ce.c | 56
 drivers/net/wireless/ath/ath10k/ce.h | 17
 drivers/net/wireless/ath/ath10k/core.c | 779
 drivers/net/wireless/ath/ath10k/core.h | 260
 drivers/net/wireless/ath/ath10k/debug.c | 453
 drivers/net/wireless/ath/ath10k/debug.h | 18
 drivers/net/wireless/ath/ath10k/debugfs_sta.c | 61
 drivers/net/wireless/ath/ath10k/hif.h | 14
 drivers/net/wireless/ath/ath10k/htc.c | 6
 drivers/net/wireless/ath/ath10k/htc.h | 5
 drivers/net/wireless/ath/ath10k/htt.c | 14
 drivers/net/wireless/ath/ath10k/htt.h | 260
 drivers/net/wireless/ath/ath10k/htt_rx.c | 835
 drivers/net/wireless/ath/ath10k/htt_tx.c | 572
 drivers/net/wireless/ath/ath10k/hw.c | 93
 drivers/net/wireless/ath/ath10k/hw.h | 208
 drivers/net/wireless/ath/ath10k/mac.c | 1147
 drivers/net/wireless/ath/ath10k/mac.h | 10
 drivers/net/wireless/ath/ath10k/pci.c | 814
 drivers/net/wireless/ath/ath10k/pci.h | 74
 drivers/net/wireless/ath/ath10k/rx_desc.h | 87
 drivers/net/wireless/ath/ath10k/spectral.c | 4
 drivers/net/wireless/ath/ath10k/swap.c | 54
 drivers/net/wireless/ath/ath10k/swap.h | 16
 drivers/net/wireless/ath/ath10k/targaddrs.h | 10
 drivers/net/wireless/ath/ath10k/testmode.c | 220
 drivers/net/wireless/ath/ath10k/thermal.c | 4
 drivers/net/wireless/ath/ath10k/thermal.h | 4
 drivers/net/wireless/ath/ath10k/trace.h | 15
 drivers/net/wireless/ath/ath10k/txrx.c | 73
 drivers/net/wireless/ath/ath10k/txrx.h | 4
 drivers/net/wireless/ath/ath10k/wmi-ops.h | 88
 drivers/net/wireless/ath/ath10k/wmi-tlv.c | 80
 drivers/net/wireless/ath/ath10k/wmi-tlv.h | 22
 drivers/net/wireless/ath/ath10k/wmi.c | 833
 drivers/net/wireless/ath/ath10k/wmi.h | 404
 drivers/net/wireless/ath/ath10k/wow.c | 7
 drivers/nfc/nq-nci.c | 23
 drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 11
 drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c | 5
 drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h | 23
 drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 32
 drivers/platform/msm/ipa/ipa_v2/ipa_utils.c | 34
 drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 241
 drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c | 4
 drivers/regulator/core.c | 359
 drivers/soc/qcom/icnss.c | 2
 drivers/video/fbdev/msm/mdss_mdp_ctl.c | 4
 include/sound/apr_audio-v2.h | 151
 include/sound/q6adm-v2.h | 10
 sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c | 15
 sound/soc/msm/qdsp6v2/msm-qti-pp-config.c | 30
 sound/soc/msm/qdsp6v2/q6adm.c | 212
 126 files changed, 21465 insertions(+), 2232 deletions(-)
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
new file mode 100644
index 000000000000..8ec9f78346d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -0,0 +1,55 @@
+Qualcomm Technologies, Inc. SDE KMS
+
+Snapdragon Display Engine (SDE) implements the Linux DRM/KMS APIs to drive the
+user interface on different panel interfaces. The SDE driver is the core of the
+display subsystem and manages all data paths to the different panel interfaces.
+
+Required properties:
+- compatible: Must be "qcom,sde-kms".
+- reg: Offset and length of the register set for the device.
+- reg-names: Names to refer to the register sets related to this device.
+- clocks: List of phandles for the clock device nodes
+ needed by the device.
+- clock-names: List of clock names needed by the device.
+- mmagic-supply: Phandle for the mmagic mdss supply regulator device node.
+- vdd-supply: Phandle for the vdd regulator device node.
+- interrupt-parent: Must be the core interrupt controller.
+- interrupts: Interrupt associated with MDSS.
+- interrupt-controller: Mark the device node as an interrupt controller.
+- #interrupt-cells: Should be one. The first cell is the interrupt number.
+- iommus: Specifies the SIDs used by this context bank.
+
+Please refer to ../../interrupt-controller/interrupts.txt for a general
+description of interrupt bindings.
+
+Example:
+ mdss_mdp: qcom,mdss_mdp@900000 {
+ compatible = "qcom,sde-kms";
+ reg = <0x00900000 0x90000>,
+ <0x009b0000 0x1040>,
+ <0x009b8000 0x1040>;
+ reg-names = "mdp_phys",
+ "vbif_phys",
+ "vbif_nrt_phys";
+ clocks = <&clock_mmss clk_mdss_ahb_clk>,
+ <&clock_mmss clk_mdss_axi_clk>,
+ <&clock_mmss clk_mdp_clk_src>,
+ <&clock_mmss clk_mdss_mdp_vote_clk>,
+ <&clock_mmss clk_smmu_mdp_axi_clk>,
+ <&clock_mmss clk_mmagic_mdss_axi_clk>,
+ <&clock_mmss clk_mdss_vsync_clk>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk_src",
+ "core_clk",
+ "iommu_clk",
+ "mmagic_clk",
+ "vsync_clk";
+ mmagic-supply = <&gdsc_mmagic_mdss>;
+ vdd-supply = <&gdsc_mdss>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 83 0>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ iommus = <&mdp_smmu 0>;
+ };
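
As a usage note (not part of the binding itself): a platform driver consuming
this node would acquire the resources above through the standard helpers. The
sketch below is hypothetical (it is not the actual SDE probe code) and assumes
only the resource names from the example:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	/* Hypothetical sketch: fetch "mdp_phys", "iface_clk" and the MDSS IRQ. */
	static int sde_binding_probe_sketch(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *mdp_base;
		struct clk *iface_clk;
		int irq;

		/* "reg"/"reg-names": map the "mdp_phys" register block */
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdp_phys");
		mdp_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(mdp_base))
			return PTR_ERR(mdp_base);

		/* "clocks"/"clock-names": the AHB interface clock */
		iface_clk = devm_clk_get(&pdev->dev, "iface_clk");
		if (IS_ERR(iface_clk))
			return PTR_ERR(iface_clk);

		/* "interrupts": the MDSS summary interrupt */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		return 0;
	}
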
diff --git a/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
index 1a743fe7e6d2..9bab7c037d40 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
@@ -30,6 +30,7 @@
#include "dsi-panel-sim-cmd.dtsi"
#include "dsi-panel-sim-dualmipi-video.dtsi"
#include "dsi-panel-sim-dualmipi-cmd.dtsi"
+#include "dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi"
&soc {
dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -189,3 +190,9 @@
qcom,mdss-dsi-t-clk-post = <0x06>;
qcom,mdss-dsi-t-clk-pre = <0x22>;
};
+
+&dsi_dual_s6e3ha3_amoled_cmd {
+ qcom,mdss-dsi-panel-timings = [00 1c 06 06 0b 10 06 07 05 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x07>;
+ qcom,mdss-dsi-t-clk-pre = <0x2a>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
index 37daa8d9af6f..25e381c2cb18 100644
--- a/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
@@ -343,3 +343,95 @@
qcom,ramp-step-ms = <255>;
qcom,use-blink;
};
+
+&pmx_mdss {
+ mdss_dsi_active: mdss_dsi_active {
+ mux {
+ pins = "gpio52", "gpio94";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio52", "gpio94";
+ drive-strength = <8>; /* 8 mA */
+ bias-disable = <0>; /* no pull */
+ };
+ };
+
+ mdss_dsi_suspend: mdss_dsi_suspend {
+ mux {
+ pins = "gpio52", "gpio94";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio52", "gpio94";
+ drive-strength = <2>; /* 2 mA */
+ bias-pull-down; /* pull down */
+ };
+ };
+};
+
+&mdss_mdp {
+ qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+ hw-config = "split_dsi";
+};
+
+&mdss_dsi0 {
+ qcom,dsi-pref-prim-pan = <&dsi_dual_s6e3ha3_amoled_cmd>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-enable-gpio = <&tlmm 52 0>;
+ qcom,platform-reset-gpio = <&tlmm 94 0>;
+ qcom,platform-bklight-en-gpio = <&pmi8998_gpios 1 0>;
+ qcom,platform-bklight-en-gpio-invert;
+};
+
+&mdss_dsi1 {
+ qcom,dsi-pref-prim-pan = <&dsi_dual_s6e3ha3_amoled_cmd>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-enable-gpio = <&tlmm 52 0>;
+ qcom,platform-reset-gpio = <&tlmm 94 0>;
+ qcom,platform-bklight-en-gpio = <&pmi8998_gpios 1 0>;
+ qcom,platform-bklight-en-gpio-invert;
+};
+
+&pmi8998_wled {
+ qcom,disp-type-amoled;
+};
+
+&labibb {
+ status = "ok";
+ qcom,qpnp-labibb-mode = "amoled";
+ qcom,swire-control;
+};
+
+&pmi8998_gpios {
+ /* GPIO 1 for WLED power enable */
+ gpio@c000 {
+ qcom,mode = <1>;
+ qcom,output-type = <0>;
+ qcom,pull = <5>;
+ qcom,vin-sel = <0>;
+ qcom,out-strength = <1>;
+ qcom,src-sel = <0>;
+ qcom,invert = <0>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
+};
+
+&dsi_dual_s6e3ha3_amoled_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <255>;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index 6a11e7c51ca5..150a3c42c6b7 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -64,6 +64,9 @@
compatible = "arm,arch-cache";
qcom,dump-size = <0x9040>;
};
+ L1_TLB_0: l1-tlb {
+ qcom,dump-size = <0x2000>;
+ };
};
CPU1: cpu@1 {
@@ -84,6 +87,9 @@
compatible = "arm,arch-cache";
qcom,dump-size = <0x9040>;
};
+ L1_TLB_1: l1-tlb {
+ qcom,dump-size = <0x2000>;
+ };
};
CPU2: cpu@2 {
@@ -104,6 +110,9 @@
compatible = "arm,arch-cache";
qcom,dump-size = <0x9040>;
};
+ L1_TLB_2: l1-tlb {
+ qcom,dump-size = <0x2000>;
+ };
};
CPU3: cpu@3 {
@@ -124,6 +133,9 @@
compatible = "arm,arch-cache";
qcom,dump-size = <0x9040>;
};
+ L1_TLB_3: l1-tlb {
+ qcom,dump-size = <0x2000>;
+ };
};
CPU4: cpu@100 {
@@ -148,6 +160,9 @@
compatible = "arm,arch-cache";
qcom,dump-size = <0x12000>;
};
+ L1_TLB_100: l1-tlb {
+ qcom,dump-size = <0x4800>;
+ };
};
CPU5: cpu@101 {
@@ -168,6 +183,9 @@
compatible = "arm,arch-cache";
qcom,dump-size = <0x12000>;
};
+ L1_TLB_101: l1-tlb {
+ qcom,dump-size = <0x4800>;
+ };
};
CPU6: cpu@102 {
@@ -188,6 +206,9 @@
compatible = "arm,arch-cache";
qcom,dump-size = <0x12000>;
};
+ L1_TLB_102: l1-tlb {
+ qcom,dump-size = <0x4800>;
+ };
};
CPU7: cpu@103 {
@@ -208,6 +229,9 @@
compatible = "arm,arch-cache";
qcom,dump-size = <0x12000>;
};
+ L1_TLB_103: l1-tlb {
+ qcom,dump-size = <0x4800>;
+ };
};
cpu-map {
@@ -2914,6 +2938,38 @@
qcom,dump-node = <&L1_D_103>;
qcom,dump-id = <0x87>;
};
+ qcom,l1_tlb_dump0 {
+ qcom,dump-node = <&L1_TLB_0>;
+ qcom,dump-id = <0x20>;
+ };
+ qcom,l1_tlb_dump1 {
+ qcom,dump-node = <&L1_TLB_1>;
+ qcom,dump-id = <0x21>;
+ };
+ qcom,l1_tlb_dump2 {
+ qcom,dump-node = <&L1_TLB_2>;
+ qcom,dump-id = <0x22>;
+ };
+ qcom,l1_tlb_dump3 {
+ qcom,dump-node = <&L1_TLB_3>;
+ qcom,dump-id = <0x23>;
+ };
+ qcom,l1_tlb_dump100 {
+ qcom,dump-node = <&L1_TLB_100>;
+ qcom,dump-id = <0x24>;
+ };
+ qcom,l1_tlb_dump101 {
+ qcom,dump-node = <&L1_TLB_101>;
+ qcom,dump-id = <0x25>;
+ };
+ qcom,l1_tlb_dump102 {
+ qcom,dump-node = <&L1_TLB_102>;
+ qcom,dump-id = <0x26>;
+ };
+ qcom,l1_tlb_dump103 {
+ qcom,dump-node = <&L1_TLB_103>;
+ qcom,dump-id = <0x27>;
+ };
};
ssc_sensors: qcom,msm-ssc-sensors {
diff --git a/arch/arm/boot/dts/qcom/sdm660-common.dtsi b/arch/arm/boot/dts/qcom/sdm660-common.dtsi
index a6bc326a6508..f9915fbf3f58 100644
--- a/arch/arm/boot/dts/qcom/sdm660-common.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-common.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -179,7 +179,7 @@
vdd-supply = <&pm660l_l1>;
core-supply = <&pm660_l10>;
qcom,vdd-voltage-level = <0 925000 925000>;
- vdd-core-voltage-level = <0 1800000 1800000>;
+ qcom,core-voltage-level = <0 1800000 1800000>;
qcom,vbus-valid-override;
qcom,qmp-phy-init-seq =
/* <reg_offset, value, delay> */
diff --git a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
index 84a99be7371e..25337fefcdb1 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include "dsi-panel-sim-dualmipi-video.dtsi"
#include "dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi"
#include "dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi"
+#include "dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi"
&soc {
dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -81,3 +82,11 @@
23 1e 07 08 05 03 04 a0
23 18 07 08 04 03 04 a0];
};
+
+&dsi_dual_nt36850_truly_cmd {
+ qcom,mdss-dsi-panel-timings-phy-v2 = [24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1f 08 09 05 03 04 a0
+ 24 1c 08 09 05 03 04 a0];
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
index 1f6344db68e1..6037bfd97fef 100644
--- a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
@@ -150,3 +150,40 @@
qcom,fg-jeita-thresholds = <0 5 55 55>;
qcom,fg-cutoff-voltage = <3700>;
};
+
+&mdss_mdp {
+ qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_fb0 {
+ qcom,mdss-mixer-swap;
+};
+
+&mdss_dsi {
+ hw-config = "split_dsi";
+};
+
+&mdss_dsi0 {
+ qcom,dsi-pref-prim-pan = <&dsi_dual_nt36850_truly_cmd>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-reset-gpio = <&tlmm 53 0>;
+ qcom,platform-te-gpio = <&tlmm 59 0>;
+};
+
+&mdss_dsi1 {
+ qcom,dsi-pref-prim-pan = <&dsi_dual_nt36850_truly_cmd>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-reset-gpio = <&tlmm 53 0>;
+ qcom,platform-te-gpio = <&tlmm 59 0>;
+};
+
+&dsi_dual_nt36850_truly_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi
index 4a13675a07bb..05822d9d7c4e 100644
--- a/arch/arm/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660.dtsi
@@ -491,7 +491,6 @@
};
wdog: qcom,wdt@17817000 {
- status = "disabled";
compatible = "qcom,msm-watchdog";
reg = <0x17817000 0x1000>;
reg-names = "wdt-base";
@@ -500,6 +499,7 @@
qcom,pet-time = <10000>;
qcom,ipi-ping;
qcom,wakeup-enable;
+ qcom,scandump-size = <0x40000>;
};
qcom,sps {
@@ -984,11 +984,12 @@
};
bwmon: qcom,cpu-bwmon {
- compatible = "qcom,bimc-bwmon3";
+ compatible = "qcom,bimc-bwmon4";
reg = <0x01008000 0x300>, <0x01001000 0x200>;
reg-names = "base", "global_base";
interrupts = <0 183 4>;
qcom,mport = <0>;
+ qcom,hw-timer-hz = <19200000>;
qcom,target-dev = <&cpubw>;
};
@@ -1177,16 +1178,8 @@
msm_cpufreq: qcom,msm-cpufreq {
compatible = "qcom,msm-cpufreq";
- clock-names = "cpu0_clk", "cpu1_clk", "cpu2_clk",
- "cpu3_clk", "cpu4_clk", "cpu5_clk",
- "cpu6_clk", "cpu7_clk";
+ clock-names = "cpu0_clk", "cpu4_clk";
clocks = <&clock_cpu PWRCL_CLK>,
- <&clock_cpu PWRCL_CLK>,
- <&clock_cpu PWRCL_CLK>,
- <&clock_cpu PWRCL_CLK>,
- <&clock_cpu PERFCL_CLK>,
- <&clock_cpu PERFCL_CLK>,
- <&clock_cpu PERFCL_CLK>,
<&clock_cpu PERFCL_CLK>;
qcom,governor-per-policy;
@@ -1966,7 +1959,7 @@
reg = <0x1de0000 0x20000>,
<0x1dc4000 0x24000>;
reg-names = "crypto-base","crypto-bam-base";
- interrupts = <0 207 0>;
+ interrupts = <0 206 0>;
qcom,bam-pipe-pair = <1>;
qcom,ce-hw-instance = <0>;
qcom,ce-device = <0>;
@@ -1992,7 +1985,7 @@
reg = <0x1de0000 0x20000>,
<0x1dc4000 0x24000>;
reg-names = "crypto-base","crypto-bam-base";
- interrupts = <0 207 0>;
+ interrupts = <0 206 0>;
qcom,bam-pipe-pair = <2>;
qcom,ce-hw-instance = <0>;
qcom,ce-device = <0>;
@@ -2028,9 +2021,9 @@
hyplog-size-offset = <0x414>;
};
- qcom_rng: qrng@794000 {
+ qcom_rng: qrng@793000 {
compatible = "qcom,msm-rng";
- reg = <0x794000 0x1000>;
+ reg = <0x793000 0x1000>;
qcom,msm-rng-iface-clk;
qcom,no-qrng-config;
qcom,msm-bus,name = "msm-rng-noc";
diff --git a/arch/arm/configs/msmcortex_defconfig b/arch/arm/configs/msmcortex_defconfig
index 9cf5a15e80eb..1831f2c63ca5 100644
--- a/arch/arm/configs/msmcortex_defconfig
+++ b/arch/arm/configs/msmcortex_defconfig
@@ -322,6 +322,7 @@ CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_ADV_DEBUG=y
@@ -330,6 +331,9 @@ CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_MSM_CAMERA=y
CONFIG_MSM_CAMERA_DEBUG=y
CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_TSPP=m
CONFIG_QCOM_KGSL=y
CONFIG_FB=y
CONFIG_FB_VIRTUAL=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 07d24ea6b707..7c34ea8caba1 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -354,6 +354,7 @@ CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_ADV_DEBUG=y
@@ -392,6 +393,9 @@ CONFIG_MSM_VIDC_VMEM=y
CONFIG_MSM_VIDC_GOVERNORS=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_TSPP=m
CONFIG_QCOM_KGSL=y
CONFIG_FB=y
CONFIG_FB_ARMCLCD=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 25b5c206e1ae..9d7baa24e271 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -357,6 +357,7 @@ CONFIG_REGULATOR_PROXY_CONSUMER=y
CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_VIDEO_ADV_DEBUG=y
@@ -395,6 +396,9 @@ CONFIG_MSM_VIDC_VMEM=y
CONFIG_MSM_VIDC_GOVERNORS=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_TSPP=m
CONFIG_QCOM_KGSL=y
CONFIG_FB=y
CONFIG_FB_VIRTUAL=y
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 79db93ac5ae1..095530e72b78 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -185,28 +185,7 @@ static int clk_smd_rpm_prepare(struct clk_hw *hw);
static int clk_smd_rpm_handoff(struct clk_hw *hw)
{
- int ret = 0;
- uint32_t value = cpu_to_le32(INT_MAX);
- struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
- struct msm_rpm_kvp req = {
- .key = cpu_to_le32(r->rpm_key),
- .data = (void *)&value,
- .length = sizeof(value),
- };
-
- ret = msm_rpm_send_message(QCOM_SMD_RPM_ACTIVE_STATE, r->rpm_res_type,
- r->rpm_clk_id, &req, 1);
- if (ret)
- return ret;
-
- ret = msm_rpm_send_message(QCOM_SMD_RPM_SLEEP_STATE, r->rpm_res_type,
- r->rpm_clk_id, &req, 1);
- if (ret)
- return ret;
-
- ret = clk_smd_rpm_prepare(hw);
-
- return ret;
+ return clk_smd_rpm_prepare(hw);
}
static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
@@ -282,12 +261,12 @@ static int clk_smd_rpm_prepare(struct clk_hw *hw)
mutex_lock(&rpm_smd_clk_lock);
+ to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
+
/* Don't send requests to the RPM if the rate has not been set. */
- if (!r->rate)
+ if (this_rate == 0)
goto out;
- to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
-
/* Take peer clock's rate into account only if it's enabled. */
if (peer->enabled)
to_active_sleep(peer, peer->rate,
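
For context on the reordering above: to_active_sleep(), defined earlier in this
file, splits the requested rate into an RPM active-set vote and a sleep-set
vote, so the zero-rate check now looks at the computed active vote rather than
the raw r->rate. A rough sketch of the assumed helper semantics (the
active_only flag here is purely illustrative, not a confirmed field name):

	/*
	 * Hypothetical sketch of to_active_sleep(): an active-only clock
	 * votes 0 in the RPM sleep set so it can stop when the CPU sleeps,
	 * while other clocks carry the same vote in both sets.
	 */
	static void to_active_sleep_sketch(struct clk_smd_rpm *r,
					   unsigned long rate,
					   unsigned long *active,
					   unsigned long *sleep)
	{
		*active = rate;
		*sleep = r->active_only ? 0 : *active; /* assumed field */
	}
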
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
index 2aa7b783f276..cb7323886082 100644
--- a/drivers/cpufreq/qcom-cpufreq.c
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -3,7 +3,7 @@
* MSM architecture cpufreq driver
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
* Author: Mike A. Chan <mikechan@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -390,8 +390,10 @@ static int __init msm_cpufreq_probe(struct platform_device *pdev)
for_each_possible_cpu(cpu) {
snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
c = devm_clk_get(dev, clk_name);
- if (IS_ERR(c))
+ if (cpu == 0 && IS_ERR(c))
return PTR_ERR(c);
+ else if (IS_ERR(c))
+ c = cpu_clk[cpu-1];
cpu_clk[cpu] = c;
}
hotplug_ready = true;
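
The fallback above lets the device tree name only one clock per cluster (see
the sdm660.dtsi msm_cpufreq hunk earlier, which trims clock-names down to
"cpu0_clk" and "cpu4_clk"): cpu0 must still resolve its clock, and every other
CPU lacking a dedicated entry reuses its predecessor's handle. A tiny
standalone illustration of the resulting mapping (plain C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		/* Only the cluster leaders provide a clock, as in sdm660.dtsi. */
		const char *provided[8] = { "cpu0_clk", NULL, NULL, NULL,
					    "cpu4_clk", NULL, NULL, NULL };
		const char *clk[8];

		for (int cpu = 0; cpu < 8; cpu++)
			clk[cpu] = provided[cpu] ? provided[cpu] : clk[cpu - 1];

		/* Prints cpu0..3 -> cpu0_clk and cpu4..7 -> cpu4_clk. */
		for (int cpu = 0; cpu < 8; cpu++)
			printf("cpu%d -> %s\n", cpu, clk[cpu]);
		return 0;
	}
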
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 1c90290be716..7c73657b399e 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -38,6 +38,13 @@ msm-y := \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
+ sde/sde_crtc.o \
+ sde/sde_encoder.o \
+ sde/sde_encoder_phys_vid.o \
+ sde/sde_encoder_phys_cmd.o \
+ sde/sde_irq.o \
+ sde/sde_kms.o \
+ sde/sde_plane.o \
msm_atomic.o \
msm_drv.o \
msm_fb.o \
@@ -46,6 +53,7 @@ msm-y := \
msm_gem_submit.o \
msm_gpu.o \
msm_iommu.o \
+ msm_smmu.o \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o
@@ -69,3 +77,17 @@ msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
endif
obj-$(CONFIG_DRM_MSM) += msm.o
+
+obj-$(CONFIG_DRM_MSM) += sde/sde_hw_catalog.o \
+ sde/sde_hw_catalog_8996.o \
+ sde/sde_hw_cdm.o \
+ sde/sde_hw_dspp.o \
+ sde/sde_hw_intf.o \
+ sde/sde_hw_lm.o \
+ sde/sde_hw_mdp_ctl.o \
+ sde/sde_hw_mdp_util.o \
+ sde/sde_hw_sspp.o \
+ sde/sde_hw_wb.o \
+ sde/sde_hw_pingpong.o \
+ sde/sde_hw_interrupts.o \
+ sde/sde_mdp_formats.o
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
new file mode 100644
index 000000000000..114998fb8fc5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "msm-dsi-catalog:[%s] " fmt, __func__
+#include <linux/errno.h>
+
+#include "dsi_catalog.h"
+
+/**
+ * dsi_catalog_14_init() - catalog init for dsi controller v1.4
+ */
+static void dsi_catalog_14_init(struct dsi_ctrl_hw *ctrl)
+{
+ ctrl->ops.host_setup = dsi_ctrl_hw_14_host_setup;
+ ctrl->ops.setup_lane_map = dsi_ctrl_hw_14_setup_lane_map;
+ ctrl->ops.video_engine_en = dsi_ctrl_hw_14_video_engine_en;
+ ctrl->ops.video_engine_setup = dsi_ctrl_hw_14_video_engine_setup;
+ ctrl->ops.set_video_timing = dsi_ctrl_hw_14_set_video_timing;
+ ctrl->ops.cmd_engine_setup = dsi_ctrl_hw_14_cmd_engine_setup;
+ ctrl->ops.ctrl_en = dsi_ctrl_hw_14_ctrl_en;
+ ctrl->ops.cmd_engine_en = dsi_ctrl_hw_14_cmd_engine_en;
+ ctrl->ops.phy_sw_reset = dsi_ctrl_hw_14_phy_sw_reset;
+ ctrl->ops.soft_reset = dsi_ctrl_hw_14_soft_reset;
+ ctrl->ops.kickoff_command = dsi_ctrl_hw_14_kickoff_command;
+ ctrl->ops.kickoff_fifo_command = dsi_ctrl_hw_14_kickoff_fifo_command;
+ ctrl->ops.reset_cmd_fifo = dsi_ctrl_hw_14_reset_cmd_fifo;
+ ctrl->ops.trigger_command_dma = dsi_ctrl_hw_14_trigger_command_dma;
+ ctrl->ops.ulps_request = dsi_ctrl_hw_14_ulps_request;
+ ctrl->ops.ulps_exit = dsi_ctrl_hw_14_ulps_exit;
+ ctrl->ops.clear_ulps_request = dsi_ctrl_hw_14_clear_ulps_request;
+ ctrl->ops.get_lanes_in_ulps = dsi_ctrl_hw_14_get_lanes_in_ulps;
+ ctrl->ops.clamp_enable = dsi_ctrl_hw_14_clamp_enable;
+ ctrl->ops.clamp_disable = dsi_ctrl_hw_14_clamp_disable;
+ ctrl->ops.get_interrupt_status = dsi_ctrl_hw_14_get_interrupt_status;
+ ctrl->ops.get_error_status = dsi_ctrl_hw_14_get_error_status;
+ ctrl->ops.clear_error_status = dsi_ctrl_hw_14_clear_error_status;
+ ctrl->ops.clear_interrupt_status =
+ dsi_ctrl_hw_14_clear_interrupt_status;
+ ctrl->ops.enable_status_interrupts =
+ dsi_ctrl_hw_14_enable_status_interrupts;
+ ctrl->ops.enable_error_interrupts =
+ dsi_ctrl_hw_14_enable_error_interrupts;
+ ctrl->ops.video_test_pattern_setup =
+ dsi_ctrl_hw_14_video_test_pattern_setup;
+ ctrl->ops.cmd_test_pattern_setup =
+ dsi_ctrl_hw_14_cmd_test_pattern_setup;
+ ctrl->ops.test_pattern_enable = dsi_ctrl_hw_14_test_pattern_enable;
+ ctrl->ops.trigger_cmd_test_pattern =
+ dsi_ctrl_hw_14_trigger_cmd_test_pattern;
+}
+
+/**
+ * dsi_catalog_20_init() - catalog init for dsi controller v2.0
+ */
+static void dsi_catalog_20_init(struct dsi_ctrl_hw *ctrl)
+{
+ set_bit(DSI_CTRL_CPHY, ctrl->feature_map);
+}
+
+/**
+ * dsi_catalog_ctrl_setup() - set up catalog info for dsi controller
+ * @ctrl: Pointer to DSI controller hw object.
+ * @version: DSI controller version.
+ * @index: DSI controller instance ID.
+ *
+ * This function sets up the catalog information in the dsi_ctrl_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_ctrl_version version,
+ u32 index)
+{
+ int rc = 0;
+
+ if (version == DSI_CTRL_VERSION_UNKNOWN ||
+ version >= DSI_CTRL_VERSION_MAX) {
+ pr_err("Unsupported version: %d\n", version);
+ return -ENOTSUPP;
+ }
+
+ ctrl->index = index;
+ set_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map);
+ set_bit(DSI_CTRL_CMD_TPG, ctrl->feature_map);
+ set_bit(DSI_CTRL_VARIABLE_REFRESH_RATE, ctrl->feature_map);
+ set_bit(DSI_CTRL_DYNAMIC_REFRESH, ctrl->feature_map);
+ set_bit(DSI_CTRL_DESKEW_CALIB, ctrl->feature_map);
+ set_bit(DSI_CTRL_DPHY, ctrl->feature_map);
+
+ switch (version) {
+ case DSI_CTRL_VERSION_1_4:
+ dsi_catalog_14_init(ctrl);
+ break;
+ case DSI_CTRL_VERSION_2_0:
+ dsi_catalog_20_init(ctrl);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return rc;
+}
+
+/**
+ * dsi_catalog_phy_4_0_init() - catalog init for DSI PHY v4.0
+ */
+static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy)
+{
+ phy->ops.regulator_enable = dsi_phy_hw_v4_0_regulator_enable;
+ phy->ops.regulator_disable = dsi_phy_hw_v4_0_regulator_disable;
+ phy->ops.enable = dsi_phy_hw_v4_0_enable;
+ phy->ops.disable = dsi_phy_hw_v4_0_disable;
+ phy->ops.calculate_timing_params =
+ dsi_phy_hw_v4_0_calculate_timing_params;
+}
+
+/**
+ * dsi_catalog_phy_setup() - set up catalog info for dsi phy hardware
+ * @phy: Pointer to DSI PHY hw object.
+ * @version: DSI PHY version.
+ * @index: DSI PHY instance ID.
+ *
+ * This function sets up the catalog information in the dsi_phy_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
+ enum dsi_phy_version version,
+ u32 index)
+{
+ int rc = 0;
+
+ if (version == DSI_PHY_VERSION_UNKNOWN ||
+ version >= DSI_PHY_VERSION_MAX) {
+ pr_err("Unsupported version: %d\n", version);
+ return -ENOTSUPP;
+ }
+
+ phy->index = index;
+ set_bit(DSI_PHY_DPHY, phy->feature_map);
+
+ switch (version) {
+ case DSI_PHY_VERSION_4_0:
+ dsi_catalog_phy_4_0_init(phy);
+ break;
+ case DSI_PHY_VERSION_1_0:
+ case DSI_PHY_VERSION_2_0:
+ case DSI_PHY_VERSION_3_0:
+ default:
+ return -ENOTSUPP;
+ }
+
+ return rc;
+}
+
+
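
The catalog is a version-keyed vtable: a caller populates a dsi_ctrl_hw once
and then dispatches through its ops. A hypothetical consumer sketch (error
handling trimmed; mapping of ctrl_hw.base is assumed to happen elsewhere):

	struct dsi_ctrl_hw ctrl_hw = {};
	int rc;

	rc = dsi_catalog_ctrl_setup(&ctrl_hw, DSI_CTRL_VERSION_1_4, 0);
	if (!rc && ctrl_hw.ops.soft_reset)
		ctrl_hw.ops.soft_reset(&ctrl_hw); /* dispatch via the vtable */
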
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
new file mode 100644
index 000000000000..e4b33c259540
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CATALOG_H_
+#define _DSI_CATALOG_H_
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_phy_hw.h"
+
+/**
+ * dsi_catalog_ctrl_setup() - set up catalog info for dsi controller
+ * @ctrl: Pointer to DSI controller hw object.
+ * @version: DSI controller version.
+ * @index: DSI controller instance ID.
+ *
+ * This function sets up the catalog information in the dsi_ctrl_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_ctrl_version version,
+ u32 index);
+
+/**
+ * dsi_catalog_phy_setup() - set up catalog info for dsi phy hardware
+ * @phy: Pointer to DSI PHY hw object.
+ * @version: DSI PHY version.
+ * @index: DSI PHY instance ID.
+ *
+ * This function sets up the catalog information in the dsi_phy_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
+ enum dsi_phy_version version,
+ u32 index);
+
+/* Definitions for 4.0 PHY hardware driver */
+void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy,
+ struct dsi_phy_per_lane_cfgs *cfg);
+void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy);
+void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy);
+int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy,
+ struct dsi_mode_info *mode,
+ struct dsi_host_common_cfg *cfg,
+ struct dsi_phy_per_lane_cfgs
+ *timing);
+
+/* Definitions for 1.4 controller hardware driver */
+void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *config);
+void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
+void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_video_engine_cfg *cfg);
+void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl,
+ struct dsi_mode_info *mode);
+
+void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_cmd_engine_cfg *cfg);
+
+void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on);
+void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
+
+void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
+ struct dsi_lane_mapping *lane_map);
+void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_info *cmd,
+ u32 flags);
+
+void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+ u32 flags);
+void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
+void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes);
+void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
+u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool enable_ulps);
+
+void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool disable_ulps);
+u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints);
+void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl,
+ u32 ints);
+
+u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors);
+void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
+ u64 errors);
+
+void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val);
+void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val,
+ u32 stream_id);
+void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl, bool enable);
+void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
+ u32 stream_id);
+#endif /* _DSI_CATALOG_H_ */
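
The ULPS helpers declared above are meant to be used as a request/verify/clear
sequence (spelled out in the dsi_ctrl_hw.h ops documentation below). A
hypothetical sketch of that ordering, with busy-waiting only for illustration:

	/* Hypothetical ULPS entry/exit sequences using the v1.4 helpers. */
	static void dsi_enter_ulps_sketch(struct dsi_ctrl_hw *ctrl, u32 lanes)
	{
		dsi_ctrl_hw_14_ulps_request(ctrl, lanes);
		/* Verify the lanes actually reached ULPS before moving on. */
		while ((dsi_ctrl_hw_14_get_lanes_in_ulps(ctrl) & lanes) != lanes)
			cpu_relax();
	}

	static void dsi_exit_ulps_sketch(struct dsi_ctrl_hw *ctrl, u32 lanes)
	{
		dsi_ctrl_hw_14_ulps_exit(ctrl, lanes);
		while (dsi_ctrl_hw_14_get_lanes_in_ulps(ctrl) & lanes)
			cpu_relax();
		/* Clear the request only once all lanes are active again. */
		dsi_ctrl_hw_14_clear_ulps_request(ctrl, lanes);
	}
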
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
new file mode 100644
index 000000000000..b5ddfbb4ef72
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -0,0 +1,558 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_HW_H_
+#define _DSI_CTRL_HW_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+
+#include "dsi_defs.h"
+
+/**
+ * Modifier flag for command transmission. If this flag is set, command
+ * information is programmed to hardware and transmission is not triggered.
+ * Caller should call the trigger_command_dma() to start the transmission. This
+ * flag is valid for kickoff_command() and kickoff_fifo_command() operations.
+ */
+#define DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER 0x1
+
+/**
+ * enum dsi_ctrl_version - version of the dsi host controller
+ * @DSI_CTRL_VERSION_UNKNOWN: Unknown controller version
+ * @DSI_CTRL_VERSION_1_4: DSI host v1.4 controller
+ * @DSI_CTRL_VERSION_2_0: DSI host v2.0 controller
+ * @DSI_CTRL_VERSION_MAX: max version
+ */
+enum dsi_ctrl_version {
+ DSI_CTRL_VERSION_UNKNOWN,
+ DSI_CTRL_VERSION_1_4,
+ DSI_CTRL_VERSION_2_0,
+ DSI_CTRL_VERSION_MAX
+};
+
+/**
+ * enum dsi_ctrl_hw_features - features supported by dsi host controller
+ * @DSI_CTRL_VIDEO_TPG: Test pattern support for video mode.
+ * @DSI_CTRL_CMD_TPG: Test pattern support for command mode.
+ * @DSI_CTRL_VARIABLE_REFRESH_RATE: variable panel timing
+ * @DSI_CTRL_DYNAMIC_REFRESH: variable pixel clock rate
+ * @DSI_CTRL_NULL_PACKET_INSERTION: NULL packet insertion
+ * @DSI_CTRL_DESKEW_CALIB: Deskew calibration support
+ * @DSI_CTRL_DPHY: Controller support for DPHY
+ * @DSI_CTRL_CPHY: Controller support for CPHY
+ * @DSI_CTRL_MAX_FEATURES:
+ */
+enum dsi_ctrl_hw_features {
+ DSI_CTRL_VIDEO_TPG,
+ DSI_CTRL_CMD_TPG,
+ DSI_CTRL_VARIABLE_REFRESH_RATE,
+ DSI_CTRL_DYNAMIC_REFRESH,
+ DSI_CTRL_NULL_PACKET_INSERTION,
+ DSI_CTRL_DESKEW_CALIB,
+ DSI_CTRL_DPHY,
+ DSI_CTRL_CPHY,
+ DSI_CTRL_MAX_FEATURES
+};
+
+/**
+ * enum dsi_test_pattern - test pattern type
+ * @DSI_TEST_PATTERN_FIXED: Test pattern is fixed, based on init value.
+ * @DSI_TEST_PATTERN_INC: Incremental test pattern, based on init value.
+ * @DSI_TEST_PATTERN_POLY: Pattern generated from polynomial and init val.
+ * @DSI_TEST_PATTERN_MAX:
+ */
+enum dsi_test_pattern {
+ DSI_TEST_PATTERN_FIXED = 0,
+ DSI_TEST_PATTERN_INC,
+ DSI_TEST_PATTERN_POLY,
+ DSI_TEST_PATTERN_MAX
+};
+
+/**
+ * enum dsi_status_int_type - status interrupts generated by DSI controller
+ * @DSI_CMD_MODE_DMA_DONE: Command mode DMA packets are sent out.
+ * @DSI_CMD_STREAM0_FRAME_DONE: A frame of command mode stream0 is sent out.
+ * @DSI_CMD_STREAM1_FRAME_DONE: A frame of command mode stream1 is sent out.
+ * @DSI_CMD_STREAM2_FRAME_DONE: A frame of command mode stream2 is sent out.
+ * @DSI_VIDEO_MODE_FRAME_DONE: A frame of video mode stream is sent out.
+ * @DSI_BTA_DONE: A BTA is completed.
+ * @DSI_CMD_FRAME_DONE: A frame of selected command mode stream is
+ * sent out by MDP.
+ * @DSI_DYN_REFRESH_DONE: The dynamic refresh operation has completed.
+ * @DSI_DESKEW_DONE: The deskew calibration operation has completed
+ * @DSI_DYN_BLANK_DMA_DONE: The dynamic blanking DMA operation has
+ * completed.
+ */
+enum dsi_status_int_type {
+ DSI_CMD_MODE_DMA_DONE = BIT(0),
+ DSI_CMD_STREAM0_FRAME_DONE = BIT(1),
+ DSI_CMD_STREAM1_FRAME_DONE = BIT(2),
+ DSI_CMD_STREAM2_FRAME_DONE = BIT(3),
+ DSI_VIDEO_MODE_FRAME_DONE = BIT(4),
+ DSI_BTA_DONE = BIT(5),
+ DSI_CMD_FRAME_DONE = BIT(6),
+ DSI_DYN_REFRESH_DONE = BIT(7),
+ DSI_DESKEW_DONE = BIT(8),
+ DSI_DYN_BLANK_DMA_DONE = BIT(9)
+};
+
+/**
+ * enum dsi_error_int_type - error interrupts generated by DSI controller
+ * @DSI_RDBK_SINGLE_ECC_ERR: Single bit ECC error in read packet.
+ * @DSI_RDBK_MULTI_ECC_ERR: Multi bit ECC error in read packet.
+ * @DSI_RDBK_CRC_ERR: CRC error in read packet.
+ * @DSI_RDBK_INCOMPLETE_PKT: Incomplete read packet.
+ * @DSI_PERIPH_ERROR_PKT: Error packet returned from peripheral.
+ * @DSI_LP_RX_TIMEOUT: Low power reverse transmission timeout.
+ * @DSI_HS_TX_TIMEOUT: High speed forward transmission timeout.
+ * @DSI_BTA_TIMEOUT: BTA timeout.
+ * @DSI_PLL_UNLOCK: PLL has unlocked.
+ * @DSI_DLN0_ESC_ENTRY_ERR: Incorrect LP Rx escape entry.
+ * @DSI_DLN0_ESC_SYNC_ERR: LP Rx data is not byte aligned.
+ * @DSI_DLN0_LP_CONTROL_ERR: Incorrect LP Rx state sequence.
+ * @DSI_PENDING_HS_TX_TIMEOUT: Pending High-speed transfer timeout.
+ * @DSI_INTERLEAVE_OP_CONTENTION: Interleave operation contention.
+ * @DSI_CMD_DMA_FIFO_UNDERFLOW: Command mode DMA FIFO underflow.
+ * @DSI_CMD_MDP_FIFO_UNDERFLOW: Command MDP FIFO underflow (failed to
+ * receive one complete line from MDP).
+ * @DSI_DLN0_HS_FIFO_OVERFLOW: High speed FIFO for data lane 0 overflows.
+ * @DSI_DLN1_HS_FIFO_OVERFLOW: High speed FIFO for data lane 1 overflows.
+ * @DSI_DLN2_HS_FIFO_OVERFLOW: High speed FIFO for data lane 2 overflows.
+ * @DSI_DLN3_HS_FIFO_OVERFLOW: High speed FIFO for data lane 3 overflows.
+ * @DSI_DLN0_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 0 underflows.
+ * @DSI_DLN1_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 1 underflows.
+ * @DSI_DLN2_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 2 underflows.
+ * @DSI_DLN3_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 3 underflows.
+ * @DSI_DLN0_LP0_CONTENTION: PHY level contention while lane 0 is low.
+ * @DSI_DLN1_LP0_CONTENTION: PHY level contention while lane 1 is low.
+ * @DSI_DLN2_LP0_CONTENTION: PHY level contention while lane 2 is low.
+ * @DSI_DLN3_LP0_CONTENTION: PHY level contention while lane 3 is low.
+ * @DSI_DLN0_LP1_CONTENTION: PHY level contention while lane 0 is high.
+ * @DSI_DLN1_LP1_CONTENTION: PHY level contention while lane 1 is high.
+ * @DSI_DLN2_LP1_CONTENTION: PHY level contention while lane 2 is high.
+ * @DSI_DLN3_LP1_CONTENTION: PHY level contention while lane 3 is high.
+ */
+enum dsi_error_int_type {
+ DSI_RDBK_SINGLE_ECC_ERR = BIT(0),
+ DSI_RDBK_MULTI_ECC_ERR = BIT(1),
+ DSI_RDBK_CRC_ERR = BIT(2),
+ DSI_RDBK_INCOMPLETE_PKT = BIT(3),
+ DSI_PERIPH_ERROR_PKT = BIT(4),
+ DSI_LP_RX_TIMEOUT = BIT(5),
+ DSI_HS_TX_TIMEOUT = BIT(6),
+ DSI_BTA_TIMEOUT = BIT(7),
+ DSI_PLL_UNLOCK = BIT(8),
+ DSI_DLN0_ESC_ENTRY_ERR = BIT(9),
+ DSI_DLN0_ESC_SYNC_ERR = BIT(10),
+ DSI_DLN0_LP_CONTROL_ERR = BIT(11),
+ DSI_PENDING_HS_TX_TIMEOUT = BIT(12),
+ DSI_INTERLEAVE_OP_CONTENTION = BIT(13),
+ DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(14),
+ DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(15),
+ DSI_DLN0_HS_FIFO_OVERFLOW = BIT(16),
+ DSI_DLN1_HS_FIFO_OVERFLOW = BIT(17),
+ DSI_DLN2_HS_FIFO_OVERFLOW = BIT(18),
+ DSI_DLN3_HS_FIFO_OVERFLOW = BIT(19),
+ DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(20),
+ DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(21),
+ DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(22),
+ DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(23),
+ DSI_DLN0_LP0_CONTENTION = BIT(24),
+ DSI_DLN1_LP0_CONTENTION = BIT(25),
+ DSI_DLN2_LP0_CONTENTION = BIT(26),
+ DSI_DLN3_LP0_CONTENTION = BIT(27),
+ DSI_DLN0_LP1_CONTENTION = BIT(28),
+ DSI_DLN1_LP1_CONTENTION = BIT(29),
+ DSI_DLN2_LP1_CONTENTION = BIT(30),
+ DSI_DLN3_LP1_CONTENTION = BIT(31),
+};
+
+/**
+ * struct dsi_ctrl_cmd_dma_info - command buffer information
+ * @offset: IOMMU VA for command buffer address.
+ * @length: Length of the command buffer.
+ * @en_broadcast: Enable broadcast mode if set to true.
+ * @is_master: Is master in broadcast mode.
+ * @use_lpm: Use low power mode for command transmission.
+ */
+struct dsi_ctrl_cmd_dma_info {
+ u32 offset;
+ u32 length;
+ bool en_broadcast;
+ bool is_master;
+ bool use_lpm;
+};
+
+/**
+ * struct dsi_ctrl_cmd_dma_fifo_info - command payload to be sent using FIFO
+ * @command: VA for command buffer.
+ * @size: Size of the command buffer.
+ * @en_broadcast: Enable broadcast mode if set to true.
+ * @is_master: Is master in broadcast mode.
+ * @use_lpm: Use low power mode for command transmission.
+ */
+struct dsi_ctrl_cmd_dma_fifo_info {
+ u32 *command;
+ u32 size;
+ bool en_broadcast;
+ bool is_master;
+ bool use_lpm;
+};
+
+struct dsi_ctrl_hw;
+
+/**
+ * struct dsi_ctrl_hw_ops - operations supported by dsi host hardware
+ */
+struct dsi_ctrl_hw_ops {
+
+ /**
+ * host_setup() - Setup DSI host configuration
+ * @ctrl: Pointer to controller host hardware.
+ * @config: Configuration for DSI host controller
+ */
+ void (*host_setup)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *config);
+
+ /**
+ * video_engine_en() - enable DSI video engine
+ * @ctrl: Pointer to controller host hardware.
+ * @on: Enable/disable video engine.
+ */
+ void (*video_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+ /**
+ * video_engine_setup() - Setup dsi host controller for video mode
+ * @ctrl: Pointer to controller host hardware.
+ * @common_cfg: Common configuration parameters.
+ * @cfg: Video mode configuration.
+ *
+ * Set up DSI video engine with a specific configuration. Controller and
+ * video engine are not enabled as part of this function.
+ */
+ void (*video_engine_setup)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_video_engine_cfg *cfg);
+
+ /**
+ * set_video_timing() - set up the timing for video frame
+ * @ctrl: Pointer to controller host hardware.
+ * @mode: Video mode information.
+ *
+ * Set up the video timing parameters for the DSI video mode operation.
+ */
+ void (*set_video_timing)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_mode_info *mode);
+
+ /**
+ * cmd_engine_setup() - setup dsi host controller for command mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @common_cfg: Common configuration parameters.
+ * @cfg: Command mode configuration.
+ *
+ * Setup DSI CMD engine with a specific configuration. Controller and
+ * command engine are not enabled as part of this function.
+ */
+ void (*cmd_engine_setup)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_cmd_engine_cfg *cfg);
+
+ /**
+ * ctrl_en() - enable DSI controller engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @on: turn on/off the DSI controller engine.
+ */
+ void (*ctrl_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+ /**
+ * cmd_engine_en() - enable DSI controller command engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @on: Turn on/off the DSI command engine.
+ */
+ void (*cmd_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+ /**
+ * phy_sw_reset() - perform a soft reset on the PHY.
+ * @ctrl: Pointer to the controller host hardware.
+ */
+ void (*phy_sw_reset)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * soft_reset() - perform a soft reset on DSI controller
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * The video, command and controller engines will be disabled before the
+ * reset is triggered. These engines will not be enabled after the reset
+ * is complete. Caller must re-enable the engines.
+ *
+ * If the reset is done while MDP timing engine is turned on, the video
+ * engine should be re-enabled only during the vertical blanking time.
+ */
+ void (*soft_reset)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * setup_lane_map() - setup mapping between logical and physical lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lane_map: Structure defining the mapping between DSI logical
+ * lanes and physical lanes.
+ */
+ void (*setup_lane_map)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_lane_mapping *lane_map);
+
+ /**
+ * kickoff_command() - transmits commands stored in memory
+ * @ctrl: Pointer to the controller host hardware.
+ * @cmd: Command information.
+ * @flags: Modifiers for command transmission.
+ *
+ * The controller hardware is programmed with address and size of the
+ * command buffer. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+ void (*kickoff_command)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_info *cmd,
+ u32 flags);
+
+ /**
+ * kickoff_fifo_command() - transmits a command using FIFO in dsi
+ * hardware.
+ * @ctrl: Pointer to the controller host hardware.
+ * @cmd: Command information.
+ * @flags: Modifiers for command transmission.
+ *
+ * The controller hardware FIFO is programmed with command header and
+ * payload. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+ void (*kickoff_fifo_command)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+ u32 flags);
+
+ void (*reset_cmd_fifo)(struct dsi_ctrl_hw *ctrl);
+ /**
+ * trigger_command_dma() - trigger transmission of command buffer.
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * This trigger can only be used if there was a prior call to
+ * kickoff_command() or kickoff_fifo_command() with
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
+ */
+ void (*trigger_command_dma)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * get_cmd_read_data() - get data read from the peripheral
+ * @ctrl: Pointer to the controller host hardware.
+ * @rd_buf: Buffer where data will be read into.
+ * @total_read_len: Number of bytes to read.
+ */
+ u32 (*get_cmd_read_data)(struct dsi_ctrl_hw *ctrl,
+ u8 *rd_buf,
+ u32 total_read_len);
+
+ /**
+ * ulps_request() - request ulps entry for specified lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes) which need
+ * to enter ULPS.
+ *
+ * Caller should check if lanes are in ULPS mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+ void (*ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+ /**
+ * ulps_exit() - exit ULPS on specified lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes) which need
+ * to exit ULPS.
+ *
+ * Caller should check if lanes are in active mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+ void (*ulps_exit)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+ /**
+ * clear_ulps_request() - clear ulps request once all lanes are active
+ * @ctrl: Pointer to controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes).
+ *
+ * ULPS request should be cleared after the lanes have exited ULPS.
+ */
+ void (*clear_ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+ /**
+ * get_lanes_in_ulps() - returns the list of lanes in ULPS mode
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
+ * state. If 0 is returned, all the lanes are active.
+ *
+ * Return: List of lanes in ULPS state.
+ */
+ u32 (*get_lanes_in_ulps)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * clamp_enable() - enable DSI clamps to keep PHY driving a stable link
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes which need to be clamped.
+ * @enable_ulps: TODO:??
+ */
+ void (*clamp_enable)(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool enable_ulps);
+
+ /**
+ * clamp_disable() - disable DSI clamps
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes which need to have clamps released.
+ * @disable_ulps: TODO:??
+ */
+ void (*clamp_disable)(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool disable_ulps);
+
+ /**
+ * get_interrupt_status() - returns the interrupt status
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of interrupts (enum dsi_status_int_type) that
+ * are active. This list does not include any error interrupts. Caller
+ * should call get_error_status for error interrupts.
+ *
+ * Return: List of active interrupts.
+ */
+ u32 (*get_interrupt_status)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * clear_interrupt_status() - clears the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @ints: List of interrupts to be cleared.
+ */
+ void (*clear_interrupt_status)(struct dsi_ctrl_hw *ctrl, u32 ints);
+
+ /**
+ * enable_status_interrupts() - enable the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @ints: List of interrupts to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set ints to 0.
+ */
+ void (*enable_status_interrupts)(struct dsi_ctrl_hw *ctrl, u32 ints);
+
+ /**
+ * get_error_status() - returns the error status
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of errors (enum dsi_error_int_type) that are
+ * active. This list does not include any status interrupts. Caller
+ * should call get_interrupt_status for status interrupts.
+ *
+ * Return: List of active error interrupts.
+ */
+ u64 (*get_error_status)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * clear_error_status() - clears the specified errors
+ * @ctrl: Pointer to the controller host hardware.
+ * @errors: List of errors to be cleared.
+ */
+ void (*clear_error_status)(struct dsi_ctrl_hw *ctrl, u64 errors);
+
+ /**
+ * enable_error_interrupts() - enable the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @errors: List of errors to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set errors to 0.
+ */
+ void (*enable_error_interrupts)(struct dsi_ctrl_hw *ctrl, u64 errors);
+
+ /**
+ * video_test_pattern_setup() - setup test pattern engine for video mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @type: Type of test pattern.
+ * @init_val: Initial value to use for generating test pattern.
+ */
+ void (*video_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val);
+
+ /**
+ * cmd_test_pattern_setup() - setup test pattern engine for cmd mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @type: Type of test pattern.
+ * @init_val: Initial value to use for generating test pattern.
+ * @stream_id: Stream Id on which packets are generated.
+ */
+ void (*cmd_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val,
+ u32 stream_id);
+
+ /**
+ * test_pattern_enable() - enable test pattern engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @enable: Enable/Disable test pattern engine.
+ */
+ void (*test_pattern_enable)(struct dsi_ctrl_hw *ctrl, bool enable);
+
+ /**
+ * trigger_cmd_test_pattern() - trigger a command mode frame update with
+ * test pattern
+ * @ctrl: Pointer to the controller host hardware.
+ * @stream_id: Stream on which frame update is sent.
+ */
+ void (*trigger_cmd_test_pattern)(struct dsi_ctrl_hw *ctrl,
+ u32 stream_id);
+};
+
+/**
+ * struct dsi_ctrl_hw - DSI controller hardware object specific to an instance
+ * @base: VA for the DSI controller base address.
+ * @length: Length of the DSI controller register map.
+ * @mmss_misc_base: VA for the mmss_misc register block.
+ * @mmss_misc_length: Length of the mmss_misc register map.
+ * @index: Instance ID of the controller.
+ * @feature_map: Features supported by the DSI controller.
+ * @ops: Function pointers to the operations supported by the
+ * controller.
+ * @supported_interrupts: Status interrupts supported by the controller.
+ * @supported_errors: Error interrupts supported by the controller.
+ */
+struct dsi_ctrl_hw {
+ void __iomem *base;
+ u32 length;
+ void __iomem *mmss_misc_base;
+ u32 mmss_misc_length;
+ u32 index;
+
+ /* features */
+ DECLARE_BITMAP(feature_map, DSI_CTRL_MAX_FEATURES);
+ struct dsi_ctrl_hw_ops ops;
+
+ /* capabilities */
+ u32 supported_interrupts;
+ u64 supported_errors;
+};
+
+#endif /* _DSI_CTRL_HW_H_ */
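
To make the deferred-trigger contract concrete: kickoff_command() called with
DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER only programs the DMA, and a later
trigger_command_dma() call starts the transfer, as the ops documentation above
requires. A hypothetical helper built on those two ops:

	static void dsi_send_cmd_deferred_sketch(struct dsi_ctrl_hw *ctrl,
						 struct dsi_ctrl_cmd_dma_info *cmd)
	{
		/* Program address/size only; transmission is not started yet. */
		ctrl->ops.kickoff_command(ctrl, cmd,
					  DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER);

		/* ...wait for a safe point in the frame, then transmit... */
		ctrl->ops.trigger_command_dma(ctrl);
	}
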
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
new file mode 100644
index 000000000000..8326024f76ec
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
@@ -0,0 +1,1321 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "dsi-hw:" fmt
+#include <linux/delay.h>
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_ctrl_reg_1_4.h"
+#include "dsi_hw.h"
+
+#define MMSS_MISC_CLAMP_REG_OFF 0x0014
+
+/* Unsupported formats default to RGB888 */
+static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
+ 0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4 };
+static const u8 video_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
+ 0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3 };
+
+
+/**
+ * dsi_setup_trigger_controls() - setup dsi trigger configurations
+ * @ctrl: Pointer to the controller host hardware.
+ * @cfg: DSI host configuration that is common to both video and
+ * command modes.
+ */
+static void dsi_setup_trigger_controls(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *cfg)
+{
+ u32 reg = 0;
+ const u8 trigger_map[DSI_TRIGGER_MAX] = {
+ 0x0, 0x2, 0x1, 0x4, 0x5, 0x6 };
+
+ reg |= (cfg->te_mode == DSI_TE_ON_EXT_PIN) ? BIT(31) : 0;
+ reg |= (trigger_map[cfg->dma_cmd_trigger] & 0x7);
+ reg |= (trigger_map[cfg->mdp_cmd_trigger] & 0x7) << 4;
+ DSI_W32(ctrl, DSI_TRIG_CTRL, reg);
+}
+
+/**
+ * dsi_ctrl_hw_14_host_setup() - setup dsi host configuration
+ * @ctrl: Pointer to the controller host hardware.
+ * @cfg: DSI host configuration that is common to both video and
+ * command modes.
+ */
+void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *cfg)
+{
+ u32 reg_value = 0;
+
+ dsi_setup_trigger_controls(ctrl, cfg);
+
+ /* Set up clock timing controls (t_clk_post and t_clk_pre) */
+ reg_value = ((cfg->t_clk_post & 0x3F) << 8);
+ reg_value |= (cfg->t_clk_pre & 0x3F);
+ DSI_W32(ctrl, DSI_CLKOUT_TIMING_CTRL, reg_value);
+
+ /* EOT packet control */
+ reg_value = cfg->append_tx_eot ? 1 : 0;
+ reg_value |= (cfg->ignore_rx_eot ? (1 << 4) : 0);
+ DSI_W32(ctrl, DSI_EOT_PACKET_CTRL, reg_value);
+
+ /* Turn on dsi clocks */
+ DSI_W32(ctrl, DSI_CLK_CTRL, 0x23F);
+
+ /* Setup DSI control register */
+ reg_value = 0;
+ reg_value |= (cfg->en_crc_check ? BIT(24) : 0);
+ reg_value |= (cfg->en_ecc_check ? BIT(20) : 0);
+ reg_value |= BIT(8); /* Clock lane */
+ reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_3) ? BIT(7) : 0);
+ reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_2) ? BIT(6) : 0);
+ reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_1) ? BIT(5) : 0);
+ reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_0) ? BIT(4) : 0);
+
+ DSI_W32(ctrl, DSI_CTRL, reg_value);
+
+ /* Enable Timing double buffering */
+ DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x1);
+
+ pr_debug("[DSI_%d]Host configuration complete\n", ctrl->index);
+}
+
+/**
+ * phy_sw_reset() - perform a soft reset on the PHY.
+ * @ctrl: Pointer to the controller host hardware.
+ */
+void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl)
+{
+ DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x1);
+ udelay(1000);
+ DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x0);
+ udelay(100);
+
+ pr_debug("[DSI_%d] phy sw reset done\n", ctrl->index);
+}
+
+/**
+ * soft_reset() - perform a soft reset on DSI controller
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * The video, command and controller engines will be disabled before the
+ * reset is triggered. These engines will not be re-enabled after the reset
+ * is complete. Caller must re-enable the engines.
+ *
+ * If the reset is done while the MDP timing engine is turned on, the video
+ * engine should be re-enabled only during the vertical blanking time.
+ */
+void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl)
+{
+ u32 reg = 0;
+ u32 reg_ctrl = 0;
+
+ /* Clear DSI_EN, VIDEO_MODE_EN, CMD_MODE_EN */
+ reg_ctrl = DSI_R32(ctrl, DSI_CTRL);
+ DSI_W32(ctrl, DSI_CTRL, reg_ctrl & ~0x7);
+
+ /* Force enable PCLK, BYTECLK, AHBM_HCLK */
+ reg = DSI_R32(ctrl, DSI_CLK_CTRL);
+ reg |= 0x23F;
+ DSI_W32(ctrl, DSI_CLK_CTRL, reg);
+
+ /* Trigger soft reset */
+ DSI_W32(ctrl, DSI_SOFT_RESET, 0x1);
+ udelay(1);
+ DSI_W32(ctrl, DSI_SOFT_RESET, 0x0);
+
+ /* Disable force clock on */
+ reg &= ~(BIT(20) | BIT(11));
+ DSI_W32(ctrl, DSI_CLK_CTRL, reg);
+
+ /* Re-enable DSI controller */
+ DSI_W32(ctrl, DSI_CTRL, reg_ctrl);
+ pr_debug("[DSI_%d] ctrl soft reset done\n", ctrl->index);
+}
+
+/**
+ * set_video_timing() - set up the timing for video frame
+ * @ctrl: Pointer to controller host hardware.
+ * @mode: Video mode information.
+ *
+ * Set up the video timing parameters for the DSI video mode operation.
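+ *
+ * For example (hypothetical 1080p timings), with h_sync_width=44,
+ * h_back_porch=148, h_active=1920 and h_front_porch=88, the active
+ * window spans pixels 192 to 2112 and
+ * h_total = 44 + 148 + 1920 + 88 - 1 = 2199.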
+ */
+void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl,
+ struct dsi_mode_info *mode)
+{
+ u32 reg = 0;
+ u32 hs_start = 0;
+ u32 hs_end, active_h_start, active_h_end, h_total;
+ u32 vs_start = 0, vs_end = 0;
+ u32 vpos_start = 0, vpos_end, active_v_start, active_v_end, v_total;
+
+ hs_end = mode->h_sync_width;
+ active_h_start = mode->h_sync_width + mode->h_back_porch;
+ active_h_end = active_h_start + mode->h_active;
+ h_total = (mode->h_sync_width + mode->h_back_porch + mode->h_active +
+ mode->h_front_porch) - 1;
+
+ vpos_end = mode->v_sync_width;
+ active_v_start = mode->v_sync_width + mode->v_back_porch;
+ active_v_end = active_v_start + mode->v_active;
+ v_total = (mode->v_sync_width + mode->v_back_porch + mode->v_active +
+ mode->v_front_porch) - 1;
+
+ reg = ((active_h_end & 0xFFFF) << 16) | (active_h_start & 0xFFFF);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_H, reg);
+
+ reg = ((active_v_end & 0xFFFF) << 16) | (active_v_start & 0xFFFF);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_V, reg);
+
+ reg = ((v_total & 0xFFFF) << 16) | (h_total & 0xFFFF);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_TOTAL, reg);
+
+ reg = ((hs_end & 0xFFFF) << 16) | (hs_start & 0xFFFF);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_HSYNC, reg);
+
+ reg = ((vs_end & 0xFFFF) << 16) | (vs_start & 0xFFFF);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC, reg);
+
+ reg = ((vpos_end & 0xFFFF) << 16) | (vpos_start & 0xFFFF);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC_VPOS, reg);
+
+ /* TODO: HS TIMER value? */
+ DSI_W32(ctrl, DSI_HS_TIMER_CTRL, 0x3FD08);
+ DSI_W32(ctrl, DSI_MISR_VIDEO_CTRL, 0x10100);
+ DSI_W32(ctrl, DSI_DSI_TIMING_FLUSH, 0x1);
+ pr_debug("[DSI_%d] ctrl video parameters updated\n", ctrl->index);
+}
+
+/**
+ * video_engine_setup() - Setup dsi host controller for video mode
+ * @ctrl: Pointer to controller host hardware.
+ * @common_cfg: Common configuration parameters.
+ * @cfg: Video mode configuration.
+ *
+ * Set up DSI video engine with a specific configuration. Controller and
+ * video engine are not enabled as part of this function.
+ */
+void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_video_engine_cfg *cfg)
+{
+ u32 reg = 0;
+
+ reg |= (cfg->last_line_interleave_en ? BIT(31) : 0);
+ reg |= (cfg->pulse_mode_hsa_he ? BIT(28) : 0);
+ reg |= (cfg->hfp_lp11_en ? BIT(24) : 0);
+ reg |= (cfg->hbp_lp11_en ? BIT(20) : 0);
+ reg |= (cfg->hsa_lp11_en ? BIT(16) : 0);
+ reg |= (cfg->eof_bllp_lp11_en ? BIT(15) : 0);
+ reg |= (cfg->bllp_lp11_en ? BIT(12) : 0);
+ reg |= (cfg->traffic_mode & 0x3) << 8;
+ reg |= (cfg->vc_id & 0x3);
+ reg |= (video_mode_format_map[common_cfg->dst_format] & 0x3) << 4;
+ DSI_W32(ctrl, DSI_VIDEO_MODE_CTRL, reg);
+
+ reg = (common_cfg->swap_mode & 0x7) << 12;
+ reg |= (common_cfg->bit_swap_red ? BIT(0) : 0);
+ reg |= (common_cfg->bit_swap_green ? BIT(4) : 0);
+ reg |= (common_cfg->bit_swap_blue ? BIT(8) : 0);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_DATA_CTRL, reg);
+
+ pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index);
+}
+
+/**
+ * cmd_engine_setup() - setup dsi host controller for command mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @common_cfg: Common configuration parameters.
+ * @cfg: Command mode configuration.
+ *
+ * Setup DSI CMD engine with a specific configuration. Controller and
+ * command engine are not enabled as part of this function.
+ */
+void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_cmd_engine_cfg *cfg)
+{
+ u32 reg = 0;
+
+ reg = (cfg->max_cmd_packets_interleave & 0xF) << 20;
+ reg |= (common_cfg->bit_swap_red ? BIT(4) : 0);
+ reg |= (common_cfg->bit_swap_green ? BIT(8) : 0);
+ reg |= (common_cfg->bit_swap_blue ? BIT(12) : 0);
+ reg |= cmd_mode_format_map[common_cfg->dst_format];
+ DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_CTRL, reg);
+
+ reg = cfg->wr_mem_start & 0xFF;
+ reg |= (cfg->wr_mem_continue & 0xFF) << 8;
+ reg |= (cfg->insert_dcs_command ? BIT(16) : 0);
+ DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL, reg);
+
+ pr_debug("[DSI_%d] Cmd engine setup done\n", ctrl->index);
+}
+
+/**
+ * video_engine_en() - enable DSI video engine
+ * @ctrl: Pointer to controller host hardware.
+ * @on: Enable/disable the video engine.
+ */
+void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+ u32 reg = 0;
+
+ /* Set/Clear VIDEO_MODE_EN bit */
+ reg = DSI_R32(ctrl, DSI_CTRL);
+ if (on)
+ reg |= BIT(1);
+ else
+ reg &= ~BIT(1);
+
+ DSI_W32(ctrl, DSI_CTRL, reg);
+
+ pr_debug("[DSI_%d] Video engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * ctrl_en() - enable DSI controller engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @on: turn on/off the DSI controller engine.
+ */
+void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+ u32 reg = 0;
+
+ /* Set/Clear DSI_EN bit */
+ reg = DSI_R32(ctrl, DSI_CTRL);
+ if (on)
+ reg |= BIT(0);
+ else
+ reg &= ~BIT(0);
+
+ DSI_W32(ctrl, DSI_CTRL, reg);
+
+ pr_debug("[DSI_%d] Controller engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * cmd_engine_en() - enable DSI controller command engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @on: Turn on/off the DSI command engine.
+ */
+void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+ u32 reg = 0;
+
+ /* Set/Clear CMD_MODE_EN bit */
+ reg = DSI_R32(ctrl, DSI_CTRL);
+ if (on)
+ reg |= BIT(2);
+ else
+ reg &= ~BIT(2);
+
+ DSI_W32(ctrl, DSI_CTRL, reg);
+
+ pr_debug("[DSI_%d] command engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * setup_lane_map() - setup mapping between logical and physical lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lane_map: Structure defining the mapping between DSI logical
+ * lanes and physical lanes.
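+ *
+ * For example, a mapping with physical_lane0=3, physical_lane1=0,
+ * physical_lane2=1 and physical_lane3=2 is encoded as the decimal
+ * number 3012 and programmed as register value 1.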
+ */
+void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
+ struct dsi_lane_mapping *lane_map)
+{
+ u32 reg_value = 0;
+ u32 lane_number = ((lane_map->physical_lane0 * 1000) +
+ (lane_map->physical_lane1 * 100) +
+ (lane_map->physical_lane2 * 10) +
+ (lane_map->physical_lane3));
+
+ switch (lane_number) {
+ case 123: /* <0, 1, 2, 3> */
+ reg_value = 0;
+ break;
+ case 3012: /* <3, 0, 1, 2> */
+ reg_value = 1;
+ break;
+ case 2301: /* <2, 3, 0, 1> */
+ reg_value = 2;
+ break;
+ case 1230: /* <1, 2, 3, 0> */
+ reg_value = 3;
+ break;
+ case 321: /* <0, 3, 2, 1> */
+ reg_value = 4;
+ break;
+ case 1032: /* <1, 0, 3, 2> */
+ reg_value = 5;
+ break;
+ case 2103: /* <2, 1, 0, 3> */
+ reg_value = 6;
+ break;
+ case 3210: /* <3, 2, 1, 0> */
+ reg_value = 7;
+ break;
+ default: /* unknown mapping, fall back to the identity map */
+ reg_value = 0;
+ break;
+ }
+
+ DSI_W32(ctrl, DSI_LANE_SWAP_CTRL, reg_value);
+
+ pr_debug("[DSI_%d] Lane swap setup complete\n", ctrl->index);
+}
+
+/**
+ * kickoff_command() - transmits commands stored in memory
+ * @ctrl: Pointer to the controller host hardware.
+ * @cmd: Command information.
+ * @flags: Modifiers for command transmission.
+ *
+ * The controller hardware is programmed with address and size of the
+ * command buffer. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
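+ *
+ * Example (sketch, assuming the corresponding ops entries; cmd_info is
+ * a hypothetical, fully populated struct dsi_ctrl_cmd_dma_info) of a
+ * deferred trigger:
+ *
+ *	ctrl->ops.kickoff_command(ctrl, &cmd_info,
+ *				  DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER);
+ *	ctrl->ops.trigger_command_dma(ctrl);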
+ */
+void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_info *cmd,
+ u32 flags)
+{
+ u32 reg = 0;
+
+ /* Set broadcast, master, low power and embedded mode fields */
+ reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
+ if (cmd->en_broadcast)
+ reg |= BIT(31);
+ else
+ reg &= ~BIT(31);
+
+ if (cmd->is_master)
+ reg |= BIT(30);
+ else
+ reg &= ~BIT(30);
+
+ if (cmd->use_lpm)
+ reg |= BIT(26);
+ else
+ reg &= ~BIT(26);
+
+ reg |= BIT(28);
+ DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
+
+ DSI_W32(ctrl, DSI_DMA_CMD_OFFSET, cmd->offset);
+ DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->length & 0xFFFFFF));
+
+ /* wait for writes to complete before kick off */
+ wmb();
+
+ if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
+ DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+}
+
+/**
+ * kickoff_fifo_command() - transmits a command using FIFO in dsi
+ * hardware.
+ * @ctrl: Pointer to the controller host hardware.
+ * @cmd: Command information.
+ * @flags: Modifiers for command transmission.
+ *
+ * The controller hardware FIFO is programmed with command header and
+ * payload. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+ u32 flags)
+{
+ u32 reg = 0, i = 0;
+ u32 *ptr = cmd->command;
+ /*
+ * Set CMD_DMA_TPG_EN, TPG_DMA_FIFO_MODE and
+ * CMD_DMA_PATTERN_SEL = custom pattern stored in TPG DMA FIFO
+ */
+ reg = (BIT(1) | BIT(2) | (0x3 << 16));
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+ /*
+ * Program the FIFO with the command buffer. Hardware requires an extra
+ * DWORD (set to zero) if the command buffer length is an odd number of
+ * DWORDs.
+ */
+ for (i = 0; i < cmd->size; i += 4) {
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, *ptr);
+ ptr++;
+ }
+
+ if ((cmd->size / 4) & 0x1)
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, 0);
+
+ /* Set broadcast, master, low power and embedded mode fields */
+ reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
+ if (cmd->en_broadcast)
+ reg |= BIT(31);
+ else
+ reg &= ~BIT(31);
+
+ if (cmd->is_master)
+ reg |= BIT(30);
+ else
+ reg &= ~BIT(30);
+
+ if (cmd->use_lpm)
+ reg |= BIT(26);
+ else
+ reg &= ~BIT(26);
+
+ reg |= BIT(28);
+
+ DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
+
+ DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->size & 0xFFFFFF));
+ /* Finish writes before command trigger */
+ wmb();
+
+ if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
+ DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+
+ pr_debug("[DSI_%d]size=%d, trigger = %d\n",
+ ctrl->index, cmd->size,
+ (flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER) ? false : true);
+}
+
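+/**
+ * reset_cmd_fifo() - reset the test pattern generator command FIFO
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Disables command-DMA test pattern generation and resets the TPG DMA
+ * FIFO used by kickoff_fifo_command().
+ */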
+void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl)
+{
+ /* disable cmd dma tpg */
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, 0x0);
+
+ DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x1);
+ udelay(1);
+ DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x0);
+}
+
+/**
+ * trigger_command_dma() - trigger transmission of command buffer.
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * This trigger can be only used if there was a prior call to
+ * kickoff_command() or kickoff_fifo_command() with
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
+ */
+void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl)
+{
+ DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+ pr_debug("[DSI_%d] CMD DMA triggered\n", ctrl->index);
+}
+
+/**
+ * get_cmd_read_data() - get data read from the peripheral
+ * @ctrl:           Pointer to the controller host hardware.
+ * @rd_buf:         Buffer where data will be read into.
+ * @read_offset:    Number of bytes read so far; used to skip bytes that the
+ *                  hardware repeats when a response exceeds the 16 byte
+ *                  read-back FIFO.
+ * @total_read_len: Number of bytes to read.
+ *
+ * Return: number of bytes read.
+ */
+u32 dsi_ctrl_hw_14_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
+ u8 *rd_buf,
+ u32 read_offset,
+ u32 total_read_len)
+{
+ u32 *lp, *temp, data;
+ int i, j = 0, cnt;
+ u32 read_cnt;
+ u32 rx_byte = 0;
+ u32 repeated_bytes = 0;
+ u8 reg[16];
+ u32 pkt_size = 0;
+ int buf_offset = read_offset;
+
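+ /*
+ * Note: rx_byte and pkt_size are placeholders here; they are expected
+ * to be set from the preceding maximum-return-size request. With the
+ * zero defaults, cnt evaluates to 0 and the read-back register loop
+ * below does not run.
+ */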
+ lp = (u32 *)rd_buf;
+ temp = (u32 *)reg;
+ cnt = (rx_byte + 3) >> 2;
+
+ if (cnt > 4)
+ cnt = 4;
+
+ if (rx_byte == 4)
+ read_cnt = 4;
+ else
+ read_cnt = pkt_size + 6;
+
+ if (read_cnt > 16) {
+ int bytes_shifted;
+
+ bytes_shifted = read_cnt - 16;
+ repeated_bytes = buf_offset - bytes_shifted;
+ }
+
+ for (i = cnt - 1; i >= 0; i--) {
+ data = DSI_R32(ctrl, DSI_RDBK_DATA0 + i*4);
+ *temp++ = ntohl(data);
+ }
+
+ for (i = repeated_bytes; i < 16; i++)
+ rd_buf[j++] = reg[i];
+
+ pr_debug("[DSI_%d] Read %d bytes\n", ctrl->index, j);
+ return j;
+}
+
+/**
+ * ulps_request() - request ulps entry for specified lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes) which need
+ * to enter ULPS.
+ *
+ * Caller should check if lanes are in ULPS mode by calling
+ * get_lanes_in_ulps() operation.
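+ *
+ * Example (sketch, assuming the corresponding ops entries): request
+ * ULPS on all data lanes and poll until they have entered ULPS:
+ *
+ *	u32 lanes = DSI_DATA_LANE_0 | DSI_DATA_LANE_1 |
+ *		    DSI_DATA_LANE_2 | DSI_DATA_LANE_3;
+ *
+ *	ctrl->ops.ulps_request(ctrl, lanes);
+ *	while ((ctrl->ops.get_lanes_in_ulps(ctrl) & lanes) != lanes)
+ *		udelay(10);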
+ */
+void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+ u32 reg = 0;
+
+ if (lanes & DSI_CLOCK_LANE)
+ reg = BIT(4);
+ if (lanes & DSI_DATA_LANE_0)
+ reg |= BIT(0);
+ if (lanes & DSI_DATA_LANE_1)
+ reg |= BIT(1);
+ if (lanes & DSI_DATA_LANE_2)
+ reg |= BIT(2);
+ if (lanes & DSI_DATA_LANE_3)
+ reg |= BIT(3);
+
+ DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+
+ pr_debug("[DSI_%d] ULPS requested for lanes 0x%x\n", ctrl->index,
+ lanes);
+}
+
+/**
+ * ulps_exit() - exit ULPS on specified lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes) which need
+ * to exit ULPS.
+ *
+ * Caller should check if lanes are in active mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+ u32 reg = 0;
+
+ reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+ if (lanes & DSI_CLOCK_LANE)
+ reg |= BIT(12);
+ if (lanes & DSI_DATA_LANE_0)
+ reg |= BIT(8);
+ if (lanes & DSI_DATA_LANE_1)
+ reg |= BIT(9);
+ if (lanes & DSI_DATA_LANE_2)
+ reg |= BIT(10);
+ if (lanes & DSI_DATA_LANE_3)
+ reg |= BIT(11);
+
+ DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+
+ pr_debug("[DSI_%d] ULPS exit request for lanes=0x%x\n",
+ ctrl->index, lanes);
+}
+
+/**
+ * clear_ulps_request() - clear ulps request once all lanes are active
+ * @ctrl: Pointer to controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes).
+ *
+ * ULPS request should be cleared after the lanes have exited ULPS.
+ */
+void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+ u32 reg = 0;
+
+ reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+ reg &= ~BIT(4); /* clock lane */
+ if (lanes & DSI_DATA_LANE_0)
+ reg &= ~BIT(0);
+ if (lanes & DSI_DATA_LANE_1)
+ reg &= ~BIT(1);
+ if (lanes & DSI_DATA_LANE_2)
+ reg &= ~BIT(2);
+ if (lanes & DSI_DATA_LANE_3)
+ reg &= ~BIT(3);
+
+ DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+ /*
+ * HPG recommends separate writes for clearing ULPS_REQUEST and
+ * ULPS_EXIT.
+ */
+ DSI_W32(ctrl, DSI_LANE_CTRL, 0x0);
+
+ pr_debug("[DSI_%d] ULPS request cleared\n", ctrl->index);
+}
+
+/**
+ * get_lanes_in_ulps() - returns the list of lanes in ULPS mode
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
+ * state. If 0 is returned, all the lanes are active.
+ *
+ * Return: List of lanes in ULPS state.
+ */
+u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl)
+{
+ u32 reg = 0;
+ u32 lanes = 0;
+
+ reg = DSI_R32(ctrl, DSI_LANE_STATUS);
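+ /* A set status bit indicates an active lane; cleared means ULPS */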
+ if (!(reg & BIT(8)))
+ lanes |= DSI_DATA_LANE_0;
+ if (!(reg & BIT(9)))
+ lanes |= DSI_DATA_LANE_1;
+ if (!(reg & BIT(10)))
+ lanes |= DSI_DATA_LANE_2;
+ if (!(reg & BIT(11)))
+ lanes |= DSI_DATA_LANE_3;
+ if (!(reg & BIT(12)))
+ lanes |= DSI_CLOCK_LANE;
+
+ pr_debug("[DSI_%d] lanes in ulps = 0x%x\n", ctrl->index, lanes);
+ return lanes;
+}
+
+/**
+ * clamp_enable() - enable DSI clamps to keep PHY driving a stable link
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes which need to be clamped.
+ * @enable_ulps: If true, the ULPS request is also clamped for the lanes.
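+ *
+ * Example (sketch, assuming the corresponding ops entry): clamp the
+ * clock lane and all data lanes, latching the ULPS state as well:
+ *
+ *	ctrl->ops.clamp_enable(ctrl,
+ *			DSI_CLOCK_LANE | DSI_DATA_LANE_0 |
+ *			DSI_DATA_LANE_1 | DSI_DATA_LANE_2 |
+ *			DSI_DATA_LANE_3, true);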
+ */
+void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool enable_ulps)
+{
+ u32 clamp_reg = 0;
+ u32 bit_shift = 0;
+ u32 reg = 0;
+
+ if (ctrl->index == 1)
+ bit_shift = 16;
+
+ if (lanes & DSI_CLOCK_LANE) {
+ clamp_reg |= BIT(9);
+ if (enable_ulps)
+ clamp_reg |= BIT(8);
+ }
+
+ if (lanes & DSI_DATA_LANE_0) {
+ clamp_reg |= BIT(7);
+ if (enable_ulps)
+ clamp_reg |= BIT(6);
+ }
+
+ if (lanes & DSI_DATA_LANE_1) {
+ clamp_reg |= BIT(5);
+ if (enable_ulps)
+ clamp_reg |= BIT(4);
+ }
+
+ if (lanes & DSI_DATA_LANE_2) {
+ clamp_reg |= BIT(3);
+ if (enable_ulps)
+ clamp_reg |= BIT(2);
+ }
+
+ if (lanes & DSI_DATA_LANE_3) {
+ clamp_reg |= BIT(1);
+ if (enable_ulps)
+ clamp_reg |= BIT(0);
+ }
+
+ clamp_reg |= BIT(15); /* Enable clamp */
+
+ reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+ reg |= (clamp_reg << bit_shift);
+ DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
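+ /* Enable PHY reset skip (cleared again in clamp_disable()) */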
+ reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+ reg |= BIT(30);
+ DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+ pr_debug("[DSI_%d] Clamps enabled for lanes=0x%x\n", ctrl->index,
+ lanes);
+}
+
+/**
+ * clamp_disable() - disable DSI clamps
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes which need to have clamps released.
+ * @disable_ulps: If true, the ULPS clamp is also released for the lanes.
+ */
+void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool disable_ulps)
+{
+ u32 clamp_reg = 0;
+ u32 bit_shift = 0;
+ u32 reg = 0;
+
+ if (ctrl->index == 1)
+ bit_shift = 16;
+
+ if (lanes & DSI_CLOCK_LANE) {
+ clamp_reg |= BIT(9);
+ if (disable_ulps)
+ clamp_reg |= BIT(8);
+ }
+
+ if (lanes & DSI_DATA_LANE_0) {
+ clamp_reg |= BIT(7);
+ if (disable_ulps)
+ clamp_reg |= BIT(6);
+ }
+
+ if (lanes & DSI_DATA_LANE_1) {
+ clamp_reg |= BIT(5);
+ if (disable_ulps)
+ clamp_reg |= BIT(4);
+ }
+
+ if (lanes & DSI_DATA_LANE_2) {
+ clamp_reg |= BIT(3);
+ if (disable_ulps)
+ clamp_reg |= BIT(2);
+ }
+
+ if (lanes & DSI_DATA_LANE_3) {
+ clamp_reg |= BIT(1);
+ if (disable_ulps)
+ clamp_reg |= BIT(0);
+ }
+
+ clamp_reg |= BIT(15); /* Enable clamp */
+ clamp_reg <<= bit_shift;
+
+ /* Disable PHY reset skip */
+ reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+ reg &= ~BIT(30);
+ DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+ reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+ reg &= ~(clamp_reg);
+ DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+ pr_debug("[DSI_%d] Disable clamps for lanes=%d\n", ctrl->index, lanes);
+}
+
+/**
+ * get_interrupt_status() - returns the interrupt status
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of interrupts(enum dsi_status_int_type) that
+ * are active. This list does not include any error interrupts. Caller
+ * should call get_error_status for error interrupts.
+ *
+ * Return: List of active interrupts.
+ */
+u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl)
+{
+ u32 reg = 0;
+ u32 ints = 0;
+
+ reg = DSI_R32(ctrl, DSI_INT_CTRL);
+
+ if (reg & BIT(0))
+ ints |= DSI_CMD_MODE_DMA_DONE;
+ if (reg & BIT(8))
+ ints |= DSI_CMD_FRAME_DONE;
+ if (reg & BIT(10))
+ ints |= DSI_CMD_STREAM0_FRAME_DONE;
+ if (reg & BIT(12))
+ ints |= DSI_CMD_STREAM1_FRAME_DONE;
+ if (reg & BIT(14))
+ ints |= DSI_CMD_STREAM2_FRAME_DONE;
+ if (reg & BIT(16))
+ ints |= DSI_VIDEO_MODE_FRAME_DONE;
+ if (reg & BIT(20))
+ ints |= DSI_BTA_DONE;
+ if (reg & BIT(28))
+ ints |= DSI_DYN_REFRESH_DONE;
+ if (reg & BIT(30))
+ ints |= DSI_DESKEW_DONE;
+
+ pr_debug("[DSI_%d] Interrupt status = 0x%x, INT_CTRL=0x%x\n",
+ ctrl->index, ints, reg);
+ return ints;
+}
+
+/**
+ * clear_interrupt_status() - clears the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @ints: List of interrupts to be cleared.
+ */
+void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints)
+{
+ u32 reg = 0;
+
+ if (ints & DSI_CMD_MODE_DMA_DONE)
+ reg |= BIT(0);
+ if (ints & DSI_CMD_FRAME_DONE)
+ reg |= BIT(8);
+ if (ints & DSI_CMD_STREAM0_FRAME_DONE)
+ reg |= BIT(10);
+ if (ints & DSI_CMD_STREAM1_FRAME_DONE)
+ reg |= BIT(12);
+ if (ints & DSI_CMD_STREAM2_FRAME_DONE)
+ reg |= BIT(14);
+ if (ints & DSI_VIDEO_MODE_FRAME_DONE)
+ reg |= BIT(16);
+ if (ints & DSI_BTA_DONE)
+ reg |= BIT(20);
+ if (ints & DSI_DYN_REFRESH_DONE)
+ reg |= BIT(28);
+ if (ints & DSI_DESKEW_DONE)
+ reg |= BIT(30);
+
+ DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+ pr_debug("[DSI_%d] Clear interrupts, ints = 0x%x, INT_CTRL=0x%x\n",
+ ctrl->index, ints, reg);
+}
+
+/**
+ * enable_status_interrupts() - enable the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @ints: List of interrupts to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set ints to 0.
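+ *
+ * Example (sketch, assuming the corresponding ops entry): because the
+ * list overrides the previous one, adding DMA-done on top of
+ * frame-done means passing both flags again:
+ *
+ *	ctrl->ops.enable_status_interrupts(ctrl,
+ *			DSI_CMD_FRAME_DONE | DSI_CMD_MODE_DMA_DONE);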
+ */
+void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl, u32 ints)
+{
+ u32 reg = 0;
+
+ /* Do not change value of DSI_ERROR_MASK bit */
+ reg |= (DSI_R32(ctrl, DSI_INT_CTRL) & BIT(25));
+ if (ints & DSI_CMD_MODE_DMA_DONE)
+ reg |= BIT(1);
+ if (ints & DSI_CMD_FRAME_DONE)
+ reg |= BIT(9);
+ if (ints & DSI_CMD_STREAM0_FRAME_DONE)
+ reg |= BIT(11);
+ if (ints & DSI_CMD_STREAM1_FRAME_DONE)
+ reg |= BIT(13);
+ if (ints & DSI_CMD_STREAM2_FRAME_DONE)
+ reg |= BIT(15);
+ if (ints & DSI_VIDEO_MODE_FRAME_DONE)
+ reg |= BIT(17);
+ if (ints & DSI_BTA_DONE)
+ reg |= BIT(21);
+ if (ints & DSI_DYN_REFRESH_DONE)
+ reg |= BIT(29);
+ if (ints & DSI_DESKEW_DONE)
+ reg |= BIT(31);
+
+ DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+ pr_debug("[DSI_%d] Enable interrupts 0x%x, INT_CTRL=0x%x\n",
+ ctrl->index, ints, reg);
+}
+
+/**
+ * get_error_status() - returns the error status
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of errors(enum dsi_error_int_type) that are
+ * active. This list does not include any status interrupts. Caller
+ * should call get_interrupt_status for status interrupts.
+ *
+ * Return: List of active error interrupts.
+ */
+u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl)
+{
+ u32 dln0_phy_err;
+ u32 fifo_status;
+ u32 ack_error;
+ u32 timeout_errors;
+ u32 clk_error;
+ u32 dsi_status;
+ u64 errors = 0;
+
+ dln0_phy_err = DSI_R32(ctrl, DSI_DLN0_PHY_ERR);
+ if (dln0_phy_err & BIT(0))
+ errors |= DSI_DLN0_ESC_ENTRY_ERR;
+ if (dln0_phy_err & BIT(4))
+ errors |= DSI_DLN0_ESC_SYNC_ERR;
+ if (dln0_phy_err & BIT(8))
+ errors |= DSI_DLN0_LP_CONTROL_ERR;
+ if (dln0_phy_err & BIT(12))
+ errors |= DSI_DLN0_LP0_CONTENTION;
+ if (dln0_phy_err & BIT(16))
+ errors |= DSI_DLN0_LP1_CONTENTION;
+
+ fifo_status = DSI_R32(ctrl, DSI_FIFO_STATUS);
+ if (fifo_status & BIT(7))
+ errors |= DSI_CMD_MDP_FIFO_UNDERFLOW;
+ if (fifo_status & BIT(10))
+ errors |= DSI_CMD_DMA_FIFO_UNDERFLOW;
+ if (fifo_status & BIT(18))
+ errors |= DSI_DLN0_HS_FIFO_OVERFLOW;
+ if (fifo_status & BIT(19))
+ errors |= DSI_DLN0_HS_FIFO_UNDERFLOW;
+ if (fifo_status & BIT(22))
+ errors |= DSI_DLN1_HS_FIFO_OVERFLOW;
+ if (fifo_status & BIT(23))
+ errors |= DSI_DLN1_HS_FIFO_UNDERFLOW;
+ if (fifo_status & BIT(26))
+ errors |= DSI_DLN2_HS_FIFO_OVERFLOW;
+ if (fifo_status & BIT(27))
+ errors |= DSI_DLN2_HS_FIFO_UNDERFLOW;
+ if (fifo_status & BIT(30))
+ errors |= DSI_DLN3_HS_FIFO_OVERFLOW;
+ if (fifo_status & BIT(31))
+ errors |= DSI_DLN3_HS_FIFO_UNDERFLOW;
+
+ ack_error = DSI_R32(ctrl, DSI_ACK_ERR_STATUS);
+ if (ack_error & BIT(16))
+ errors |= DSI_RDBK_SINGLE_ECC_ERR;
+ if (ack_error & BIT(17))
+ errors |= DSI_RDBK_MULTI_ECC_ERR;
+ if (ack_error & BIT(20))
+ errors |= DSI_RDBK_CRC_ERR;
+ if (ack_error & BIT(23))
+ errors |= DSI_RDBK_INCOMPLETE_PKT;
+ if (ack_error & BIT(24))
+ errors |= DSI_PERIPH_ERROR_PKT;
+
+ timeout_errors = DSI_R32(ctrl, DSI_TIMEOUT_STATUS);
+ if (timeout_errors & BIT(0))
+ errors |= DSI_HS_TX_TIMEOUT;
+ if (timeout_errors & BIT(4))
+ errors |= DSI_LP_RX_TIMEOUT;
+ if (timeout_errors & BIT(8))
+ errors |= DSI_BTA_TIMEOUT;
+
+ clk_error = DSI_R32(ctrl, DSI_CLK_STATUS);
+ if (clk_error & BIT(16))
+ errors |= DSI_PLL_UNLOCK;
+
+ dsi_status = DSI_R32(ctrl, DSI_STATUS);
+ if (dsi_status & BIT(31))
+ errors |= DSI_INTERLEAVE_OP_CONTENTION;
+
+ pr_debug("[DSI_%d] Error status = 0x%llx, phy=0x%x, fifo=0x%x",
+ ctrl->index, errors, dln0_phy_err, fifo_status);
+ pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
+ ctrl->index, ack_error, timeout_errors, clk_error, dsi_status);
+ return errors;
+}
+
+/**
+ * clear_error_status() - clears the specified errors
+ * @ctrl: Pointer to the controller host hardware.
+ * @errors: List of errors to be cleared.
+ */
+void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors)
+{
+ u32 dln0_phy_err = 0;
+ u32 fifo_status = 0;
+ u32 ack_error = 0;
+ u32 timeout_error = 0;
+ u32 clk_error = 0;
+ u32 dsi_status = 0;
+ u32 int_ctrl = 0;
+
+ if (errors & DSI_RDBK_SINGLE_ECC_ERR)
+ ack_error |= BIT(16);
+ if (errors & DSI_RDBK_MULTI_ECC_ERR)
+ ack_error |= BIT(17);
+ if (errors & DSI_RDBK_CRC_ERR)
+ ack_error |= BIT(20);
+ if (errors & DSI_RDBK_INCOMPLETE_PKT)
+ ack_error |= BIT(23);
+ if (errors & DSI_PERIPH_ERROR_PKT)
+ ack_error |= BIT(24);
+
+ if (errors & DSI_LP_RX_TIMEOUT)
+ timeout_error |= BIT(4);
+ if (errors & DSI_HS_TX_TIMEOUT)
+ timeout_error |= BIT(0);
+ if (errors & DSI_BTA_TIMEOUT)
+ timeout_error |= BIT(8);
+
+ if (errors & DSI_PLL_UNLOCK)
+ clk_error |= BIT(16);
+
+ if (errors & DSI_DLN0_LP0_CONTENTION)
+ dln0_phy_err |= BIT(12);
+ if (errors & DSI_DLN0_LP1_CONTENTION)
+ dln0_phy_err |= BIT(16);
+ if (errors & DSI_DLN0_ESC_ENTRY_ERR)
+ dln0_phy_err |= BIT(0);
+ if (errors & DSI_DLN0_ESC_SYNC_ERR)
+ dln0_phy_err |= BIT(4);
+ if (errors & DSI_DLN0_LP_CONTROL_ERR)
+ dln0_phy_err |= BIT(8);
+
+ if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
+ fifo_status |= BIT(10);
+ if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
+ fifo_status |= BIT(7);
+ if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
+ fifo_status |= BIT(18);
+ if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
+ fifo_status |= BIT(22);
+ if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
+ fifo_status |= BIT(26);
+ if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
+ fifo_status |= BIT(30);
+ if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
+ fifo_status |= BIT(19);
+ if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
+ fifo_status |= BIT(23);
+ if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
+ fifo_status |= BIT(27);
+ if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
+ fifo_status |= BIT(31);
+
+ if (errors & DSI_INTERLEAVE_OP_CONTENTION)
+ dsi_status |= BIT(31);
+
+ DSI_W32(ctrl, DSI_DLN0_PHY_ERR, dln0_phy_err);
+ DSI_W32(ctrl, DSI_FIFO_STATUS, fifo_status);
+ DSI_W32(ctrl, DSI_ACK_ERR_STATUS, ack_error);
+ DSI_W32(ctrl, DSI_TIMEOUT_STATUS, timeout_error);
+ DSI_W32(ctrl, DSI_CLK_STATUS, clk_error);
+ DSI_W32(ctrl, DSI_STATUS, dsi_status);
+
+ int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
+ int_ctrl |= BIT(24);
+ DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
+ pr_debug("[DSI_%d] clear errors = 0x%llx, phy=0x%x, fifo=0x%x",
+ ctrl->index, errors, dln0_phy_err, fifo_status);
+ pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
+ ctrl->index, ack_error, timeout_error, clk_error, dsi_status);
+}
+
+/**
+ * enable_error_interrupts() - enable the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @errors: List of errors to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set errors to 0.
+ */
+void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
+ u64 errors)
+{
+ u32 int_ctrl = 0;
+ u32 int_mask0 = 0x7FFF3BFF;
+
+ int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
+ if (errors)
+ int_ctrl |= BIT(25);
+ else
+ int_ctrl &= ~BIT(25);
+
+ if (errors & DSI_RDBK_SINGLE_ECC_ERR)
+ int_mask0 &= ~BIT(0);
+ if (errors & DSI_RDBK_MULTI_ECC_ERR)
+ int_mask0 &= ~BIT(1);
+ if (errors & DSI_RDBK_CRC_ERR)
+ int_mask0 &= ~BIT(2);
+ if (errors & DSI_RDBK_INCOMPLETE_PKT)
+ int_mask0 &= ~BIT(3);
+ if (errors & DSI_PERIPH_ERROR_PKT)
+ int_mask0 &= ~BIT(4);
+
+ if (errors & DSI_LP_RX_TIMEOUT)
+ int_mask0 &= ~BIT(5);
+ if (errors & DSI_HS_TX_TIMEOUT)
+ int_mask0 &= ~BIT(6);
+ if (errors & DSI_BTA_TIMEOUT)
+ int_mask0 &= ~BIT(7);
+
+ if (errors & DSI_PLL_UNLOCK)
+ int_mask0 &= ~BIT(28);
+
+ if (errors & DSI_DLN0_LP0_CONTENTION)
+ int_mask0 &= ~BIT(24);
+ if (errors & DSI_DLN0_LP1_CONTENTION)
+ int_mask0 &= ~BIT(25);
+ if (errors & DSI_DLN0_ESC_ENTRY_ERR)
+ int_mask0 &= ~BIT(21);
+ if (errors & DSI_DLN0_ESC_SYNC_ERR)
+ int_mask0 &= ~BIT(22);
+ if (errors & DSI_DLN0_LP_CONTROL_ERR)
+ int_mask0 &= ~BIT(23);
+
+ if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
+ int_mask0 &= ~BIT(9);
+ if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
+ int_mask0 &= ~BIT(11);
+ if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
+ int_mask0 &= ~BIT(16);
+ if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
+ int_mask0 &= ~BIT(17);
+ if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
+ int_mask0 &= ~BIT(18);
+ if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
+ int_mask0 &= ~BIT(19);
+ if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
+ int_mask0 &= ~BIT(26);
+ if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
+ int_mask0 &= ~BIT(27);
+ if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
+ int_mask0 &= ~BIT(29);
+ if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
+ int_mask0 &= ~BIT(30);
+
+ if (errors & DSI_INTERLEAVE_OP_CONTENTION)
+ int_mask0 &= ~BIT(8);
+
+ DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
+ DSI_W32(ctrl, DSI_ERR_INT_MASK0, int_mask0);
+
+ pr_debug("[DSI_%d] enable errors = 0x%llx, int_mask0=0x%x\n",
+ ctrl->index, errors, int_mask0);
+}
+
+/**
+ * video_test_pattern_setup() - setup test pattern engine for video mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @type: Type of test pattern.
+ * @init_val: Initial value to use for generating test pattern.
+ */
+void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val)
+{
+ u32 reg = 0;
+
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, init_val);
+
+ switch (type) {
+ case DSI_TEST_PATTERN_FIXED:
+ reg |= (0x2 << 4);
+ break;
+ case DSI_TEST_PATTERN_INC:
+ reg |= (0x1 << 4);
+ break;
+ case DSI_TEST_PATTERN_POLY:
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_POLY, 0xF0F0F);
+ break;
+ default:
+ break;
+ }
+
+ DSI_W32(ctrl, DSI_TPG_MAIN_CONTROL, 0x100);
+ DSI_W32(ctrl, DSI_TPG_VIDEO_CONFIG, 0x5);
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+ pr_debug("[DSI_%d] Video test pattern setup done\n", ctrl->index);
+}
+
+/**
+ * cmd_test_pattern_setup() - setup test pattern engine for cmd mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @type: Type of test pattern.
+ * @init_val: Initial value to use for generating test pattern.
+ * @stream_id: Stream Id on which packets are generated.
+ */
+void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val,
+ u32 stream_id)
+{
+ u32 reg = 0;
+ u32 init_offset;
+ u32 poly_offset;
+ u32 pattern_sel_shift;
+
+ switch (stream_id) {
+ case 0:
+ init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0;
+ poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY;
+ pattern_sel_shift = 8;
+ break;
+ case 1:
+ init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1;
+ poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY;
+ pattern_sel_shift = 12;
+ break;
+ case 2:
+ init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2;
+ poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY;
+ pattern_sel_shift = 20;
+ break;
+ default:
+ return;
+ }
+
+ DSI_W32(ctrl, init_offset, init_val);
+
+ switch (type) {
+ case DSI_TEST_PATTERN_FIXED:
+ reg |= (0x2 << pattern_sel_shift);
+ break;
+ case DSI_TEST_PATTERN_INC:
+ reg |= (0x1 << pattern_sel_shift);
+ break;
+ case DSI_TEST_PATTERN_POLY:
+ DSI_W32(ctrl, poly_offset, 0xF0F0F);
+ break;
+ default:
+ break;
+ }
+
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+ pr_debug("[DSI_%d] Cmd test pattern setup done\n", ctrl->index);
+}
+
+/**
+ * test_pattern_enable() - enable test pattern engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @enable: Enable/Disable test pattern engine.
+ */
+void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl,
+ bool enable)
+{
+ u32 reg = DSI_R32(ctrl, DSI_TEST_PATTERN_GEN_CTRL);
+
+ if (enable)
+ reg |= BIT(0);
+ else
+ reg &= ~BIT(0);
+
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+ pr_debug("[DSI_%d] Test pattern enable=%d\n", ctrl->index, enable);
+}
+
+/**
+ * trigger_cmd_test_pattern() - trigger a command mode frame update with
+ * test pattern
+ * @ctrl: Pointer to the controller host hardware.
+ * @stream_id: Stream on which frame update is sent.
+ */
+void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
+ u32 stream_id)
+{
+ switch (stream_id) {
+ case 0:
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER, 0x1);
+ break;
+ case 1:
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER, 0x1);
+ break;
+ case 2:
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER, 0x1);
+ break;
+ default:
+ break;
+ }
+
+ pr_debug("[DSI_%d] Cmd Test pattern trigger\n", ctrl->index);
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h
new file mode 100644
index 000000000000..028ad46664a7
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_REG_H_
+#define _DSI_CTRL_REG_H_
+
+#define DSI_HW_VERSION (0x0000)
+#define DSI_CTRL (0x0004)
+#define DSI_STATUS (0x0008)
+#define DSI_FIFO_STATUS (0x000C)
+#define DSI_VIDEO_MODE_CTRL (0x0010)
+#define DSI_VIDEO_MODE_SYNC_DATATYPE (0x0014)
+#define DSI_VIDEO_MODE_PIXEL_DATATYPE (0x0018)
+#define DSI_VIDEO_MODE_BLANKING_DATATYPE (0x001C)
+#define DSI_VIDEO_MODE_DATA_CTRL (0x0020)
+#define DSI_VIDEO_MODE_ACTIVE_H (0x0024)
+#define DSI_VIDEO_MODE_ACTIVE_V (0x0028)
+#define DSI_VIDEO_MODE_TOTAL (0x002C)
+#define DSI_VIDEO_MODE_HSYNC (0x0030)
+#define DSI_VIDEO_MODE_VSYNC (0x0034)
+#define DSI_VIDEO_MODE_VSYNC_VPOS (0x0038)
+#define DSI_COMMAND_MODE_DMA_CTRL (0x003C)
+#define DSI_COMMAND_MODE_MDP_CTRL (0x0040)
+#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL (0x0044)
+#define DSI_DMA_CMD_OFFSET (0x0048)
+#define DSI_DMA_CMD_LENGTH (0x004C)
+#define DSI_DMA_FIFO_CTRL (0x0050)
+#define DSI_DMA_NULL_PACKET_DATA (0x0054)
+#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL (0x0058)
+#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL (0x005C)
+#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL (0x0060)
+#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL (0x0064)
+#define DSI_ACK_ERR_STATUS (0x0068)
+#define DSI_RDBK_DATA0 (0x006C)
+#define DSI_RDBK_DATA1 (0x0070)
+#define DSI_RDBK_DATA2 (0x0074)
+#define DSI_RDBK_DATA3 (0x0078)
+#define DSI_RDBK_DATATYPE0 (0x007C)
+#define DSI_RDBK_DATATYPE1 (0x0080)
+#define DSI_TRIG_CTRL (0x0084)
+#define DSI_EXT_MUX (0x0088)
+#define DSI_EXT_MUX_TE_PULSE_DETECT_CTRL (0x008C)
+#define DSI_CMD_MODE_DMA_SW_TRIGGER (0x0090)
+#define DSI_CMD_MODE_MDP_SW_TRIGGER (0x0094)
+#define DSI_CMD_MODE_BTA_SW_TRIGGER (0x0098)
+#define DSI_RESET_SW_TRIGGER (0x009C)
+#define DSI_MISR_CMD_CTRL (0x00A0)
+#define DSI_MISR_VIDEO_CTRL (0x00A4)
+#define DSI_LANE_STATUS (0x00A8)
+#define DSI_LANE_CTRL (0x00AC)
+#define DSI_LANE_SWAP_CTRL (0x00B0)
+#define DSI_DLN0_PHY_ERR (0x00B4)
+#define DSI_LP_TIMER_CTRL (0x00B8)
+#define DSI_HS_TIMER_CTRL (0x00BC)
+#define DSI_TIMEOUT_STATUS (0x00C0)
+#define DSI_CLKOUT_TIMING_CTRL (0x00C4)
+#define DSI_EOT_PACKET (0x00C8)
+#define DSI_EOT_PACKET_CTRL (0x00CC)
+#define DSI_GENERIC_ESC_TX_TRIGGER (0x00D0)
+#define DSI_CAM_BIST_CTRL (0x00D4)
+#define DSI_CAM_BIST_FRAME_SIZE (0x00D8)
+#define DSI_CAM_BIST_BLOCK_SIZE (0x00DC)
+#define DSI_CAM_BIST_FRAME_CONFIG (0x00E0)
+#define DSI_CAM_BIST_LSFR_CTRL (0x00E4)
+#define DSI_CAM_BIST_LSFR_INIT (0x00E8)
+#define DSI_CAM_BIST_START (0x00EC)
+#define DSI_CAM_BIST_STATUS (0x00F0)
+#define DSI_ERR_INT_MASK0 (0x010C)
+#define DSI_INT_CTRL (0x0110)
+#define DSI_IOBIST_CTRL (0x0114)
+#define DSI_SOFT_RESET (0x0118)
+#define DSI_CLK_CTRL (0x011C)
+#define DSI_CLK_STATUS (0x0120)
+#define DSI_PHY_SW_RESET (0x012C)
+#define DSI_AXI2AHB_CTRL (0x0130)
+#define DSI_MISR_CMD_MDP0_32BIT (0x0134)
+#define DSI_MISR_CMD_MDP1_32BIT (0x0138)
+#define DSI_MISR_CMD_DMA_32BIT (0x013C)
+#define DSI_MISR_VIDEO_32BIT (0x0140)
+#define DSI_LANE_MISR_CTRL (0x0144)
+#define DSI_LANE0_MISR (0x0148)
+#define DSI_LANE1_MISR (0x014C)
+#define DSI_LANE2_MISR (0x0150)
+#define DSI_LANE3_MISR (0x0154)
+#define DSI_TEST_PATTERN_GEN_CTRL (0x015C)
+#define DSI_TEST_PATTERN_GEN_VIDEO_POLY (0x0160)
+#define DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL (0x0164)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY (0x0168)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 (0x016C)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY (0x0170)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1 (0x0174)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_POLY (0x0178)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL (0x017C)
+#define DSI_TEST_PATTERN_GEN_VIDEO_ENABLE (0x0180)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER (0x0184)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER (0x0188)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2 (0x018C)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY (0x0190)
+#define DSI_COMMAND_MODE_MDP_IDLE_CTRL (0x0194)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER (0x0198)
+#define DSI_TPG_MAIN_CONTROL (0x019C)
+#define DSI_TPG_MAIN_CONTROL2 (0x01A0)
+#define DSI_TPG_VIDEO_CONFIG (0x01A4)
+#define DSI_TPG_COMPONENT_LIMITS (0x01A8)
+#define DSI_TPG_RECTANGLE (0x01AC)
+#define DSI_TPG_BLACK_WHITE_PATTERN_FRAMES (0x01B0)
+#define DSI_TPG_RGB_MAPPING (0x01B4)
+#define DSI_COMMAND_MODE_MDP_CTRL2 (0x01B8)
+#define DSI_COMMAND_MODE_MDP_STREAM2_CTRL (0x01BC)
+#define DSI_COMMAND_MODE_MDP_STREAM2_TOTAL (0x01C0)
+#define DSI_MISR_CMD_MDP2_8BIT (0x01C4)
+#define DSI_MISR_CMD_MDP2_32BIT (0x01C8)
+#define DSI_VBIF_CTRL (0x01CC)
+#define DSI_AES_CTRL (0x01D0)
+#define DSI_RDBK_DATA_CTRL (0x01D4)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2 (0x01D8)
+#define DSI_TPG_DMA_FIFO_STATUS (0x01DC)
+#define DSI_TPG_DMA_FIFO_WRITE_TRIGGER (0x01E0)
+#define DSI_DSI_TIMING_FLUSH (0x01E4)
+#define DSI_DSI_TIMING_DB_MODE (0x01E8)
+#define DSI_TPG_DMA_FIFO_RESET (0x01EC)
+#define DSI_SCRATCH_REGISTER_0 (0x01F0)
+#define DSI_VERSION (0x01F4)
+#define DSI_SCRATCH_REGISTER_1 (0x01F8)
+#define DSI_SCRATCH_REGISTER_2 (0x01FC)
+#define DSI_DYNAMIC_REFRESH_CTRL (0x0200)
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY (0x0204)
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2 (0x0208)
+#define DSI_DYNAMIC_REFRESH_PLL_DELAY (0x020C)
+#define DSI_DYNAMIC_REFRESH_STATUS (0x0210)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0 (0x0214)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1 (0x0218)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2 (0x021C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3 (0x0220)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4 (0x0224)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5 (0x0228)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6 (0x022C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7 (0x0230)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8 (0x0234)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9 (0x0238)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10 (0x023C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11 (0x0240)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12 (0x0244)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13 (0x0248)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14 (0x024C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15 (0x0250)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16 (0x0254)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17 (0x0258)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18 (0x025C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19 (0x0260)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20 (0x0264)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21 (0x0268)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22 (0x026C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23 (0x0270)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24 (0x0274)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25 (0x0278)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26 (0x027C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27 (0x0280)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28 (0x0284)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29 (0x0288)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30 (0x028C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31 (0x0290)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR (0x0294)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2 (0x0298)
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL (0x02A0)
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL2 (0x02A4)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL (0x02A8)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2 (0x02AC)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL3 (0x02B0)
+#define DSI_COMMAND_MODE_NULL_INSERTION_CTRL (0x02B4)
+#define DSI_READ_BACK_DISABLE_STATUS (0x02B8)
+#define DSI_DESKEW_CTRL (0x02BC)
+#define DSI_DESKEW_DELAY_CTRL (0x02C0)
+#define DSI_DESKEW_SW_TRIGGER (0x02C4)
+#define DSI_SECURE_DISPLAY_STATUS (0x02CC)
+#define DSI_SECURE_DISPLAY_BLOCK_COMMAND_COLOR (0x02D0)
+#define DSI_SECURE_DISPLAY_BLOCK_VIDEO_COLOR (0x02D4)
+
+#endif /* _DSI_CTRL_REG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
new file mode 100644
index 000000000000..5edfd5e62738
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DSI_PHY_HW_H_
+#define _DSI_PHY_HW_H_
+
+#include "dsi_defs.h"
+
+#define DSI_MAX_SETTINGS 8
+
+/**
+ * enum dsi_phy_version - DSI PHY version enumeration
+ * @DSI_PHY_VERSION_UNKNOWN: Unknown version.
+ * @DSI_PHY_VERSION_1_0: 28nm-HPM.
+ * @DSI_PHY_VERSION_2_0: 28nm-LPM.
+ * @DSI_PHY_VERSION_3_0: 20nm.
+ * @DSI_PHY_VERSION_4_0: 14nm.
+ * @DSI_PHY_VERSION_MAX:
+ */
+enum dsi_phy_version {
+ DSI_PHY_VERSION_UNKNOWN,
+ DSI_PHY_VERSION_1_0, /* 28nm-HPM */
+ DSI_PHY_VERSION_2_0, /* 28nm-LPM */
+ DSI_PHY_VERSION_3_0, /* 20nm */
+ DSI_PHY_VERSION_4_0, /* 14nm */
+ DSI_PHY_VERSION_MAX
+};
+
+/**
+ * enum dsi_phy_hw_features - features supported by DSI PHY hardware
+ * @DSI_PHY_DPHY: Supports DPHY
+ * @DSI_PHY_CPHY: Supports CPHY
+ */
+enum dsi_phy_hw_features {
+ DSI_PHY_DPHY,
+ DSI_PHY_CPHY,
+ DSI_PHY_MAX_FEATURES
+};
+
+/**
+ * enum dsi_phy_pll_source - pll clock source for PHY.
+ * @DSI_PLL_SOURCE_STANDALONE: Clock is sourced from native PLL and is not
+ * shared by other PHYs.
+ * @DSI_PLL_SOURCE_NATIVE: Clock is sourced from native PLL and is
+ * shared by other PHYs.
+ * @DSI_PLL_SOURCE_NON_NATIVE: Clock is sourced from other PHYs.
+ * @DSI_PLL_SOURCE_MAX:
+ */
+enum dsi_phy_pll_source {
+ DSI_PLL_SOURCE_STANDALONE = 0,
+ DSI_PLL_SOURCE_NATIVE,
+ DSI_PLL_SOURCE_NON_NATIVE,
+ DSI_PLL_SOURCE_MAX
+};
+
+/**
+ * struct dsi_phy_per_lane_cfgs - Holds register values for PHY parameters
+ * @lane: Up to DSI_MAX_SETTINGS (8) register values for each lane.
+ * @count_per_lane: Number of values per each lane.
+ */
+struct dsi_phy_per_lane_cfgs {
+ u8 lane[DSI_LANE_MAX][DSI_MAX_SETTINGS];
+ u32 count_per_lane;
+};
+
+/**
+ * struct dsi_phy_cfg - DSI PHY configuration
+ * @lanecfg: Lane configuration settings.
+ * @strength: Strength settings for lanes.
+ * @timing: Timing parameters for lanes.
+ * @regulators: Regulator settings for lanes.
+ * @pll_source: PLL source.
+ */
+struct dsi_phy_cfg {
+ struct dsi_phy_per_lane_cfgs lanecfg;
+ struct dsi_phy_per_lane_cfgs strength;
+ struct dsi_phy_per_lane_cfgs timing;
+ struct dsi_phy_per_lane_cfgs regulators;
+ enum dsi_phy_pll_source pll_source;
+};
+
+struct dsi_phy_hw;
+
+/**
+ * struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
+ * @regulator_enable: Enable PHY regulators.
+ * @regulator_disable: Disable PHY regulators.
+ * @enable: Enable PHY.
+ * @disable: Disable PHY.
+ * @calculate_timing_params: Calculate PHY timing params from mode information
+ */
+struct dsi_phy_hw_ops {
+ /**
+ * regulator_enable() - enable regulators for DSI PHY
+ * @phy: Pointer to DSI PHY hardware object.
+ * @reg_cfg: Regulator configuration for all DSI lanes.
+ */
+ void (*regulator_enable)(struct dsi_phy_hw *phy,
+ struct dsi_phy_per_lane_cfgs *reg_cfg);
+
+ /**
+ * regulator_disable() - disable regulators
+ * @phy: Pointer to DSI PHY hardware object.
+ */
+ void (*regulator_disable)(struct dsi_phy_hw *phy);
+
+ /**
+ * enable() - Enable PHY hardware
+ * @phy: Pointer to DSI PHY hardware object.
+ * @cfg: Per lane configurations for timing, strength and lane
+ * configurations.
+ */
+ void (*enable)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+
+ /**
+ * disable() - Disable PHY hardware
+ * @phy: Pointer to DSI PHY hardware object.
+ */
+ void (*disable)(struct dsi_phy_hw *phy);
+
+ /**
+ * calculate_timing_params() - calculates timing parameters.
+ * @phy: Pointer to DSI PHY hardware object.
+ * @mode: Mode information for which timing has to be calculated.
+ * @config: DSI host configuration for this mode.
+ * @timing: Timing parameters for each lane which will be returned.
+ */
+ int (*calculate_timing_params)(struct dsi_phy_hw *phy,
+ struct dsi_mode_info *mode,
+ struct dsi_host_common_cfg *config,
+ struct dsi_phy_per_lane_cfgs *timing);
+};
+
+/**
+ * struct dsi_phy_hw - DSI phy hardware object specific to an instance
+ * @base: VA for the DSI PHY base address.
+ * @length: Length of the DSI PHY register base map.
+ * @index: Instance ID of the controller.
+ * @version: DSI PHY version.
+ * @feature_map: Features supported by DSI PHY.
+ * @ops: Function pointer to PHY operations.
+ */
+struct dsi_phy_hw {
+ void __iomem *base;
+ u32 length;
+ u32 index;
+
+ enum dsi_phy_version version;
+
+ DECLARE_BITMAP(feature_map, DSI_PHY_MAX_FEATURES);
+ struct dsi_phy_hw_ops ops;
+};
+
+#endif /* _DSI_PHY_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
new file mode 100644
index 000000000000..512352d96f98
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "dsi-phy-hw:" fmt
+#include <linux/math64.h>
+#include <linux/delay.h>
+#include "dsi_hw.h"
+#include "dsi_phy_hw.h"
+
+#define DSIPHY_CMN_REVISION_ID0 0x0000
+#define DSIPHY_CMN_REVISION_ID1 0x0004
+#define DSIPHY_CMN_REVISION_ID2 0x0008
+#define DSIPHY_CMN_REVISION_ID3 0x000C
+#define DSIPHY_CMN_CLK_CFG0 0x0010
+#define DSIPHY_CMN_CLK_CFG1 0x0014
+#define DSIPHY_CMN_GLBL_TEST_CTRL 0x0018
+#define DSIPHY_CMN_CTRL_0 0x001C
+#define DSIPHY_CMN_CTRL_1 0x0020
+#define DSIPHY_CMN_CAL_HW_TRIGGER 0x0024
+#define DSIPHY_CMN_CAL_SW_CFG0 0x0028
+#define DSIPHY_CMN_CAL_SW_CFG1 0x002C
+#define DSIPHY_CMN_CAL_SW_CFG2 0x0030
+#define DSIPHY_CMN_CAL_HW_CFG0 0x0034
+#define DSIPHY_CMN_CAL_HW_CFG1 0x0038
+#define DSIPHY_CMN_CAL_HW_CFG2 0x003C
+#define DSIPHY_CMN_CAL_HW_CFG3 0x0040
+#define DSIPHY_CMN_CAL_HW_CFG4 0x0044
+#define DSIPHY_CMN_PLL_CNTRL 0x0048
+#define DSIPHY_CMN_LDO_CNTRL 0x004C
+
+#define DSIPHY_CMN_REGULATOR_CAL_STATUS0 0x0064
+#define DSIPHY_CMN_REGULATOR_CAL_STATUS1 0x0068
+
+/* n = 0..3 for data lanes and n = 4 for clock lane */
+#define DSIPHY_DLNX_CFG0(n) (0x100 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG1(n) (0x104 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG2(n) (0x108 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG3(n) (0x10C + ((n) * 0x80))
+#define DSIPHY_DLNX_TEST_DATAPATH(n) (0x110 + ((n) * 0x80))
+#define DSIPHY_DLNX_TEST_STR(n) (0x114 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_4(n) (0x118 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_5(n) (0x11C + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_6(n) (0x120 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_7(n) (0x124 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_8(n) (0x128 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_9(n) (0x12C + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_10(n) (0x130 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_11(n) (0x134 + ((n) * 0x80))
+#define DSIPHY_DLNX_STRENGTH_CTRL_0(n) (0x138 + ((n) * 0x80))
+#define DSIPHY_DLNX_STRENGTH_CTRL_1(n) (0x13C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_POLY(n) (0x140 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SEED0(n) (0x144 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SEED1(n) (0x148 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_HEAD(n) (0x14C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SOT(n) (0x150 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL0(n) (0x154 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL1(n) (0x158 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL2(n) (0x15C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL3(n) (0x160 + ((n) * 0x80))
+#define DSIPHY_DLNX_VREG_CNTRL(n) (0x164 + ((n) * 0x80))
+#define DSIPHY_DLNX_HSTX_STR_STATUS(n) (0x168 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS0(n) (0x16C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS1(n) (0x170 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS2(n) (0x174 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS3(n) (0x178 + ((n) * 0x80))
+#define DSIPHY_DLNX_MISR_STATUS(n) (0x17C + ((n) * 0x80))
+
+#define DSIPHY_PLL_CLKBUFLR_EN 0x041C
+#define DSIPHY_PLL_PLL_BANDGAP 0x0508
+
+/**
+ * struct timing_entry - Calculated values for each timing parameter.
+ * @mipi_min:  Minimum value allowed by the MIPI D-PHY specification.
+ * @mipi_max:  Maximum value allowed by the MIPI D-PHY specification.
+ * @rec_min:   Minimum recommended value.
+ * @rec_max:   Maximum recommended value.
+ * @rec:       Recommended value chosen from the recommended range.
+ * @reg_value: Value to be programmed in register.
+ */
+struct timing_entry {
+ s32 mipi_min;
+ s32 mipi_max;
+ s32 rec_min;
+ s32 rec_max;
+ s32 rec;
+ u8 reg_value;
+};
+
+/**
+ * struct phy_timing_desc - Timing parameters for DSI PHY.
+ *
+ * Each member is a &struct timing_entry for the corresponding D-PHY
+ * timing parameter.
+ */
+struct phy_timing_desc {
+ struct timing_entry clk_prepare;
+ struct timing_entry clk_zero;
+ struct timing_entry clk_trail;
+ struct timing_entry hs_prepare;
+ struct timing_entry hs_zero;
+ struct timing_entry hs_trail;
+ struct timing_entry hs_rqst;
+ struct timing_entry hs_rqst_clk;
+ struct timing_entry hs_exit;
+ struct timing_entry ta_go;
+ struct timing_entry ta_sure;
+ struct timing_entry ta_set;
+ struct timing_entry clk_post;
+ struct timing_entry clk_pre;
+};
+
+/**
+ * struct phy_clk_params - Clock parameters for PHY timing calculations.
+ * @bitclk_mbps:   Bit clock frequency in Mbps.
+ * @escclk_numer:  Escape clock numerator.
+ * @escclk_denom:  Escape clock denominator.
+ * @tlpx_numer_ns: TLPX numerator, in nanoseconds.
+ * @treot_ns:      T-EOT time, in nanoseconds.
+ */
+struct phy_clk_params {
+ u32 bitclk_mbps;
+ u32 escclk_numer;
+ u32 escclk_denom;
+ u32 tlpx_numer_ns;
+ u32 treot_ns;
+};
+
+/**
+ * regulator_enable() - enable regulators for DSI PHY
+ * @phy: Pointer to DSI PHY hardware object.
+ * @reg_cfg: Regulator configuration for all DSI lanes.
+ */
+void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy,
+ struct dsi_phy_per_lane_cfgs *reg_cfg)
+{
+ int i;
+
+ for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
+ DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(i), reg_cfg->lane[i][0]);
+
+ /* make sure all values are written to hardware */
+ wmb();
+
+ pr_debug("[DSI_%d] Phy regulators enabled\n", phy->index);
+}
+
+/**
+ * regulator_disable() - disable regulators
+ * @phy: Pointer to DSI PHY hardware object.
+ */
+void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy)
+{
+ pr_debug("[DSI_%d] Phy regulators disabled\n", phy->index);
+}
+
+/**
+ * enable() - Enable PHY hardware
+ * @phy: Pointer to DSI PHY hardware object.
+ * @cfg: Per lane configurations for timing, strength and lane
+ * configurations.
+ */
+void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy,
+ struct dsi_phy_cfg *cfg)
+{
+ int i;
+ struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
+ u32 data;
+
+ DSI_W32(phy, DSIPHY_CMN_LDO_CNTRL, 0x1C);
+
+ DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0x1);
+ for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+ DSI_W32(phy, DSIPHY_DLNX_CFG0(i), cfg->lanecfg.lane[i][0]);
+ DSI_W32(phy, DSIPHY_DLNX_CFG1(i), cfg->lanecfg.lane[i][1]);
+ DSI_W32(phy, DSIPHY_DLNX_CFG2(i), cfg->lanecfg.lane[i][2]);
+ DSI_W32(phy, DSIPHY_DLNX_CFG3(i), cfg->lanecfg.lane[i][3]);
+
+ DSI_W32(phy, DSIPHY_DLNX_TEST_STR(i), 0x88);
+
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_4(i), timing->lane[i][0]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_5(i), timing->lane[i][1]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_6(i), timing->lane[i][2]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_7(i), timing->lane[i][3]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_8(i), timing->lane[i][4]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_9(i), timing->lane[i][5]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_10(i), timing->lane[i][6]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_11(i), timing->lane[i][7]);
+
+ DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_0(i),
+ cfg->strength.lane[i][0]);
+ DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_1(i),
+ cfg->strength.lane[i][1]);
+ }
+
+ /* make sure all values are written to hardware before enabling phy */
+ wmb();
+
+ DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x80);
+ udelay(100);
+ DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x00);
+
+ data = DSI_R32(phy, DSIPHY_CMN_GLBL_TEST_CTRL);
+
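+ /*
+  * Clock buffer setup, as inferred from the PLL source names and the
+  * register values below: a standalone PHY enables only its own clock
+  * buffer, a native source also feeds the companion PHY (split
+  * display), and a non-native PHY takes its bit clock from the
+  * companion PHY's PLL.
+  */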
+ switch (cfg->pll_source) {
+ case DSI_PLL_SOURCE_STANDALONE:
+ DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x01);
+ data &= ~BIT(2);
+ break;
+ case DSI_PLL_SOURCE_NATIVE:
+ DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x03);
+ data &= ~BIT(2);
+ break;
+ case DSI_PLL_SOURCE_NON_NATIVE:
+ DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x00);
+ data |= BIT(2);
+ break;
+ default:
+ break;
+ }
+
+ DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, data);
+
+ /* Enable bias current for pll1 during split display case */
+ if (cfg->pll_source == DSI_PLL_SOURCE_NON_NATIVE)
+ DSI_W32(phy, DSIPHY_PLL_PLL_BANDGAP, 0x3);
+
+ pr_debug("[DSI_%d]Phy enabled ", phy->index);
+}
+
+/**
+ * disable() - Disable PHY hardware
+ * @phy: Pointer to DSI PHY hardware object.
+ */
+void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy)
+{
+ DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0);
+ DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0);
+ DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0);
+ pr_debug("[DSI_%d]Phy disabled ", phy->index);
+}
+
+static const u32 bits_per_pixel[DSI_PIXEL_FORMAT_MAX] = {
+ 16, 18, 18, 24, 3, 8, 12 };
+
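+/*
+ * All calculations below run in 20-bit fixed point: intermediate values
+ * are scaled by multiplier (BIT(20)) so that ns/bit-clock ratios keep a
+ * fractional part, and are divided back down before being programmed.
+ */
+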
+/**
+ * calc_clk_prepare - calculates prepare timing params for clk lane.
+ */
+static int calc_clk_prepare(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ s32 *actual_frac,
+ s64 *actual_intermediate)
+{
+ u32 const min_prepare_frac = 50;
+ u64 const multiplier = BIT(20);
+
+ struct timing_entry *t = &desc->clk_prepare;
+ int rc = 0;
+ u64 dividend, temp, temp_multiple;
+ s32 frac = 0;
+ s64 intermediate;
+ s64 clk_prep_actual;
+
+ dividend = ((t->rec_max - t->rec_min) * min_prepare_frac * multiplier);
+ temp = roundup(div_s64(dividend, 100), multiplier);
+ temp += (t->rec_min * multiplier);
+ t->rec = div_s64(temp, multiplier);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor clk_prepare\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ /* calculate theoretical value */
+ temp_multiple = 8 * t->reg_value * clk_params->tlpx_numer_ns
+ * multiplier;
+ intermediate = div_s64(temp_multiple, clk_params->bitclk_mbps);
+ div_s64_rem(temp_multiple, clk_params->bitclk_mbps, &frac);
+ clk_prep_actual = div_s64((intermediate + frac), multiplier);
+
+ pr_debug("CLK_PREPARE:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max);
+ pr_debug(" reg_value=%d, actual=%lld\n", t->reg_value, clk_prep_actual);
+
+ *actual_frac = frac;
+ *actual_intermediate = intermediate;
+
+ return rc;
+}
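+
+/*
+ * Illustrative example (values made up): with rec_min = 10 and
+ * rec_max = 30, min_prepare_frac = 50 selects the midpoint of the
+ * recommended range, i.e. rec = 10 + roundup(20 * 50 / 100) = 20.
+ */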
+
+/**
+ * calc_clk_zero - calculates zero timing params for clk lane.
+ */
+static int calc_clk_zero(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ s32 actual_frac,
+ s64 actual_intermediate)
+{
+ u32 const clk_zero_min_frac = 2;
+ u64 const multiplier = BIT(20);
+
+ int rc = 0;
+ struct timing_entry *t = &desc->clk_zero;
+ s64 mipi_min, rec_temp1, rec_temp2, rec_temp3, rec_min;
+
+ mipi_min = ((300 * multiplier) - (actual_intermediate + actual_frac));
+ t->mipi_min = div_s64(mipi_min, multiplier);
+
+ rec_temp1 = div_s64((mipi_min * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ rec_temp2 = (rec_temp1 - (11 * multiplier));
+ rec_temp3 = roundup(div_s64(rec_temp2, 8), multiplier);
+ rec_min = (div_s64(rec_temp3, multiplier) - 3);
+ t->rec_min = rec_min;
+ t->rec_max = ((t->rec_min > 255) ? 511 : 255);
+
+ t->rec = DIV_ROUND_UP(
+ (((t->rec_max - t->rec_min) * clk_zero_min_frac) +
+ (t->rec_min * 100)),
+ 100);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor clk_zero\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("CLK_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+ return rc;
+}
+
+/**
+ * calc_clk_trail - calculates trail timing params for clk lane.
+ */
+static int calc_clk_trail(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ s64 *teot_clk_lane)
+{
+ u64 const multiplier = BIT(20);
+ u32 const phy_timing_frac = 30;
+
+ int rc = 0;
+ struct timing_entry *t = &desc->clk_trail;
+ u64 temp_multiple;
+ s32 frac;
+ s64 mipi_max_tr, rec_temp1, rec_temp2, rec_temp3, mipi_max;
+ s64 teot_clk_lane1;
+
+ temp_multiple = div_s64(
+ (12 * multiplier * clk_params->tlpx_numer_ns),
+ clk_params->bitclk_mbps);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+
+ mipi_max_tr = ((105 * multiplier) +
+ (temp_multiple + frac));
+ teot_clk_lane1 = div_s64(mipi_max_tr, multiplier);
+
+ mipi_max = (mipi_max_tr - (clk_params->treot_ns * multiplier));
+ t->mipi_max = div_s64(mipi_max, multiplier);
+
+ temp_multiple = div_s64(
+ (t->mipi_min * multiplier * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ rec_temp1 = temp_multiple + frac + (3 * multiplier);
+ rec_temp2 = div_s64(rec_temp1, 8);
+ rec_temp3 = roundup(rec_temp2, multiplier);
+
+ t->rec_min = div_s64(rec_temp3, multiplier);
+
+ /* recommended max */
+ rec_temp1 = div_s64((mipi_max * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ rec_temp2 = rec_temp1 + (3 * multiplier);
+ rec_temp3 = rec_temp2 / 8;
+ t->rec_max = div_s64(rec_temp3, multiplier);
+
+ t->rec = DIV_ROUND_UP(
+ (((t->rec_max - t->rec_min) * phy_timing_frac) +
+ (t->rec_min * 100)),
+ 100);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor clk_zero\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ *teot_clk_lane = teot_clk_lane1;
+ pr_debug("CLK_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+ return rc;
+}
+
+/**
+ * calc_hs_prepare - calculates prepare timing params for data lanes in HS.
+ */
+static int calc_hs_prepare(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ u64 *temp_mul)
+{
+ u64 const multiplier = BIT(20);
+ u32 const min_prepare_frac = 50;
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_prepare;
+ u64 temp_multiple, dividend, temp;
+ s32 frac;
+ s64 rec_temp1, rec_temp2, mipi_max, mipi_min;
+ u32 low_clk_multiplier = 0;
+
+ if (clk_params->bitclk_mbps <= 120)
+ low_clk_multiplier = 2;
+ /* mipi min */
+ temp_multiple = div_s64((4 * multiplier * clk_params->tlpx_numer_ns),
+ clk_params->bitclk_mbps);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ mipi_min = (40 * multiplier) + (temp_multiple + frac);
+ t->mipi_min = div_s64(mipi_min, multiplier);
+
+ /* mipi_max */
+ temp_multiple = div_s64(
+ (6 * multiplier * clk_params->tlpx_numer_ns),
+ clk_params->bitclk_mbps);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ mipi_max = (85 * multiplier) + temp_multiple;
+ t->mipi_max = div_s64(mipi_max, multiplier);
+
+ /* recommended min */
+ temp_multiple = div_s64((mipi_min * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ temp_multiple -= (low_clk_multiplier * multiplier);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ rec_temp1 = roundup(((temp_multiple + frac) / 8), multiplier);
+ t->rec_min = div_s64(rec_temp1, multiplier);
+
+ /* recommended max */
+ temp_multiple = div_s64((mipi_max * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ temp_multiple -= (low_clk_multiplier * multiplier);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ rec_temp2 = rounddown((temp_multiple / 8), multiplier);
+ t->rec_max = div_s64(rec_temp2, multiplier);
+
+ /* register value */
+ dividend = ((rec_temp2 - rec_temp1) * min_prepare_frac);
+ temp = roundup(div_u64(dividend, 100), multiplier);
+ t->rec = div_s64((temp + rec_temp1), multiplier);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor hs_prepare\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ temp_multiple = div_s64(
+ (8 * (temp + rec_temp1) * clk_params->tlpx_numer_ns),
+ clk_params->bitclk_mbps);
+
+ *temp_mul = temp_multiple;
+ pr_debug("HS_PREP:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+ return rc;
+}
+
+/**
+ * calc_hs_zero - calculates zero timing params for data lanes in HS.
+ */
+static int calc_hs_zero(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ u64 temp_multiple)
+{
+ u32 const hs_zero_min_frac = 10;
+ u64 const multiplier = BIT(20);
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_zero;
+ s64 rec_temp1, rec_temp2, rec_temp3, mipi_min;
+ s64 rec_min;
+
+ mipi_min = div_s64((10 * clk_params->tlpx_numer_ns * multiplier),
+ clk_params->bitclk_mbps);
+ rec_temp1 = (145 * multiplier) + mipi_min - temp_multiple;
+ t->mipi_min = div_s64(rec_temp1, multiplier);
+
+ /* recommended min */
+ rec_temp1 = div_s64((rec_temp1 * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ rec_temp2 = rec_temp1 - (11 * multiplier);
+ rec_temp3 = roundup((rec_temp2 / 8), multiplier);
+ rec_min = rec_temp3 - (3 * multiplier);
+ t->rec_min = div_s64(rec_min, multiplier);
+ t->rec_max = ((t->rec_min > 255) ? 511 : 255);
+
+ t->rec = DIV_ROUND_UP(
+ (((t->rec_max - t->rec_min) * hs_zero_min_frac) +
+ (t->rec_min * 100)),
+ 100);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor hs_zero\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_hs_trail - calculates trail timing params for data lanes in HS.
+ */
+static int calc_hs_trail(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ u64 teot_clk_lane)
+{
+ u32 const phy_timing_frac = 30;
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_trail;
+ s64 rec_temp1;
+
+ t->mipi_min = 60 +
+ mult_frac(clk_params->tlpx_numer_ns, 4,
+ clk_params->bitclk_mbps);
+
+ t->mipi_max = teot_clk_lane - clk_params->treot_ns;
+
+ t->rec_min = DIV_ROUND_UP(
+ ((t->mipi_min * clk_params->bitclk_mbps) +
+ (3 * clk_params->tlpx_numer_ns)),
+ (8 * clk_params->tlpx_numer_ns));
+
+ rec_temp1 = ((t->mipi_max * clk_params->bitclk_mbps) +
+ (3 * clk_params->tlpx_numer_ns));
+ t->rec_max = (rec_temp1 / (8 * clk_params->tlpx_numer_ns));
+ rec_temp1 = DIV_ROUND_UP(
+ ((t->rec_max - t->rec_min) * phy_timing_frac),
+ 100);
+ t->rec = rec_temp1 + t->rec_min;
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor hs_trail\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_hs_rqst - calculates rqst timing params for data lanes in HS.
+ */
+static int calc_hs_rqst(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_rqst;
+
+ t->rec = DIV_ROUND_UP(
+ ((t->mipi_min * clk_params->bitclk_mbps) -
+ (8 * clk_params->tlpx_numer_ns)),
+ (8 * clk_params->tlpx_numer_ns));
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor hs_rqst, %d\n", t->rec);
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_RQST:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_hs_exit - calculates exit timing params for data lanes in HS.
+ */
+static int calc_hs_exit(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ u32 const hs_exit_min_frac = 10;
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_exit;
+
+ t->rec_min = (DIV_ROUND_UP(
+ (t->mipi_min * clk_params->bitclk_mbps),
+ (8 * clk_params->tlpx_numer_ns)) - 1);
+
+ t->rec = DIV_ROUND_UP(
+ (((t->rec_max - t->rec_min) * hs_exit_min_frac) +
+ (t->rec_min * 100)),
+ 100);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor hs_exit\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_EXIT:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_hs_rqst_clk - calculates rqst timing params for clock lane.
+ */
+static int calc_hs_rqst_clk(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_rqst_clk;
+
+ t->rec = DIV_ROUND_UP(
+ ((t->mipi_min * clk_params->bitclk_mbps) -
+ (8 * clk_params->tlpx_numer_ns)),
+ (8 * clk_params->tlpx_numer_ns));
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor hs_rqst_clk\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_RQST_CLK:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * dsi_phy_calc_timing_params - calculates timing parameters for a given bit clock
+ */
+static int dsi_phy_calc_timing_params(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ int rc = 0;
+ s32 actual_frac = 0;
+ s64 actual_intermediate = 0;
+ u64 temp_multiple;
+ s64 teot_clk_lane;
+
+ rc = calc_clk_prepare(clk_params, desc, &actual_frac,
+ &actual_intermediate);
+ if (rc) {
+ pr_err("clk_prepare calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_clk_zero(clk_params, desc, actual_frac, actual_intermediate);
+ if (rc) {
+ pr_err("clk_zero calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_clk_trail(clk_params, desc, &teot_clk_lane);
+ if (rc) {
+ pr_err("clk_trail calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_hs_prepare(clk_params, desc, &temp_multiple);
+ if (rc) {
+ pr_err("hs_prepare calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_hs_zero(clk_params, desc, temp_multiple);
+ if (rc) {
+ pr_err("hs_zero calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_hs_trail(clk_params, desc, teot_clk_lane);
+ if (rc) {
+ pr_err("hs_trail calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_hs_rqst(clk_params, desc);
+ if (rc) {
+ pr_err("hs_rqst calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_hs_exit(clk_params, desc);
+ if (rc) {
+ pr_err("hs_exit calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_hs_rqst_clk(clk_params, desc);
+ if (rc) {
+ pr_err("hs_rqst_clk calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+error:
+ return rc;
+}
+
+/**
+ * calculate_timing_params() - calculates timing parameters.
+ * @phy: Pointer to DSI PHY hardware object.
+ * @mode: Mode information for which timing has to be calculated.
+ * @config: DSI host configuration for this mode.
+ * @timing: Timing parameters for each lane which will be returned.
+ */
+int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy,
+ struct dsi_mode_info *mode,
+ struct dsi_host_common_cfg *host,
+ struct dsi_phy_per_lane_cfgs *timing)
+{
+ /* constants */
+ u32 const esc_clk_mhz = 192; /* TODO: esc clock is hardcoded */
+ u32 const esc_clk_mmss_cc_prediv = 10;
+ u32 const tlpx_numer = 1000;
+ u32 const tr_eot = 20;
+ u32 const clk_prepare_spec_min = 38;
+ u32 const clk_prepare_spec_max = 95;
+ u32 const clk_trail_spec_min = 60;
+ u32 const hs_exit_spec_min = 100;
+ u32 const hs_exit_reco_max = 255;
+ u32 const hs_rqst_spec_min = 50;
+
+ /* local vars */
+ int rc = 0;
+ int i;
+ u32 h_total, v_total;
+ u64 inter_num;
+ u32 num_of_lanes = 0;
+ u32 bpp;
+ u64 x, y;
+ struct phy_timing_desc desc;
+ struct phy_clk_params clk_params = {0};
+
+ memset(&desc, 0x0, sizeof(desc));
+ h_total = DSI_H_TOTAL(mode);
+ v_total = DSI_V_TOTAL(mode);
+
+ bpp = bits_per_pixel[host->dst_format];
+
+ inter_num = bpp * mode->refresh_rate;
+
+ if (host->data_lanes & DSI_DATA_LANE_0)
+ num_of_lanes++;
+ if (host->data_lanes & DSI_DATA_LANE_1)
+ num_of_lanes++;
+ if (host->data_lanes & DSI_DATA_LANE_2)
+ num_of_lanes++;
+ if (host->data_lanes & DSI_DATA_LANE_3)
+ num_of_lanes++;
+
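+ /*
+  * Per-lane bit clock: pixels per frame times refresh rate gives the
+  * pixel rate; scale by bits per pixel, divide across the active data
+  * lanes and convert the result from bps down to Mbps.
+  */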
+ x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
+ y = rounddown(x, 1);
+
+ clk_params.bitclk_mbps = rounddown(mult_frac(y, 1, 1000000), 1);
+ clk_params.escclk_numer = esc_clk_mhz;
+ clk_params.escclk_denom = esc_clk_mmss_cc_prediv;
+ clk_params.tlpx_numer_ns = tlpx_numer;
+ clk_params.treot_ns = tr_eot;
+
+ /* Setup default parameters */
+ desc.clk_prepare.mipi_min = clk_prepare_spec_min;
+ desc.clk_prepare.mipi_max = clk_prepare_spec_max;
+ desc.clk_trail.mipi_min = clk_trail_spec_min;
+ desc.hs_exit.mipi_min = hs_exit_spec_min;
+ desc.hs_exit.rec_max = hs_exit_reco_max;
+
+ desc.clk_prepare.rec_min = DIV_ROUND_UP(
+ (desc.clk_prepare.mipi_min * clk_params.bitclk_mbps),
+ (8 * clk_params.tlpx_numer_ns)
+ );
+
+ desc.clk_prepare.rec_max = rounddown(
+ mult_frac((desc.clk_prepare.mipi_max * clk_params.bitclk_mbps),
+ 1, (8 * clk_params.tlpx_numer_ns)),
+ 1);
+
+ desc.hs_rqst.mipi_min = hs_rqst_spec_min;
+ desc.hs_rqst_clk.mipi_min = hs_rqst_spec_min;
+
+ pr_debug("BIT CLOCK = %d, tlpx_numer_ns=%d, treot_ns=%d\n",
+ clk_params.bitclk_mbps, clk_params.tlpx_numer_ns,
+ clk_params.treot_ns);
+ rc = dsi_phy_calc_timing_params(&clk_params, &desc);
+ if (rc) {
+ pr_err("Timing calc failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+ timing->lane[i][0] = desc.hs_exit.reg_value;
+
+ if (i == DSI_LOGICAL_CLOCK_LANE) {
+ timing->lane[i][1] = desc.clk_zero.reg_value;
+ timing->lane[i][2] = desc.clk_prepare.reg_value;
+ timing->lane[i][3] = desc.clk_trail.reg_value;
+ timing->lane[i][4] = desc.hs_rqst_clk.reg_value;
+ } else {
+ timing->lane[i][1] = desc.hs_zero.reg_value;
+ timing->lane[i][2] = desc.hs_prepare.reg_value;
+ timing->lane[i][3] = desc.hs_trail.reg_value;
+ timing->lane[i][4] = desc.hs_rqst.reg_value;
+ }
+
+ timing->lane[i][5] = 0x3;
+ timing->lane[i][6] = 0x4;
+ timing->lane[i][7] = 0xA0;
+ pr_debug("[%d][%d %d %d %d %d]\n", i, timing->lane[i][0],
+ timing->lane[i][1],
+ timing->lane[i][2],
+ timing->lane[i][3],
+ timing->lane[i][4]);
+ }
+ timing->count_per_lane = 8;
+
+error:
+ return rc;
+}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index b532faa8026d..210cedb8134d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -595,7 +595,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdelay(16);
if (config->platform.iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
+ mmu = msm_smmu_new(&pdev->dev,
+ MSM_SMMU_DOMAIN_UNSECURE);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(dev->dev, "failed to init iommu: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b88ce514eb8e..67c4518e22e1 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -235,13 +235,20 @@ static int msm_unload(struct drm_device *dev)
return 0;
}
+#define KMS_MDP4 0
+#define KMS_MDP5 1
+#define KMS_SDE 2
+
static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
static const struct of_device_id match_types[] = { {
.compatible = "qcom,mdss_mdp",
- .data = (void *)5,
- }, {
+ .data = (void *)KMS_MDP5,
+ },
+ {
+ .compatible = "qcom,sde-kms",
+ .data = (void *)KMS_SDE,
/* end node */
} };
struct device *dev = &pdev->dev;
@@ -250,7 +257,7 @@ static int get_mdp_ver(struct platform_device *pdev)
if (match)
return (int)(unsigned long)match->data;
#endif
- return 4;
+ return KMS_MDP4;
}
#include <linux/of_address.h>
@@ -369,12 +376,15 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
goto fail;
switch (get_mdp_ver(pdev)) {
- case 4:
+ case KMS_MDP4:
kms = mdp4_kms_init(dev);
break;
- case 5:
+ case KMS_MDP5:
kms = mdp5_kms_init(dev);
break;
+ case KMS_SDE:
+ kms = sde_kms_init(dev);
+ break;
default:
kms = ERR_PTR(-ENODEV);
break;
@@ -1140,6 +1150,7 @@ static const struct platform_device_id msm_id[] = {
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp" }, /* mdp4 */
{ .compatible = "qcom,mdss_mdp" }, /* mdp5 */
+ { .compatible = "qcom,sde-kms" }, /* sde */
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 3be7a56b14f1..e4ebc0fa2f51 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -55,7 +55,12 @@ struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
-#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
+#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
+#define MAX_CRTCS 8
+#define MAX_PLANES 12
+#define MAX_ENCODERS 8
+#define MAX_BRIDGES 8
+#define MAX_CONNECTORS 8
struct msm_file_private {
/* currently we don't do anything useful with this.. but when
@@ -128,19 +133,19 @@ struct msm_drm_private {
struct msm_mmu *mmus[NUM_DOMAINS];
unsigned int num_planes;
- struct drm_plane *planes[8];
+ struct drm_plane *planes[MAX_PLANES];
unsigned int num_crtcs;
- struct drm_crtc *crtcs[8];
+ struct drm_crtc *crtcs[MAX_CRTCS];
unsigned int num_encoders;
- struct drm_encoder *encoders[8];
+ struct drm_encoder *encoders[MAX_ENCODERS];
unsigned int num_bridges;
- struct drm_bridge *bridges[8];
+ struct drm_bridge *bridges[MAX_BRIDGES];
unsigned int num_connectors;
- struct drm_connector *connectors[8];
+ struct drm_connector *connectors[MAX_CONNECTORS];
/* Properties */
struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index c76cc853b08a..6fa56abf0c78 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -295,16 +295,23 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
if (iommu_present(&platform_bus_type)) {
struct msm_mmu *mmu = priv->mmus[id];
- uint32_t offset;
if (WARN_ON(!mmu))
return -EINVAL;
- offset = (uint32_t)mmap_offset(obj);
- ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
- obj->size, IOMMU_READ | IOMMU_WRITE);
- msm_obj->domain[id].iova = offset;
+ if (obj->import_attach && mmu->funcs->map_dma_buf) {
+ ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt,
+ obj->import_attach->dmabuf,
+ DMA_BIDIRECTIONAL);
+ if (ret) {
+ DRM_ERROR("Unable to map dma buf\n");
+ return ret;
+ }
+ }
+ msm_obj->domain[id].iova =
+ sg_dma_address(msm_obj->sgt->sgl);
} else {
+ WARN_ONCE(1, "physical address being used\n");
msm_obj->domain[id].iova = physaddr(obj);
}
}
@@ -524,8 +531,11 @@ void msm_gem_free_object(struct drm_gem_object *obj)
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
struct msm_mmu *mmu = priv->mmus[id];
if (mmu && msm_obj->domain[id].iova) {
- uint32_t offset = msm_obj->domain[id].iova;
- mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
+ if (obj->import_attach && mmu->funcs->unmap_dma_buf) {
+ mmu->funcs->unmap_dma_buf(mmu, msm_obj->sgt,
+ obj->import_attach->dmabuf,
+ DMA_BIDIRECTIONAL);
+ }
}
}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 6fc59bfeedeb..2e4ae6b1c5d0 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -53,8 +53,7 @@ struct msm_gem_object {
void *vaddr;
struct {
- // XXX
- uint32_t iova;
+ dma_addr_t iova;
} domain[NUM_DOMAINS];
/* normally (resv == &_resv) except for imported bo's */
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 9bcabaada179..f2e1a4fb9fae 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -76,5 +76,6 @@ static inline void msm_kms_init(struct msm_kms *kms,
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+struct msm_kms *sde_kms_init(struct drm_device *dev);
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 7cd88d9dc155..6d2f5627bfae 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -20,6 +20,14 @@
#include <linux/iommu.h>
+struct msm_mmu;
+struct msm_gpu;
+
+enum msm_mmu_domain_type {
+ MSM_SMMU_DOMAIN_UNSECURE,
+ MSM_SMMU_DOMAIN_MAX,
+};
+
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
@@ -27,6 +35,14 @@ struct msm_mmu_funcs {
unsigned len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
unsigned len);
+ int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
+ enum dma_data_direction dir);
+ void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
+ enum dma_data_direction dir);
+ int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
+ struct dma_buf *dma_buf, int dir);
+ void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
+ struct dma_buf *dma_buf, int dir);
void (*destroy)(struct msm_mmu *mmu);
};
@@ -44,5 +60,7 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
+struct msm_mmu *msm_smmu_new(struct device *dev,
+ enum msm_mmu_domain_type domain);
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
new file mode 100644
index 000000000000..d51fbedf90c6
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -0,0 +1,432 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/msm_dma_iommu_mapping.h>
+
+#include <asm/dma-iommu.h>
+#include <soc/qcom/secure_buffer.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
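+/*
+ * The SMMU is modelled as a msm_mmu backend: each domain type gets one
+ * client device, which owns the arm_iommu mapping that display buffers
+ * are mapped into.
+ */
+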
+struct msm_smmu_client {
+ struct device *dev;
+ struct dma_iommu_mapping *mmu_mapping;
+ bool domain_attached;
+};
+
+struct msm_smmu {
+ struct msm_mmu base;
+ struct device *client_dev;
+ struct msm_smmu_client client;
+};
+
+struct msm_smmu_domain {
+ const char *label;
+ size_t va_start;
+ size_t va_size;
+ bool secure;
+};
+
+#define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
+#define msm_smmu_to_client(smmu) (&smmu->client)
+
+static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
+ const struct msm_smmu_domain *domain);
+
+static int msm_smmu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ int rc = 0;
+
+ /* domain attach only once */
+ if (client->domain_attached)
+ return 0;
+
+ rc = arm_iommu_attach_device(client->dev,
+ client->mmu_mapping);
+ if (rc) {
+ dev_err(client->dev, "iommu attach dev failed (%d)\n",
+ rc);
+ return rc;
+ }
+
+ client->domain_attached = true;
+
+ dev_dbg(client->dev, "iommu domain attached\n");
+
+ return 0;
+}
+
+static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+ DBG("detaching");
+}
+
+static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
+ struct sg_table *sgt, unsigned len, int prot)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ struct iommu_domain *domain;
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ unsigned int i, j;
+ int ret;
+
+ if (!client)
+ return -ENODEV;
+
+ domain = client->mmu_mapping->domain;
+ if (!domain || !sgt)
+ return -EINVAL;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ u32 pa = sg_phys(sg) - sg->offset;
+ size_t bytes = sg->length + sg->offset;
+
+ VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+
+ ret = iommu_map(domain, da, pa, bytes, prot);
+ if (ret)
+ goto fail;
+
+ da += bytes;
+ }
+
+ return 0;
+
+fail:
+ da = iova;
+
+ for_each_sg(sgt->sgl, sg, i, j) {
+ size_t bytes = sg->length + sg->offset;
+
+ iommu_unmap(domain, da, bytes);
+ da += bytes;
+ }
+ return ret;
+}
+
+static int msm_smmu_map_sg(struct msm_mmu *mmu, struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ int ret;
+
+ ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents, dir);
+ if (ret != sgt->nents)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+ dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
+}
+
+static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
+ struct sg_table *sgt, unsigned len)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ struct iommu_domain *domain;
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ int i;
+
+ if (!client)
+ return -ENODEV;
+
+ domain = client->mmu_mapping->domain;
+ if (!domain || !sgt)
+ return -EINVAL;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = sg->length + sg->offset;
+ size_t unmapped;
+
+ unmapped = iommu_unmap(domain, da, bytes);
+ if (unmapped < bytes)
+ return unmapped;
+
+ VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+
+ WARN_ON(!PAGE_ALIGNED(bytes));
+
+ da += bytes;
+ }
+
+ return 0;
+}
+
+static void msm_smmu_destroy(struct msm_mmu *mmu)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct platform_device *pdev = to_platform_device(smmu->client_dev);
+
+ platform_device_unregister(pdev);
+ kfree(smmu);
+}
+
+static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
+ struct dma_buf *dma_buf, int dir)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ int ret;
+
+ ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents, dir,
+ dma_buf);
+ if (ret != sgt->nents) {
+ DRM_ERROR("dma map sg failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
+ struct dma_buf *dma_buf, int dir)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+ msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf);
+}
+
+static const struct msm_mmu_funcs funcs = {
+ .attach = msm_smmu_attach,
+ .detach = msm_smmu_detach,
+ .map = msm_smmu_map,
+ .map_sg = msm_smmu_map_sg,
+ .unmap_sg = msm_smmu_unmap_sg,
+ .unmap = msm_smmu_unmap,
+ .map_dma_buf = msm_smmu_map_dma_buf,
+ .unmap_dma_buf = msm_smmu_unmap_dma_buf,
+ .destroy = msm_smmu_destroy,
+};
+
+static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
+ [MSM_SMMU_DOMAIN_UNSECURE] = {
+ .label = "mdp_ns",
+ .va_start = SZ_1M,
+ .va_size = SZ_2G,
+ },
+};
+
+static const struct of_device_id msm_smmu_dt_match[] = {
+ { .compatible = "qcom,smmu_mdp_unsec",
+ .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_smmu_dt_match);
+
+static struct device *msm_smmu_device_create(struct device *dev,
+ enum msm_mmu_domain_type domain,
+ struct msm_smmu *smmu)
+{
+ struct device_node *child;
+ struct platform_device *pdev;
+ int i;
+ const char *compat = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(msm_smmu_dt_match); i++) {
+ if (msm_smmu_dt_match[i].data == &msm_smmu_domains[domain]) {
+ compat = msm_smmu_dt_match[i].compatible;
+ break;
+ }
+ }
+
+ if (!compat) {
+ DRM_ERROR("unable to find matching domain for %d\n", domain);
+ return ERR_PTR(-ENOENT);
+ }
+ DRM_INFO("found domain %d compat: %s\n", domain, compat);
+
+ if (domain == MSM_SMMU_DOMAIN_UNSECURE) {
+ int rc;
+
+ smmu->client.dev = dev;
+ rc = _msm_smmu_create_mapping(msm_smmu_to_client(smmu),
+ msm_smmu_dt_match[i].data);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return NULL;
+ }
+
+ child = of_find_compatible_node(dev->of_node, NULL, compat);
+ if (!child) {
+ DRM_ERROR("unable to find compatible node for %s\n", compat);
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_platform_device_create(child, NULL, dev);
+ if (!pdev) {
+ DRM_ERROR("unable to create smmu platform dev for domain %d\n",
+ domain);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return &pdev->dev;
+}
+
+struct msm_mmu *msm_smmu_new(struct device *dev,
+ enum msm_mmu_domain_type domain)
+{
+ struct msm_smmu *smmu;
+ struct device *client_dev;
+
+ smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
+ if (!smmu)
+ return ERR_PTR(-ENOMEM);
+
+ client_dev = msm_smmu_device_create(dev, domain, smmu);
+ if (IS_ERR(client_dev)) {
+ kfree(smmu);
+ return (void *)client_dev;
+ }
+
+ smmu->client_dev = client_dev;
+ msm_mmu_init(&smmu->base, dev, &funcs);
+
+ return &smmu->base;
+}
+
+static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
+ const struct msm_smmu_domain *domain)
+{
+ int rc;
+
+ client->mmu_mapping = arm_iommu_create_mapping(&platform_bus_type,
+ domain->va_start, domain->va_size);
+ if (IS_ERR(client->mmu_mapping)) {
+ dev_err(client->dev,
+ "iommu create mapping failed for domain=%s\n",
+ domain->label);
+ return PTR_ERR(client->mmu_mapping);
+ }
+
+ if (domain->secure) {
+ int secure_vmid = VMID_CP_PIXEL;
+
+ rc = iommu_domain_set_attr(client->mmu_mapping->domain,
+ DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
+ if (rc) {
+ dev_err(client->dev, "couldn't set secure pix vmid\n");
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ arm_iommu_release_mapping(client->mmu_mapping);
+ return rc;
+}
+
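+/*
+ * Illustrative (hypothetical) device tree child node that this driver
+ * binds against for the unsecure MDP domain:
+ *
+ *	qcom,smmu_mdp_unsec {
+ *		compatible = "qcom,smmu_mdp_unsec";
+ *	};
+ */
+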
+/**
+ * msm_smmu_probe()
+ * @pdev: platform device
+ *
+ * Each smmu context acts as a separate device and the context banks are
+ * configured with a VA range.
+ * Registers the clocks as each context bank has its own clocks, which
+ * must be voted for every time before the context bank is used.
+ */
+static int msm_smmu_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct msm_smmu_client *client;
+ const struct msm_smmu_domain *domain;
+ int rc;
+
+ match = of_match_device(msm_smmu_dt_match, &pdev->dev);
+ if (!match || !match->data) {
+ dev_err(&pdev->dev, "probe failed as match data is invalid\n");
+ return -EINVAL;
+ }
+
+ domain = match->data;
+ if (!domain) {
+ dev_err(&pdev->dev, "no matching device found\n");
+ return -EINVAL;
+ }
+
+ DRM_INFO("probing device %s\n", match->compatible);
+
+ client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->dev = &pdev->dev;
+
+ rc = _msm_smmu_create_mapping(client, domain);
+ platform_set_drvdata(pdev, client);
+
+ return rc;
+}
+
+static int msm_smmu_remove(struct platform_device *pdev)
+{
+ struct msm_smmu_client *client;
+
+ client = platform_get_drvdata(pdev);
+ if (client->domain_attached) {
+ arm_iommu_detach_device(client->dev);
+ client->domain_attached = false;
+ }
+ arm_iommu_release_mapping(client->mmu_mapping);
+
+ return 0;
+}
+
+static struct platform_driver msm_smmu_driver = {
+ .probe = msm_smmu_probe,
+ .remove = msm_smmu_remove,
+ .driver = {
+ .name = "msmdrm_smmu",
+ .of_match_table = msm_smmu_dt_match,
+ },
+};
+
+static int __init msm_smmu_driver_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&msm_smmu_driver);
+ if (ret)
+ pr_err("mdss_smmu_register_driver() failed!\n");
+
+ return ret;
+}
+module_init(msm_smmu_driver_init);
+
+static void __exit msm_smmu_driver_cleanup(void)
+{
+ platform_driver_unregister(&msm_smmu_driver);
+}
+module_exit(msm_smmu_driver_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU driver");
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
new file mode 100644
index 000000000000..3c30267e7283
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -0,0 +1,610 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sort.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
+
+#include "sde_kms.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_mdp_ctl.h"
+
+#define CRTC_DUAL_MIXERS 2
+#define PENDING_FLIP 2
+
+#define CRTC_HW_MIXER_MAXSTAGES(c, idx) ((c)->mixer[idx].sblk->maxblendstages)
+
+struct sde_crtc_mixer {
+ struct sde_hw_dspp *hw_dspp;
+ struct sde_hw_mixer *hw_lm;
+ struct sde_hw_ctl *hw_ctl;
+ u32 flush_mask;
+};
+
+struct sde_crtc {
+ struct drm_crtc base;
+ char name[8];
+ struct drm_plane *plane;
+ struct drm_plane *planes[8];
+ struct drm_encoder *encoder;
+ int id;
+ bool enabled;
+
+ spinlock_t lm_lock; /* protect registers */
+
+ /* HW Resources reserved for the crtc */
+ u32 num_ctls;
+ u32 num_mixers;
+ struct sde_crtc_mixer mixer[CRTC_DUAL_MIXERS];
+
+ /* if there is a pending flip, these will be non-null */
+ struct drm_pending_vblank_event *event;
+};
+
+#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
+
+static struct sde_kms *get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ return to_sde_kms(priv->kms);
+}
+
+static inline struct sde_hw_ctl *sde_crtc_rm_get_ctl_path(enum sde_ctl idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ /*
+ * This module keeps track of the requested hw resources state,
+ * if the requested resource is being used it returns NULL,
+ * otherwise it returns the hw driver struct
+ */
+ return sde_hw_ctl_init(idx, addr, m);
+}
+
+static inline struct sde_hw_mixer *sde_crtc_rm_get_mixer(enum sde_lm idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ /*
+ * This module keeps track of the requested hw resources state,
+ * if the requested resource is being used it returns NULL,
+ * otherwise it returns the hw driver struct
+ */
+ return sde_hw_lm_init(idx, addr, m);
+}
+
+static int sde_crtc_reserve_hw_resources(struct drm_crtc *crtc,
+ struct drm_encoder *encoder)
+{
+ /*
+ * Assign CRTC resources
+ * num_ctls;
+ * num_mixers;
+ * sde_lm mixer[CRTC_DUAL_MIXERS];
+ * sde_ctl ctl[CRTC_DUAL_MIXERS];
+ */
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+ struct sde_kms *kms = get_kms(crtc);
+ enum sde_lm lm_id[CRTC_DUAL_MIXERS];
+ enum sde_ctl ctl_id[CRTC_DUAL_MIXERS];
+ int i;
+
+ if (!kms) {
+ DBG("[%s] invalid kms\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!kms->mmio)
+ return -EINVAL;
+
+ /*
+ * simple check validate against catalog
+ */
+ sde_crtc->num_ctls = 1;
+ sde_crtc->num_mixers = 1;
+ ctl_id[0] = CTL_0;
+ lm_id[0] = LM_0;
+
+ /*
+ * need to also enable MDP core clock and AHB CLK
+ * before touching HW driver
+ */
+ DBG("%s Enable clocks\n", __func__);
+ sde_enable(kms);
+ for (i = 0; i < sde_crtc->num_ctls; i++) {
+ sde_crtc->mixer[i].hw_ctl = sde_crtc_rm_get_ctl_path(ctl_id[i],
+ kms->mmio, kms->catalog);
+ if (!sde_crtc->mixer[i].hw_ctl) {
+ DBG("[%s], Invalid ctl_path", __func__);
+ return -EACCES;
+ }
+ }
+
+ for (i = 0; i < sde_crtc->num_mixers; i++) {
+ sde_crtc->mixer[i].hw_lm = sde_crtc_rm_get_mixer(lm_id[i],
+ kms->mmio, kms->catalog);
+ if (!sde_crtc->mixer[i].hw_lm) {
+ DBG("[%s], Invalid ctl_path", __func__);
+ return -EACCES;
+ }
+ }
+ /*
+ * need to disable MDP core clock and AHB CLK
+ */
+ sde_disable(kms);
+ return 0;
+}
+
+static void sde_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+
+ DBG("");
+ drm_crtc_cleanup(crtc);
+ kfree(sde_crtc);
+}
+
+static bool sde_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ DBG("");
+ return true;
+}
+
+static void sde_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+ struct sde_crtc_mixer *mixer = sde_crtc->mixer;
+ struct drm_device *dev = crtc->dev;
+ struct sde_hw_mixer *lm;
+ unsigned long flags;
+ struct drm_display_mode *mode;
+ struct sde_hw_mixer_cfg cfg;
+ u32 mixer_width;
+ int i;
+ int rc;
+
+ DBG("");
+ if (WARN_ON(!crtc->state))
+ return;
+
+ mode = &crtc->state->adjusted_mode;
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ sde_crtc->name, mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ /*
+ * reserve mixer(s) if not already available
+ * if dual mode, mixer_width = half mode width
+ * program mode configuration on mixer(s)
+ */
+ if ((sde_crtc->num_ctls == 0) ||
+ (sde_crtc->num_mixers == 0)) {
+ rc = sde_crtc_reserve_hw_resources(crtc, sde_crtc->encoder);
+ if (rc) {
+ dev_err(dev->dev, " error reserving HW resource for this CRTC\n");
+ return;
+ }
+ }
+
+ if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS)
+ mixer_width = mode->hdisplay >> 1;
+ else
+ mixer_width = mode->hdisplay;
+
+ spin_lock_irqsave(&sde_crtc->lm_lock, flags);
+
+ for (i = 0; i < sde_crtc->num_mixers; i++) {
+ lm = mixer[i].hw_lm;
+ cfg.out_width = mixer_width;
+ cfg.out_height = mode->vdisplay;
+ cfg.right_mixer = (i == 0) ? false : true;
+ cfg.flags = 0;
+ lm->ops.setup_mixer_out(lm, &cfg);
+ }
+
+ spin_unlock_irqrestore(&sde_crtc->lm_lock, flags);
+}
+
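+/*
+ * Derive the mixer blend configuration from the plane state: constant
+ * alpha by default, pixel alpha when the format provides one, and
+ * modified-alpha handling for pre-multiplied content.
+ */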
+static void sde_crtc_get_blend_cfg(struct sde_hw_blend_cfg *cfg,
+ struct sde_plane_state *pstate)
+{
+ const struct mdp_format *format;
+ struct drm_plane *plane;
+
+ format = to_mdp_format(
+ msm_framebuffer_format(pstate->base.fb));
+ plane = pstate->base.plane;
+
+ cfg->fg.alpha_sel = ALPHA_FG_CONST;
+ cfg->bg.alpha_sel = ALPHA_BG_CONST;
+ cfg->fg.const_alpha = pstate->alpha;
+ cfg->bg.const_alpha = 0xFF - pstate->alpha;
+
+ if (format->alpha_enable && pstate->premultiplied) {
+ cfg->fg.alpha_sel = ALPHA_FG_CONST;
+ cfg->bg.alpha_sel = ALPHA_FG_PIXEL;
+ if (pstate->alpha != 0xff) {
+ cfg->bg.const_alpha = pstate->alpha;
+ cfg->bg.inv_alpha_sel = 1;
+ cfg->bg.mod_alpha = 1;
+ } else {
+ cfg->bg.inv_mode_alpha = 1;
+ }
+ } else if (format->alpha_enable) {
+ cfg->fg.alpha_sel = ALPHA_FG_PIXEL;
+ cfg->bg.alpha_sel = ALPHA_FG_PIXEL;
+ if (pstate->alpha != 0xff) {
+ cfg->bg.const_alpha = pstate->alpha;
+ cfg->fg.mod_alpha = 1;
+ cfg->bg.inv_alpha_sel = 1;
+ cfg->bg.mod_alpha = 1;
+ cfg->bg.inv_mode_alpha = 1;
+ } else {
+ cfg->bg.inv_mode_alpha = 1;
+ }
+ }
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+ struct sde_crtc_mixer *mixer = sde_crtc->mixer;
+ struct drm_plane *plane;
+ struct sde_plane_state *pstate, *pstates[SDE_STAGE_MAX] = {0};
+ struct sde_hw_stage_cfg stage_cfg;
+ struct sde_hw_blend_cfg blend;
+ struct sde_hw_ctl *ctl;
+ struct sde_hw_mixer *lm;
+ u32 flush_mask = 0;
+ unsigned long flags;
+ int i, j, plane_cnt = 0;
+
+ spin_lock_irqsave(&sde_crtc->lm_lock, flags);
+
+ /* ctl could be reserved already */
+ if (!sde_crtc->num_ctls)
+ goto out;
+
+ /* initialize stage cfg */
+ memset(&stage_cfg, 0, sizeof(stage_cfg));
+ memset(&blend, 0, sizeof(blend));
+
+ /* Collect all plane information */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_sde_plane_state(plane->state);
+ pstates[pstate->stage] = pstate;
+ plane_cnt++;
+ for (i = 0; i < sde_crtc->num_mixers; i++) {
+ stage_cfg.stage[pstate->stage][i] =
+ sde_plane_pipe(plane);
+
+ /* Cache the flushmask for this layer
+ * sourcesplit is always enabled, so this layer will
+ * be staged on both the mixers
+ */
+ ctl = mixer[i].hw_ctl;
+ ctl->ops.get_bitmask_sspp(ctl, &flush_mask,
+ sde_plane_pipe(plane));
+ }
+ }
+
+ /*
+ * If there is no base layer, enable border color.
+ * currently border color is always black
+ */
+ if ((stage_cfg.stage[SDE_STAGE_BASE][0] == SSPP_NONE) &&
+ plane_cnt) {
+ stage_cfg.border_enable = 1;
+ DBG("Border Color is enabled\n");
+ }
+
+ /* Program hw */
+ for (i = 0; i < sde_crtc->num_mixers; i++) {
+ if (!mixer[i].hw_lm)
+ continue;
+
+ if (!mixer[i].hw_ctl)
+ continue;
+
+ ctl = mixer[i].hw_ctl;
+ lm = mixer[i].hw_lm;
+
+ /* stage config */
+ ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+ &stage_cfg);
+ /* stage config flush mask */
+ mixer[i].flush_mask = flush_mask;
+ /* get the flush mask for mixer */
+ ctl->ops.get_bitmask_mixer(ctl, &mixer[i].flush_mask,
+ mixer[i].hw_lm->idx);
+
+ /* blend config */
+ for (j = SDE_STAGE_0; j < SDE_STAGE_MAX; j++) {
+ if (!pstates[j])
+ continue;
+ sde_crtc_get_blend_cfg(&blend, pstates[j]);
+ blend.fg.alpha_sel = ALPHA_FG_CONST;
+ blend.bg.alpha_sel = ALPHA_BG_CONST;
+ blend.fg.const_alpha = pstates[j]->alpha;
+ blend.bg.const_alpha = 0xFF - pstates[j]->alpha;
+ lm->ops.setup_blend_config(lm, j, &blend);
+ }
+ }
+out:
+ spin_unlock_irqrestore(&sde_crtc->lm_lock, flags);
+}
+
+static void request_pending(struct drm_crtc *crtc, u32 pending)
+{
+ DBG("");
+}
+
+/*
+ * Flush the CTL path
+ */
+static u32 crtc_flush_all(struct drm_crtc *crtc)
+{
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+ struct sde_hw_ctl *ctl;
+ int i;
+
+ DBG("");
+
+ for (i = 0; i < sde_crtc->num_ctls; i++) {
+ /*
+ * Query flush_mask from encoder
+ * and append to the ctl_path flush_mask
+ */
+ ctl = sde_crtc->mixer[i].hw_ctl;
+ ctl->ops.get_bitmask_intf(ctl,
+ &(sde_crtc->mixer[i].flush_mask),
+ INTF_1);
+ ctl->ops.setup_flush(ctl,
+ sde_crtc->mixer[i].flush_mask);
+ }
+
+ return 0;
+}
+
+static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ DBG("");
+
+ WARN_ON(sde_crtc->event);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ sde_crtc->event = crtc->state->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /*
+ * If no CTL has been allocated in sde_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!sde_crtc->num_ctls))
+ return;
+
+ blend_setup(crtc);
+
+ /*
+ * PP_DONE irq is only used by command mode for now.
+ * It is better to request pending before FLUSH and START trigger
+ * to make sure no pp_done irq missed.
+ * This is safe because no pp_done will happen before SW trigger
+ * in command mode.
+ */
+}
+
+static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ DBG("");
+
+ WARN_ON(sde_crtc->event);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ sde_crtc->event = crtc->state->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ /*
+ * If no CTL has been allocated in sde_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!sde_crtc->num_ctls))
+ return;
+
+ crtc_flush_all(crtc);
+
+ request_pending(crtc, PENDING_FLIP);
+}
+
+static int sde_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val)
+{
+ return -EINVAL;
+}
+
+static int sde_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file, uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ return 0;
+}
+
+static int sde_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ return 0;
+}
+
+static void sde_crtc_disable(struct drm_crtc *crtc)
+{
+ DBG("");
+}
+
+static void sde_crtc_enable(struct drm_crtc *crtc)
+{
+ DBG("");
+}
+
+struct plane_state {
+ struct drm_plane *plane;
+ struct sde_plane_state *state;
+};
+
+static int pstate_cmp(const void *a, const void *b)
+{
+ struct plane_state *pa = (struct plane_state *)a;
+ struct plane_state *pb = (struct plane_state *)b;
+
+ return pa->state->zpos - pb->state->zpos;
+}
+
+static int sde_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+ struct sde_kms *sde_kms = get_kms(crtc);
+ struct drm_plane *plane;
+ struct drm_device *dev = crtc->dev;
+ struct plane_state pstates[SDE_STAGE_MAX];
+ int max_stages = CRTC_HW_MIXER_MAXSTAGES(sde_kms->catalog, 0);
+ int cnt = 0, i;
+
+ DBG("%s: check", sde_crtc->name);
+
+ /* verify that there are not too many planes attached to crtc
+ * and that we don't have conflicting mixer stages:
+ */
+ drm_atomic_crtc_state_for_each_plane(plane, state) {
+ struct drm_plane_state *pstate;
+
+ if (cnt >= (max_stages)) {
+ dev_err(dev->dev, "too many planes!\n");
+ return -EINVAL;
+ }
+
+ pstate = state->state->plane_states[drm_plane_index(plane)];
+
+ /* plane might not have changed, in which case take
+ * current state:
+ */
+ if (!pstate)
+ pstate = plane->state;
+ pstates[cnt].plane = plane;
+ pstates[cnt].state = to_sde_plane_state(pstate);
+
+ cnt++;
+ }
+
+ /* assign a stage based on sorted zpos property */
+ sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+
+ for (i = 0; i < cnt; i++) {
+ pstates[i].state->stage = SDE_STAGE_0 + i;
+ DBG("%s: assign pipe %d on stage=%d", sde_crtc->name,
+ sde_plane_pipe(pstates[i].plane),
+ pstates[i].state->stage);
+ }
+
+ return 0;
+}
+
+static const struct drm_crtc_funcs sde_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = sde_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .set_property = sde_crtc_set_property,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .cursor_set = sde_crtc_cursor_set,
+ .cursor_move = sde_crtc_cursor_move,
+};
+
+static const struct drm_crtc_helper_funcs sde_crtc_helper_funcs = {
+ .mode_fixup = sde_crtc_mode_fixup,
+ .mode_set_nofb = sde_crtc_mode_set_nofb,
+ .disable = sde_crtc_disable,
+ .enable = sde_crtc_enable,
+ .atomic_check = sde_crtc_atomic_check,
+ .atomic_begin = sde_crtc_atomic_begin,
+ .atomic_flush = sde_crtc_atomic_flush,
+};
+
+uint32_t sde_crtc_vblank(struct drm_crtc *crtc)
+{
+ return 0;
+}
+
+void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
+{
+}
+
+static void sde_crtc_install_properties(struct drm_crtc *crtc,
+ struct drm_mode_object *obj)
+{
+}
+
+/* initialize crtc */
+struct drm_crtc *sde_crtc_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ struct drm_plane *plane, int id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct sde_crtc *sde_crtc;
+ int rc;
+
+ sde_crtc = kzalloc(sizeof(*sde_crtc), GFP_KERNEL);
+ if (!sde_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = &sde_crtc->base;
+
+ sde_crtc->id = id;
+ sde_crtc->encoder = encoder;
+
+ sde_crtc_install_properties(crtc, &crtc->base);
+
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &sde_crtc_funcs);
+
+ drm_crtc_helper_add(crtc, &sde_crtc_helper_funcs);
+ plane->crtc = crtc;
+
+ rc = sde_crtc_reserve_hw_resources(crtc, encoder);
+ if (rc) {
+ dev_err(dev->dev, " error reserving HW resource for this CRTC\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ DBG("%s: Successfully initialized crtc\n", __func__);
+ return crtc;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
new file mode 100644
index 000000000000..2a3bc3004e6c
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -0,0 +1,523 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_mdp_ctl.h"
+#include "sde_mdp_formats.h"
+
+#include "sde_encoder_phys.h"
+#include "display_manager.h"
+
+#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
+
+#ifdef CONFIG_QCOM_BUS_SCALING
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+ { \
+ .src = MSM_BUS_MASTER_MDP_PORT0, \
+ .dst = MSM_BUS_SLAVE_EBI_CH0, \
+ .ab = (ab_val), \
+ .ib = (ib_val), \
+ }
+
+static struct msm_bus_vectors mdp_bus_vectors[] = {
+ MDP_BUS_VECTOR_ENTRY(0, 0),
+ MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
+};
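+
+/* Use case 0 carries no bandwidth (off); use case 1 requests the
+ * maximum ab/ib vote.
+ */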
+
+static struct msm_bus_paths mdp_bus_usecases[] = { {
+ .num_paths = 1,
+ .vectors =
+ &mdp_bus_vectors[0],
+ }, {
+ .num_paths = 1,
+ .vectors =
+ &mdp_bus_vectors[1],
+ }
+};
+
+static struct msm_bus_scale_pdata mdp_bus_scale_table = {
+ .usecase = mdp_bus_usecases,
+ .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
+ .name = "mdss_mdp",
+};
+
+static void bs_init(struct sde_encoder_virt *sde_enc)
+{
+ sde_enc->bus_scaling_client =
+ msm_bus_scale_register_client(&mdp_bus_scale_table);
+ DBG("bus scale client: %08x", sde_enc->bus_scaling_client);
+}
+
+static void bs_fini(struct sde_encoder_virt *sde_enc)
+{
+ if (sde_enc->bus_scaling_client) {
+ msm_bus_scale_unregister_client(sde_enc->bus_scaling_client);
+ sde_enc->bus_scaling_client = 0;
+ }
+}
+
+static void bs_set(struct sde_encoder_virt *sde_enc, int idx)
+{
+ if (sde_enc->bus_scaling_client) {
+ DBG("set bus scaling: %d", idx);
+ idx = 1;
+ msm_bus_scale_client_update_request(sde_enc->bus_scaling_client,
+ idx);
+ }
+}
+#else
+static void bs_init(struct sde_encoder_virt *sde_enc)
+{
+}
+
+static void bs_fini(struct sde_encoder_virt *sde_enc)
+{
+}
+
+static void bs_set(struct sde_encoder_virt *sde_enc, int idx)
+{
+}
+#endif
+
+void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc,
+ struct sde_encoder_hw_resources *hw_res)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ int i = 0;
+
+ DBG("");
+
+ if (!hw_res || !drm_enc) {
+ DRM_ERROR("Invalid pointer");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ /* Query resources used by phys encs, expected to be without overlap */
+ memset(hw_res, 0, sizeof(*hw_res));
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->phys_ops.get_hw_resources)
+ phys->phys_ops.get_hw_resources(phys, hw_res);
+ }
+}
+
+static void sde_encoder_destroy(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ int i = 0;
+
+ DBG("");
+
+ if (!drm_enc) {
+ DRM_ERROR("Invalid pointer");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ for (i = 0; i < ARRAY_SIZE(sde_enc->phys_encs); i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->phys_ops.destroy) {
+ phys->phys_ops.destroy(phys);
+ --sde_enc->num_phys_encs;
+ sde_enc->phys_encs[i] = NULL;
+ }
+ }
+
+ if (sde_enc->num_phys_encs) {
+ DRM_ERROR("Expected num_phys_encs to be 0 not %d\n",
+ sde_enc->num_phys_encs);
+ }
+
+ drm_encoder_cleanup(drm_enc);
+ bs_fini(sde_enc);
+ kfree(sde_enc);
+}
+
+static bool sde_encoder_virt_mode_fixup(struct drm_encoder *drm_enc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ int i = 0;
+ bool ret = true;
+
+ DBG("");
+
+ if (!drm_enc) {
+ DRM_ERROR("Invalid pointer");
+ return false;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->phys_ops.mode_fixup) {
+			ret = phys->phys_ops.mode_fixup(phys, mode,
+					adjusted_mode);
+ if (!ret) {
+ DBG("Mode unsupported by phys_enc %d", i);
+ break;
+ }
+
+ if (sde_enc->num_phys_encs > 1) {
+ DBG("ModeFix only checking 1 phys_enc");
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ int i = 0;
+
+ DBG("");
+
+ if (!drm_enc) {
+ DRM_ERROR("Invalid pointer");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->phys_ops.mode_set)
+ phys->phys_ops.mode_set(phys, mode, adjusted_mode);
+ }
+}
+
+static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ int i = 0;
+
+ DBG("");
+
+ if (!drm_enc) {
+ DRM_ERROR("Invalid pointer");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ bs_set(sde_enc, 1);
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->phys_ops.enable)
+ phys->phys_ops.enable(phys);
+ }
+}
+
+static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ int i = 0;
+
+ DBG("");
+
+ if (!drm_enc) {
+ DRM_ERROR("Invalid pointer");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->phys_ops.disable)
+ phys->phys_ops.disable(phys);
+ }
+
+ bs_set(sde_enc, 0);
+}
+
+static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = {
+ .mode_fixup = sde_encoder_virt_mode_fixup,
+ .mode_set = sde_encoder_virt_mode_set,
+ .disable = sde_encoder_virt_disable,
+ .enable = sde_encoder_virt_enable,
+};
+
+static const struct drm_encoder_funcs sde_encoder_funcs = {
+ .destroy = sde_encoder_destroy,
+};
+
+static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog,
+ enum sde_intf_type type, u32 controller_id)
+{
+ int i = 0;
+
+ DBG("");
+
+ for (i = 0; i < catalog->intf_count; i++) {
+ if (catalog->intf[i].type == type
+ && catalog->intf[i].controller_id == controller_id) {
+ return catalog->intf[i].id;
+ }
+ }
+
+ return INTF_MAX;
+}
+
+static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ unsigned long lock_flags;
+
+ DBG("");
+
+ if (!drm_enc) {
+ DRM_ERROR("Invalid pointer");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
+ if (sde_enc->kms_vblank_callback)
+ sde_enc->kms_vblank_callback(sde_enc->kms_vblank_callback_data);
+ spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
+}
+
+static int sde_encoder_virt_add_phys_vid_enc(struct sde_encoder_virt *sde_enc,
+ struct sde_kms *sde_kms,
+ enum sde_intf intf_idx,
+ enum sde_ctl ctl_idx)
+{
+ int ret = 0;
+
+ DBG("");
+
+ if (sde_enc->num_phys_encs >= ARRAY_SIZE(sde_enc->phys_encs)) {
+ DRM_ERROR("Too many video encoders %d, unable to add\n",
+ sde_enc->num_phys_encs);
+ ret = -EINVAL;
+ } else {
+		struct sde_encoder_virt_ops parent_ops = {
+			.handle_vblank_virt = sde_encoder_vblank_callback
+		};
+ struct sde_encoder_phys *enc =
+ sde_encoder_phys_vid_init(sde_kms, intf_idx, ctl_idx,
+ &sde_enc->base,
+ parent_ops);
+ if (IS_ERR(enc))
+ ret = PTR_ERR(enc);
+
+ if (!ret) {
+ sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+ ++sde_enc->num_phys_encs;
+ }
+ }
+
+ return ret;
+}
+
+static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
+ struct sde_kms *sde_kms,
+ struct display_info *disp_info,
+ int *drm_enc_mode)
+{
+ int ret = 0;
+ int i = 0;
+ enum sde_intf_type intf_type = INTF_NONE;
+
+ DBG("");
+
+ if (disp_info->intf == DISPLAY_INTF_DSI) {
+ *drm_enc_mode = DRM_MODE_ENCODER_DSI;
+ intf_type = INTF_DSI;
+ } else if (disp_info->intf == DISPLAY_INTF_HDMI) {
+ *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+ intf_type = INTF_HDMI;
+ } else {
+ DRM_ERROR("Unsupported display interface type");
+ return -EINVAL;
+ }
+
+ WARN_ON(disp_info->num_of_h_tiles < 1);
+
+ DBG("dsi_info->num_of_h_tiles %d", disp_info->num_of_h_tiles);
+
+ for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
+ /*
+ * Left-most tile is at index 0, content is controller id
+ * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
+ * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
+ */
+ enum sde_intf intf_idx = INTF_MAX;
+ enum sde_ctl ctl_idx = CTL_0;
+ u32 controller_id = disp_info->h_tile_instance[i];
+
+ if (intf_type == INTF_HDMI)
+ ctl_idx = CTL_2;
+
+ DBG("h_tile_instance %d = %d", i, controller_id);
+
+ intf_idx = sde_encoder_get_intf(sde_kms->catalog,
+ intf_type, controller_id);
+ if (intf_idx == INTF_MAX) {
+ DBG("Error: could not get the interface id");
+ ret = -EINVAL;
+ }
+
+ /* Create both VID and CMD Phys Encoders here */
+ if (!ret)
+ ret = sde_encoder_virt_add_phys_vid_enc(
+ sde_enc, sde_kms, intf_idx, ctl_idx);
+ }
+
+ return ret;
+}
+
+static struct drm_encoder *sde_encoder_virt_init(
+ struct drm_device *dev, struct display_info *disp_info)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct sde_kms *sde_kms = to_sde_kms(priv->kms);
+ struct drm_encoder *drm_enc = NULL;
+ struct sde_encoder_virt *sde_enc = NULL;
+ int drm_enc_mode = DRM_MODE_ENCODER_NONE;
+ int ret = 0;
+
+ DBG("");
+
+ sde_enc = kzalloc(sizeof(*sde_enc), GFP_KERNEL);
+ if (!sde_enc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = sde_encoder_setup_display(sde_enc, sde_kms, disp_info,
+ &drm_enc_mode);
+ if (ret)
+ goto fail;
+
+ spin_lock_init(&sde_enc->spin_lock);
+ drm_enc = &sde_enc->base;
+ drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode);
+ drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs);
+ bs_init(sde_enc);
+
+ DBG("Created encoder");
+
+ return drm_enc;
+
+fail:
+	DRM_ERROR("Failed to create encoder\n");
+	if (sde_enc) {	/* drm_enc is never initialized on this path */
+		while (sde_enc->num_phys_encs > 0) {
+			struct sde_encoder_phys *phys =
+				sde_enc->phys_encs[--sde_enc->num_phys_encs];
+			if (phys && phys->phys_ops.destroy)
+				phys->phys_ops.destroy(phys);
+		}
+		kfree(sde_enc);
+	}
+	return ERR_PTR(ret);
+}
+
+void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
+ void (*cb)(void *), void *data)
+{
+ struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+
+ DBG("");
+
+ spin_lock_irqsave(&sde_enc->spin_lock, lock_flags);
+ sde_enc->kms_vblank_callback = cb;
+ sde_enc->kms_vblank_callback_data = data;
+ spin_unlock_irqrestore(&sde_enc->spin_lock, lock_flags);
+}
+
+/*
+ * encoders init: initialize one virtual encoder per display
+ */
+void sde_encoders_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = NULL;
+ struct display_manager *disp_man = NULL;
+ u32 i = 0;
+ u32 num_displays = 0;
+
+ DBG("");
+
+ if (!dev || !dev->dev_private) {
+ DRM_ERROR("Invalid pointer");
+ return;
+ }
+
+ priv = dev->dev_private;
+ priv->num_encoders = 0;
+ if (!priv->kms || !priv->dm) {
+ DRM_ERROR("Invalid pointer");
+ return;
+ }
+ disp_man = priv->dm;
+
+ num_displays = display_manager_get_count(disp_man);
+ DBG("num_displays %d", num_displays);
+
+ if (num_displays > ARRAY_SIZE(priv->encoders)) {
+ num_displays = ARRAY_SIZE(priv->encoders);
+ DRM_ERROR("Too many displays found, capping to %d",
+ num_displays);
+ }
+
+ for (i = 0; i < num_displays; i++) {
+ struct display_info info = { 0 };
+ struct drm_encoder *enc = NULL;
+ u32 ret = 0;
+
+ ret = display_manager_get_info_by_index(disp_man, i, &info);
+ if (ret) {
+ DRM_ERROR("Failed to get display info, %d", ret);
+ return;
+ }
+
+ enc = sde_encoder_virt_init(dev, &info);
+ if (IS_ERR_OR_NULL(enc)) {
+ DRM_ERROR("Encoder initialization failed");
+ return;
+ }
+
+ ret = display_manager_drm_init_by_index(disp_man, i, enc);
+ if (ret) {
+ DRM_ERROR("Display drm_init failed, %d", ret);
+ return;
+ }
+
+ priv->encoders[priv->num_encoders++] = enc;
+ }
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
new file mode 100644
index 000000000000..27fc11175c19
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDE_ENCODER_PHYS_H__
+#define __SDE_ENCODER_PHYS_H__
+
+#include "sde_kms.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_mdp_ctl.h"
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL 4
+
+struct sde_encoder_phys;
+
+struct sde_encoder_virt_ops {
+ void (*handle_vblank_virt)(struct drm_encoder *);
+};
+
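+/**
+ * struct sde_encoder_phys_ops - hooks implemented by a physical encoder;
+ *	the virtual encoder fans each drm_encoder callback out to these
+ * @mode_set: cache the adjusted mode for later hardware programming
+ * @mode_fixup: validate and optionally adjust the requested mode
+ * @enable: program and start the interface timing engine
+ * @disable: stop the timing engine and wait for the disable to latch
+ * @destroy: free the physical encoder
+ * @get_hw_resources: report the hardware blocks this encoder uses
+ */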
+struct sde_encoder_phys_ops {
+ void (*mode_set)(struct sde_encoder_phys *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ bool (*mode_fixup)(struct sde_encoder_phys *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*enable)(struct sde_encoder_phys *encoder);
+ void (*disable)(struct sde_encoder_phys *encoder);
+ void (*destroy)(struct sde_encoder_phys *encoder);
+ void (*get_hw_resources)(struct sde_encoder_phys *encoder,
+ struct sde_encoder_hw_resources *hw_res);
+};
+
+struct sde_encoder_phys {
+ struct drm_encoder *parent;
+ struct sde_encoder_virt_ops parent_ops;
+ struct sde_encoder_phys_ops phys_ops;
+ struct sde_hw_intf *hw_intf;
+ struct sde_hw_ctl *hw_ctl;
+ struct sde_kms *sde_kms;
+ struct drm_display_mode cached_mode;
+ bool enabled;
+ spinlock_t spin_lock;
+};
+
+/**
+ * struct sde_encoder_phys_vid - sub-class of sde_encoder_phys to handle video
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @irq_idx: IRQ interface lookup index
+ * @vblank_complete: for vblank irq synchronization
+ */
+struct sde_encoder_phys_vid {
+ struct sde_encoder_phys base;
+ int irq_idx;
+ struct completion vblank_complete;
+};
+
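+/**
+ * struct sde_encoder_virt - virtual encoder, fans out to phys encoders
+ * @base: drm_encoder base class
+ * @spin_lock: protects the vblank callback members
+ * @bus_scaling_client: bus scaling client handle, 0 when unregistered
+ * @num_phys_encs: number of valid entries in phys_encs
+ * @phys_encs: one physical encoder per hardware interface
+ * @kms_vblank_callback: vblank notification into the kms layer
+ * @kms_vblank_callback_data: opaque argument for the vblank callback
+ */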
+struct sde_encoder_virt {
+ struct drm_encoder base;
+ spinlock_t spin_lock;
+ uint32_t bus_scaling_client;
+
+ int num_phys_encs;
+ struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+
+ void (*kms_vblank_callback)(void *);
+ void *kms_vblank_callback_data;
+};
+
+struct sde_encoder_phys *sde_encoder_phys_vid_init(struct sde_kms *sde_kms,
+ enum sde_intf intf_idx,
+ enum sde_ctl ctl_idx,
+ struct drm_encoder *parent,
+				    struct sde_encoder_virt_ops parent_ops);
+
+#endif /* __SDE_ENCODER_PHYS_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
new file mode 100644
index 000000000000..693e1f33e7d8
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_intf.h"
+#include "sde_mdp_formats.h"
+
+#include "sde_encoder_phys.h"
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
new file mode 100644
index 000000000000..33d1a8eef7a5
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#include "sde_encoder_phys.h"
+#include "sde_mdp_formats.h"
+
+#define VBLANK_TIMEOUT msecs_to_jiffies(100)
+
+#define to_sde_encoder_phys_vid(x) \
+ container_of(x, struct sde_encoder_phys_vid, base)
+
+static bool sde_encoder_phys_vid_is_master(
+		struct sde_encoder_phys *phys_enc)
+{
+	/* Currently every video phys encoder acts as master */
+	return true;
+}
+
+static void sde_encoder_phys_vid_wait_for_vblank(
+ struct sde_encoder_phys_vid *vid_enc)
+{
+ int rc = 0;
+
+ DBG("");
+ rc = wait_for_completion_timeout(&vid_enc->vblank_complete,
+ VBLANK_TIMEOUT);
+ if (rc == 0)
+ DRM_ERROR("Timed out waiting for vblank irq\n");
+}
+
+static void drm_mode_to_intf_timing_params(
+ const struct sde_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct intf_timing_params *timing)
+{
+ memset(timing, 0, sizeof(*timing));
+ /*
+ * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+ * Active Region Front Porch Sync Back Porch
+ * <-----------------><------------><-----><----------->
+ * <- [hv]display --->
+ * <--------- [hv]sync_start ------>
+ * <----------------- [hv]sync_end ------->
+ * <---------------------------- [hv]total ------------->
+ */
+ timing->width = mode->hdisplay; /* active width */
+ timing->height = mode->vdisplay; /* active height */
+ timing->xres = timing->width;
+ timing->yres = timing->height;
+ timing->h_back_porch = mode->htotal - mode->hsync_end;
+ timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+ timing->v_back_porch = mode->vtotal - mode->vsync_end;
+ timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+ timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+ timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+ timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+ timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+ timing->border_clr = 0;
+ timing->underflow_clr = 0xff;
+ timing->hsync_skew = mode->hskew;
+
+ /* DSI controller cannot handle active-low sync signals. */
+ if (phys_enc->hw_intf->cap->type == INTF_DSI) {
+ timing->hsync_polarity = 0;
+ timing->vsync_polarity = 0;
+ }
+
+ /*
+ * For edp only:
+ * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+ * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+ */
+ /*
+ * if (vid_enc->hw->cap->type == INTF_EDP) {
+ * display_v_start += mode->htotal - mode->hsync_start;
+ * display_v_end -= mode->hsync_start - mode->hdisplay;
+ * }
+ */
+}
+
+static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->xres;
+ u32 inactive =
+ timing->h_back_porch + timing->h_front_porch +
+ timing->hsync_pulse_width;
+ return active + inactive;
+}
+
+static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->yres;
+ u32 inactive =
+ timing->v_back_porch + timing->v_front_porch +
+ timing->vsync_pulse_width;
+ return active + inactive;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ * Number of fetch lines in vertical front porch
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have very large VFP, however we only need a total number of
+ * lines based on the chip worst case latencies.
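+ *
+ * For instance (hypothetical panel numbers): with a worst case of 21 lines
+ * and vbp + vsw = 4 + 4 = 8 lines, 21 - 8 = 13 lines must come out of the
+ * vfp; a panel exposing only vfp = 10 would be capped to those 10 lines.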
+ */
+static u32 programmable_fetch_get_num_lines(
+ struct sde_encoder_phys *phys_enc,
+ const struct intf_timing_params *timing)
+{
+ u32 worst_case_needed_lines =
+ phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+ u32 start_of_frame_lines =
+ timing->v_back_porch + timing->vsync_pulse_width;
+ u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
+ u32 actual_vfp_lines = 0;
+
+ /* Fetch must be outside active lines, otherwise undefined. */
+
+ if (start_of_frame_lines >= worst_case_needed_lines) {
+ DBG("Programmable fetch is not needed due to large vbp+vsw");
+ actual_vfp_lines = 0;
+ } else if (timing->v_front_porch < needed_vfp_lines) {
+ /* Warn fetch needed, but not enough porch in panel config */
+		pr_warn_once("low vbp+vfp may lead to perf issues in some cases\n");
+ DBG("Less vfp than fetch requires, using entire vfp");
+ actual_vfp_lines = timing->v_front_porch;
+ } else {
+ DBG("Room in vfp for needed prefetch");
+ actual_vfp_lines = needed_vfp_lines;
+ }
+
+ DBG("v_front_porch %u v_back_porch %u vsync_pulse_width %u",
+ timing->v_front_porch, timing->v_back_porch,
+ timing->vsync_pulse_width);
+ DBG("wc_lines %u needed_vfp_lines %u actual_vfp_lines %u",
+ worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+ return actual_vfp_lines;
+}
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ * the start of fetch into the vertical front porch for cases where the
+ * vsync pulse width and vertical back porch time are insufficient
+ *
+ * Gets # of lines to pre-fetch, then calculates the VSYNC counter value.
+ * HW layer requires VSYNC counter of first pixel of tgt VFP line.
+ *
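+ * E.g. with vert_total = 1125, horiz_total = 2200 (1080p-class timing) and
+ * 13 fetch lines, the value programmed is (1125 - 13) * 2200 + 1 = 2446401.
+ *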
+ * @timing: Pointer to the intf timing information for the requested mode
+ */
+static void programmable_fetch_config(struct sde_encoder_phys *phys_enc,
+ const struct intf_timing_params *timing)
+{
+ struct intf_prog_fetch f = { 0 };
+ u32 vfp_fetch_lines = 0;
+ u32 horiz_total = 0;
+ u32 vert_total = 0;
+ u32 vfp_fetch_start_vsync_counter = 0;
+ unsigned long lock_flags;
+
+ if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch))
+ return;
+
+ vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing);
+ if (vfp_fetch_lines) {
+ vert_total = get_vertical_total(timing);
+ horiz_total = get_horizontal_total(timing);
+ vfp_fetch_start_vsync_counter =
+ (vert_total - vfp_fetch_lines) * horiz_total + 1;
+ f.enable = 1;
+ f.fetch_start = vfp_fetch_start_vsync_counter;
+ }
+
+ DBG("vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u",
+ vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+ spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
+ phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f);
+ spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
+}
+
+static bool sde_encoder_phys_vid_mode_fixup(
+ struct sde_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ DBG("");
+
+ /*
+ * Modifying mode has consequences when the mode comes back to us
+ */
+ return true;
+}
+
+static void sde_encoder_phys_vid_flush_intf(struct sde_encoder_phys *phys_enc)
+{
+ struct sde_hw_intf *intf = phys_enc->hw_intf;
+ struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
+ u32 flush_mask = 0;
+
+ DBG("");
+
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+ ctl->ops.setup_flush(ctl, flush_mask);
+
+ DBG("Flushing CTL_ID %d, flush_mask %x, INTF %d",
+ ctl->idx, flush_mask, intf->idx);
+}
+
+static void sde_encoder_phys_vid_mode_set(
+ struct sde_encoder_phys *phys_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ phys_enc->cached_mode = *adj_mode;
+ DBG("intf %d, caching mode:", phys_enc->hw_intf->idx);
+ drm_mode_debug_printmodeline(adj_mode);
+}
+
+static void sde_encoder_phys_vid_setup_timing_engine(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct drm_display_mode *mode = &phys_enc->cached_mode;
+ struct intf_timing_params p = { 0 };
+ struct sde_mdp_format_params *sde_fmt_params = NULL;
+ u32 fmt_fourcc = DRM_FORMAT_RGB888;
+ u32 fmt_mod = 0;
+ unsigned long lock_flags;
+ struct sde_hw_intf_cfg intf_cfg = { 0 };
+
+ if (WARN_ON(!phys_enc->hw_intf->ops.setup_timing_gen))
+ return;
+
+ if (WARN_ON(!phys_enc->hw_ctl->ops.setup_intf_cfg))
+ return;
+
+ DBG("enable mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start, mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start, mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ drm_mode_to_intf_timing_params(phys_enc, mode, &p);
+
+ sde_fmt_params = sde_mdp_get_format_params(fmt_fourcc, fmt_mod);
+
+ intf_cfg.intf = phys_enc->hw_intf->idx;
+ intf_cfg.wb = SDE_NONE;
+
+ spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
+ phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf, &p,
+ sde_fmt_params);
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
+
+ programmable_fetch_config(phys_enc, &p);
+}
+
+static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+ struct sde_encoder_phys_vid *vid_enc = arg;
+ struct sde_encoder_phys *phys_enc = &vid_enc->base;
+
+ phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent);
+
+ /* signal VBLANK completion */
+ complete_all(&vid_enc->vblank_complete);
+}
+
+static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_vid *vid_enc =
+ to_sde_encoder_phys_vid(phys_enc);
+ struct sde_irq_callback irq_cb;
+ int ret = 0;
+
+ vid_enc->irq_idx = sde_irq_idx_lookup(phys_enc->sde_kms,
+ SDE_IRQ_TYPE_INTF_VSYNC, phys_enc->hw_intf->idx);
+ if (vid_enc->irq_idx < 0) {
+ DRM_ERROR(
+ "Failed to lookup IRQ index for INTF_VSYNC with intf=%d\n",
+ phys_enc->hw_intf->idx);
+ return -EINVAL;
+ }
+
+ irq_cb.func = sde_encoder_phys_vid_vblank_irq;
+ irq_cb.arg = vid_enc;
+ ret = sde_register_irq_callback(phys_enc->sde_kms, vid_enc->irq_idx,
+ &irq_cb);
+ if (ret) {
+ DRM_ERROR("Failed to register IRQ callback INTF_VSYNC\n");
+ return ret;
+ }
+
+ ret = sde_enable_irq(phys_enc->sde_kms, &vid_enc->irq_idx, 1);
+ if (ret) {
+ DRM_ERROR(
+ "Failed to enable IRQ for INTF_VSYNC, intf %d, irq_idx=%d\n",
+ phys_enc->hw_intf->idx,
+ vid_enc->irq_idx);
+		/* Unregister callback on IRQ enable failure */
+		sde_register_irq_callback(phys_enc->sde_kms, vid_enc->irq_idx,
+				NULL);
+		vid_enc->irq_idx = -EINVAL;
+		return ret;
+ }
+
+ DBG("Registered IRQ for intf %d, irq_idx=%d\n",
+ phys_enc->hw_intf->idx,
+ vid_enc->irq_idx);
+
+ return ret;
+}
+
+static int sde_encoder_phys_vid_unregister_irq(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_vid *vid_enc =
+ to_sde_encoder_phys_vid(phys_enc);
+
+ sde_register_irq_callback(phys_enc->sde_kms, vid_enc->irq_idx, NULL);
+ sde_disable_irq(phys_enc->sde_kms, &vid_enc->irq_idx, 1);
+
+ DBG("Un-Register IRQ for intf %d, irq_idx=%d\n",
+ phys_enc->hw_intf->idx,
+ vid_enc->irq_idx);
+
+ return 0;
+}
+
+static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
+{
+ int ret = 0;
+
+ DBG("");
+
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
+ return;
+
+ sde_encoder_phys_vid_setup_timing_engine(phys_enc);
+
+ sde_encoder_phys_vid_flush_intf(phys_enc);
+
+ /* Register for interrupt unless we're the slave encoder */
+ if (sde_encoder_phys_vid_is_master(phys_enc))
+ ret = sde_encoder_phys_vid_register_irq(phys_enc);
+
+ if (!ret && !phys_enc->enabled) {
+ unsigned long lock_flags = 0;
+
+ /* Now enable timing engine */
+ spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1);
+ spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
+
+ phys_enc->enabled = true;
+ }
+}
+
+static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
+{
+ unsigned long lock_flags;
+ struct sde_encoder_phys_vid *vid_enc =
+ to_sde_encoder_phys_vid(phys_enc);
+
+ DBG("");
+
+ if (WARN_ON(!phys_enc->enabled))
+ return;
+
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
+ return;
+
+ spin_lock_irqsave(&phys_enc->spin_lock, lock_flags);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
+ reinit_completion(&vid_enc->vblank_complete);
+ spin_unlock_irqrestore(&phys_enc->spin_lock, lock_flags);
+
+ /*
+ * Wait for a vsync so we know the ENABLE=0 latched before
+	 * the (connector) source of the vsyncs gets disabled,
+	 * otherwise we end up in a funny state if we re-enable
+	 * before the disable latches, in which case some of the
+	 * settings for the new modeset (like the new scanout
+	 * buffer) don't latch properly.
+ */
+ sde_encoder_phys_vid_wait_for_vblank(vid_enc);
+ sde_encoder_phys_vid_unregister_irq(phys_enc);
+ phys_enc->enabled = false;
+}
+
+static void sde_encoder_phys_vid_destroy(struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_vid *vid_enc =
+ to_sde_encoder_phys_vid(phys_enc);
+ DBG("");
+	kfree(phys_enc->hw_intf);
+	kfree(phys_enc->hw_ctl);	/* allocated by sde_hw_ctl_init() */
+ kfree(vid_enc);
+}
+
+static void sde_encoder_phys_vid_get_hw_resources(
+ struct sde_encoder_phys *phys_enc,
+ struct sde_encoder_hw_resources *hw_res)
+{
+ DBG("");
+ hw_res->intfs[phys_enc->hw_intf->idx] = true;
+}
+
+static void sde_encoder_phys_vid_init_cbs(struct sde_encoder_phys_ops *ops)
+{
+ ops->mode_set = sde_encoder_phys_vid_mode_set;
+ ops->mode_fixup = sde_encoder_phys_vid_mode_fixup;
+ ops->enable = sde_encoder_phys_vid_enable;
+ ops->disable = sde_encoder_phys_vid_disable;
+ ops->destroy = sde_encoder_phys_vid_destroy;
+ ops->get_hw_resources = sde_encoder_phys_vid_get_hw_resources;
+}
+
+struct sde_encoder_phys *sde_encoder_phys_vid_init(
+ struct sde_kms *sde_kms,
+ enum sde_intf intf_idx,
+ enum sde_ctl ctl_idx,
+ struct drm_encoder *parent,
+ struct sde_encoder_virt_ops parent_ops)
+{
+ struct sde_encoder_phys *phys_enc = NULL;
+ struct sde_encoder_phys_vid *vid_enc = NULL;
+ int ret = 0;
+
+ DBG("");
+
+ vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
+ if (!vid_enc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ vid_enc->irq_idx = -EINVAL;
+ init_completion(&vid_enc->vblank_complete);
+
+ phys_enc = &vid_enc->base;
+
+ phys_enc->hw_intf =
+ sde_hw_intf_init(intf_idx, sde_kms->mmio, sde_kms->catalog);
+ if (!phys_enc->hw_intf) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phys_enc->hw_ctl = sde_hw_ctl_init(ctl_idx, sde_kms->mmio,
+ sde_kms->catalog);
+ if (!phys_enc->hw_ctl) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sde_encoder_phys_vid_init_cbs(&phys_enc->phys_ops);
+ phys_enc->parent = parent;
+ phys_enc->parent_ops = parent_ops;
+ phys_enc->sde_kms = sde_kms;
+ spin_lock_init(&phys_enc->spin_lock);
+
+ DBG("Created sde_encoder_phys_vid for intf %d", phys_enc->hw_intf->idx);
+
+ return phys_enc;
+
+fail:
+ DRM_ERROR("Failed to create encoder\n");
+ if (vid_enc)
+ sde_encoder_phys_vid_destroy(phys_enc);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
new file mode 100644
index 000000000000..118bb786da7e
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -0,0 +1,37 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_catalog.h"
+
+struct sde_mdss_hw_cfg_handler cfg_table[] = {
+ { .major = 1, .minor = 7, .cfg_init = sde_mdss_cfg_170_init},
+};
+
+/**
+ * sde_hw_catalog_init: Returns the catalog information for the
+ * passed HW version
+ * @major: Major version of the MDSS HW
+ * @minor: Minor version
+ * @step: step version
+ */
+struct sde_mdss_cfg *sde_hw_catalog_init(u32 major, u32 minor, u32 step)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cfg_table); i++) {
+ if ((cfg_table[i].major == major) &&
+ (cfg_table[i].minor == minor))
+ return cfg_table[i].cfg_init(step);
+ }
+
+ return ERR_PTR(-ENODEV);
+}
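+
+/*
+ * A hypothetical caller looks up the 8996 v1.0 catalog as follows;
+ * ERR_PTR(-ENODEV) is returned when no table entry matches:
+ *
+ *	struct sde_mdss_cfg *cfg = sde_hw_catalog_init(1, 7, 0);
+ *	if (IS_ERR(cfg))
+ *		return PTR_ERR(cfg);
+ */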
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
new file mode 100644
index 000000000000..46972f2d5dfd
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -0,0 +1,479 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_CATALOG_H
+#define _SDE_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+
+#define MAX_BLOCKS 8
+#define MAX_LAYERS 12
+
+#define SDE_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28) |\
+ ((MINOR & 0xFFF) << 16) |\
+ (STEP & 0xFFFF))
+
+#define SDE_HW_MAJOR(rev) ((rev) >> 28)
+#define SDE_HW_MINOR(rev)		(((rev) >> 16) & 0xFFF)
+#define SDE_HW_STEP(rev) ((rev) & 0xFFFF)
+#define SDE_HW_MAJOR_MINOR(rev) ((rev) >> 16)
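+
+/*
+ * For example, SDE_HW_VER(1, 7, 0) packs to 0x10070000, from which
+ * SDE_HW_MAJOR() recovers 1, SDE_HW_MINOR() 7 and SDE_HW_STEP() 0.
+ */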
+
+#define IS_SDE_MAJOR_MINOR_SAME(rev1, rev2) \
+ (SDE_HW_MAJOR_MINOR((rev1)) == SDE_HW_MAJOR_MINOR((rev2)))
+
+#define SDE_HW_VER_170 SDE_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define SDE_HW_VER_171 SDE_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define SDE_HW_VER_172 SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define SDE_HW_VER_300 SDE_HW_VER(3, 0, 0) /* 8998 v1.0 */
+
+/**
+ * MDP TOP BLOCK features
+ * @SDE_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @SDE_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @SDE_MDP_BWC, MDSS HW supports Bandwidth compression.
+ * @SDE_MDP_UBWC_1_0, This chipset supports Universal Bandwidth
+ *                    compression initial revision
+ * @SDE_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
+ * @SDE_MDP_CDP, Client driven prefetch
+ * @SDE_MDP_MAX Maximum value
+ */
+enum {
+ SDE_MDP_PANIC_PER_PIPE = 0x1,
+ SDE_MDP_10BIT_SUPPORT,
+ SDE_MDP_BWC,
+ SDE_MDP_UBWC_1_0,
+ SDE_MDP_UBWC_1_5,
+ SDE_MDP_CDP,
+ SDE_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @SDE_SSPP_SRC Src and fetch part of the pipes,
+ * @SDE_SSPP_SCALAR_QSEED2, QSEED2 algorithm support
+ * @SDE_SSPP_SCALAR_QSEED3, QSEED3 algorithm support
+ * @SDE_SSPP_SCALAR_RGB, RGB Scalar, supported by RGB pipes
+ * @SDE_SSPP_CSC, Support of Color space conversion
+ * @SDE_SSPP_PA_V1, Common op-mode register for PA blocks
+ * @SDE_SSPP_HIST_V1 Histogram programming method V1
+ * @SDE_SSPP_IGC, Inverse gamma correction
+ * @SDE_SSPP_PCC, Color correction support
+ * @SDE_SSPP_CURSOR, SSPP can be used as a cursor layer
+ * @SDE_SSPP_MAX maximum value
+ */
+enum {
+ SDE_SSPP_SRC = 0x1,
+ SDE_SSPP_SCALAR_QSEED2,
+ SDE_SSPP_SCALAR_QSEED3,
+ SDE_SSPP_SCALAR_RGB,
+ SDE_SSPP_CSC,
+ SDE_SSPP_PA_V1, /* Common op-mode register for PA blocks */
+ SDE_SSPP_HIST_V1,
+ SDE_SSPP_IGC,
+ SDE_SSPP_PCC,
+ SDE_SSPP_CURSOR,
+ SDE_SSPP_MAX
+};
+
+/*
+ * MIXER sub-blocks/features
+ * @SDE_MIXER_LAYER Layer mixer layer blend configuration,
+ * @SDE_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
+ * @SDE_MIXER_GC Gamma correction block
+ * @SDE_MIXER_MAX maximum value
+ */
+enum {
+ SDE_MIXER_LAYER = 0x1,
+ SDE_MIXER_SOURCESPLIT,
+ SDE_MIXER_GC,
+ SDE_MIXER_MAX
+};
+
+/**
+ * DSPP sub-blocks
+ * @SDE_DSPP_IGC DSPP Inverse gamma correction block
+ * @SDE_DSPP_PCC Panel color correction block
+ * @SDE_DSPP_GC Gamma correction block
+ * @SDE_DSPP_PA Picture adjustment block
+ * @SDE_DSPP_GAMUT Gamut block
+ * @SDE_DSPP_DITHER Dither block
+ * @SDE_DSPP_HIST Histogram block
+ * @SDE_DSPP_MAX maximum value
+ */
+enum {
+ SDE_DSPP_IGC = 0x1,
+ SDE_DSPP_PCC,
+ SDE_DSPP_GC,
+ SDE_DSPP_PA,
+ SDE_DSPP_GAMUT,
+ SDE_DSPP_DITHER,
+ SDE_DSPP_HIST,
+ SDE_DSPP_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @SDE_PINGPONG_TE Tear check block
+ * @SDE_PINGPONG_TE2 Additional tear check block for split pipes
+ * @SDE_PINGPONG_SPLIT PP block supports split fifo
+ * @SDE_PINGPONG_DSC, Display stream compression blocks
+ * @SDE_PINGPONG_MAX
+ */
+enum {
+ SDE_PINGPONG_TE = 0x1,
+ SDE_PINGPONG_TE2,
+ SDE_PINGPONG_SPLIT,
+ SDE_PINGPONG_DSC,
+ SDE_PINGPONG_MAX
+};
+
+/**
+ * WB sub-blocks and features
+ * @SDE_WB_LINE_MODE Writeback module supports line/linear mode
+ * @SDE_WB_BLOCK_MODE Writeback module supports block mode read
+ * @SDE_WB_ROTATE rotation support,this is available if writeback
+ * supports block mode read
+ * @SDE_WB_CSC Writeback color conversion block support
+ * @SDE_WB_CHROMA_DOWN, Writeback chroma down block,
+ * @SDE_WB_DOWNSCALE, Writeback integer downscaler,
+ * @SDE_WB_DITHER, Dither block
+ * @SDE_WB_TRAFFIC_SHAPER, Writeback traffic shaper block
+ * @SDE_WB_UBWC_1_0, Writeback Universal bandwidth compression 1.0
+ *                  support
+ * @SDE_WB_UBWC_1_5 UBWC 1.5 support
+ * @SDE_WB_MAX maximum value
+ */
+enum {
+ SDE_WB_LINE_MODE = 0x1,
+ SDE_WB_BLOCK_MODE,
+ SDE_WB_ROTATE = SDE_WB_BLOCK_MODE,
+ SDE_WB_CSC,
+ SDE_WB_CHROMA_DOWN,
+ SDE_WB_DOWNSCALE,
+ SDE_WB_DITHER,
+ SDE_WB_TRAFFIC_SHAPER,
+ SDE_WB_UBWC_1_0,
+ SDE_WB_MAX
+};
+
+/**
+ * MACRO SDE_HW_BLK_INFO - information of HW blocks inside SDE
+ * @id: enum identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ */
+#define SDE_HW_BLK_INFO \
+ u32 id; \
+ u32 base; \
+ unsigned long features
+
+/**
+ * MACRO SDE_HW_SUBBLK_INFO - information of HW sub-block inside SDE
+ * @id: enum identifying this sub-block
+ * @base: offset of this sub-block relative to the block
+ * offset
+ * @len register block length of this sub-block
+ */
+#define SDE_HW_SUBBLK_INFO \
+ u32 id; \
+ u32 base; \
+ u32 len
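+
+/*
+ * For example, a block config such as struct sde_ctl_cfg below expands to:
+ *
+ *	struct sde_ctl_cfg {
+ *		u32 id;
+ *		u32 base;
+ *		unsigned long features;
+ *	};
+ */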
+
+/**
+ * struct sde_src_blk: SSPP part of the source pipes
+ * @info: HW register and features supported by this sub-blk
+ */
+struct sde_src_blk {
+ SDE_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct sde_scalar_blk: Scaler information
+ * @info: HW register and features supported by this sub-blk
+ */
+struct sde_scalar_blk {
+ SDE_HW_SUBBLK_INFO;
+};
+
+struct sde_csc_blk {
+ SDE_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct sde_pp_blk : Pixel processing sub-blk information
+ * @info: HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct sde_pp_blk {
+ SDE_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+/**
+ * struct sde_sspp_sub_blks : SSPP sub-blocks
+ * @maxdwnscale: max downscale ratio supported(without DECIMATION)
+ * @maxupscale: maxupscale ratio supported
+ * @maxlinewidth: max line width in pixels supported by this pipe
+ * @danger_lut: LUT to generate danger signals
+ * @safe_lut: LUT to generate safe signals
+ * @src_blk:
+ * @scalar_blk:
+ * @csc_blk:
+ * @pa_blk:
+ * @hist_lut:
+ * @pcc_blk:
+ */
+struct sde_sspp_sub_blks {
+ u32 maxlinewidth;
+ u32 danger_lut;
+ u32 safe_lut;
+ u32 maxdwnscale;
+ u32 maxupscale;
+ struct sde_src_blk src_blk;
+ struct sde_scalar_blk scalar_blk;
+ struct sde_pp_blk csc_blk;
+ struct sde_pp_blk pa_blk;
+ struct sde_pp_blk hist_lut;
+ struct sde_pp_blk pcc_blk;
+};
+
+/**
+ * struct sde_lm_sub_blks: information of mixer block
+ * @maxwidth: Max pixel width supported by this mixer
+ * @maxblendstages: Max number of blend-stages supported
+ * @blendstage_base: Blend-stage register base offset
+ */
+struct sde_lm_sub_blks {
+ u32 maxwidth;
+ u32 maxblendstages;
+ u32 blendstage_base[MAX_BLOCKS];
+};
+
+struct sde_dspp_sub_blks {
+ struct sde_pp_blk igc;
+ struct sde_pp_blk pcc;
+ struct sde_pp_blk gc;
+ struct sde_pp_blk pa;
+ struct sde_pp_blk gamut;
+ struct sde_pp_blk dither;
+ struct sde_pp_blk hist;
+};
+
+struct sde_pingpong_sub_blks {
+ struct sde_pp_blk te;
+ struct sde_pp_blk te2;
+ struct sde_pp_blk dsc;
+};
+
+struct sde_wb_sub_blocks {
+ u32 maxlinewidth;
+};
+
+struct sde_mdss_base_cfg {
+ SDE_HW_BLK_INFO;
+};
+
+/* struct sde_mdp_cfg : MDP TOP-BLK instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ * @highest_bank_bit: UBWC parameter
+ */
+struct sde_mdp_cfg {
+ SDE_HW_BLK_INFO;
+ u32 highest_bank_bit;
+};
+
+/* struct sde_ctl_cfg : MDP CTL instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ */
+struct sde_ctl_cfg {
+ SDE_HW_BLK_INFO;
+};
+
+/**
+ * struct sde_sspp_cfg - information of source pipes
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: Sub-blocks of SSPP
+ */
+struct sde_sspp_cfg {
+ SDE_HW_BLK_INFO;
+ const struct sde_sspp_sub_blks *sblk;
+};
+
+/**
+ * struct sde_lm_cfg - information of layer mixer blocks
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: Sub-blocks of the layer mixer
+ */
+struct sde_lm_cfg {
+ SDE_HW_BLK_INFO;
+ const struct sde_lm_sub_blks *sblk;
+};
+
+/**
+ * struct sde_dspp_cfg - information of DSPP blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * supported by this block
+ * @sblk sub-blocks information
+ */
+struct sde_dspp_cfg {
+ SDE_HW_BLK_INFO;
+ const struct sde_dspp_sub_blks *sblk;
+};
+
+/**
+ * struct sde_pingpong_cfg - information of PING-PONG blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk sub-blocks information
+ */
+struct sde_pingpong_cfg {
+ SDE_HW_BLK_INFO;
+ const struct sde_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct sde_cdm_cfg - information of chroma down blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @intf_connect Connects to which interfaces
+ * @wb_connect: Connects to which writebacks
+ */
+struct sde_cdm_cfg {
+ SDE_HW_BLK_INFO;
+ u32 intf_connect[MAX_BLOCKS];
+ u32 wb_connect[MAX_BLOCKS];
+};
+
+/**
+ * struct sde_intf_cfg - information of timing engine blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @type: Interface type(DSI, DP, HDMI)
+ * @controller_id: Controller Instance ID in case of multiple of intf type
+ * @prog_fetch_lines_worst_case Worst case latency num lines needed to prefetch
+ */
+struct sde_intf_cfg {
+ SDE_HW_BLK_INFO;
+	u32 type; /* interface type */
+ u32 controller_id;
+ u32 prog_fetch_lines_worst_case;
+};
+
+/**
+ * struct sde_wb_cfg - information of writeback blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ */
+struct sde_wb_cfg {
+ SDE_HW_BLK_INFO;
+ struct sde_wb_sub_blocks *sblk;
+};
+
+/**
+ * struct sde_ad_cfg - information of Assertive Display blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ */
+struct sde_ad_cfg {
+ SDE_HW_BLK_INFO;
+};
+
+/**
+ * struct sde_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains number of instances,
+ * register offsets, capabilities of the all MDSS HW sub-blocks.
+ */
+struct sde_mdss_cfg {
+ u32 hwversion;
+
+ u32 mdss_count;
+ struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
+
+ u32 mdp_count;
+ struct sde_mdp_cfg mdp[MAX_BLOCKS];
+
+ u32 ctl_count;
+ struct sde_ctl_cfg ctl[MAX_BLOCKS];
+
+ u32 sspp_count;
+ struct sde_sspp_cfg sspp[MAX_LAYERS];
+
+ u32 mixer_count;
+ struct sde_lm_cfg mixer[MAX_BLOCKS];
+
+ u32 dspp_count;
+ struct sde_dspp_cfg dspp[MAX_BLOCKS];
+
+ u32 pingpong_count;
+ struct sde_pingpong_cfg pingpong[MAX_BLOCKS];
+
+ u32 cdm_count;
+ struct sde_cdm_cfg cdm[MAX_BLOCKS];
+
+ u32 intf_count;
+ struct sde_intf_cfg intf[MAX_BLOCKS];
+
+ u32 wb_count;
+ struct sde_wb_cfg wb[MAX_BLOCKS];
+
+ u32 ad_count;
+ struct sde_ad_cfg ad[MAX_BLOCKS];
+ /* Add additional block data structures here */
+};
+
+struct sde_mdss_hw_cfg_handler {
+ u32 major;
+ u32 minor;
+ struct sde_mdss_cfg* (*cfg_init)(u32);
+};
+
+/*
+ * Access Macros
+ */
+#define BLK_MDP(s) ((s)->mdp)
+#define BLK_CTL(s) ((s)->ctl)
+/* all SSPP types (VIG/RGB/DMA/CURSOR) live in the common sspp array */
+#define BLK_VIG(s) ((s)->sspp)
+#define BLK_RGB(s) ((s)->sspp)
+#define BLK_DMA(s) ((s)->sspp)
+#define BLK_CURSOR(s) ((s)->sspp)
+#define BLK_MIXER(s) ((s)->mixer)
+#define BLK_DSPP(s) ((s)->dspp)
+#define BLK_PINGPONG(s) ((s)->pingpong)
+#define BLK_CDM(s) ((s)->cdm)
+#define BLK_INTF(s) ((s)->intf)
+#define BLK_WB(s) ((s)->wb)
+#define BLK_AD(s) ((s)->ad)
+
+struct sde_mdss_cfg *sde_mdss_cfg_170_init(u32 step);
+struct sde_mdss_cfg *sde_hw_catalog_init(u32 major, u32 minor, u32 step);
+
+#endif /* _SDE_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c
new file mode 100644
index 000000000000..7fb5a0616838
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_8996.c
@@ -0,0 +1,303 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+
+/* VIG layer capability */
+#define VIG_17X_MASK \
+ (BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_SCALAR_QSEED2) |\
+ BIT(SDE_SSPP_CSC) | BIT(SDE_SSPP_PA_V1) |\
+ BIT(SDE_SSPP_HIST_V1) | BIT(SDE_SSPP_PCC) |\
+ BIT(SDE_SSPP_IGC))
+
+/* RGB layer capability */
+#define RGB_17X_MASK \
+ (BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_SCALAR_RGB) |\
+ BIT(SDE_SSPP_PCC) | BIT(SDE_SSPP_IGC))
+
+/* DMA layer capability */
+#define DMA_17X_MASK \
+ (BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_PA_V1) |\
+ BIT(SDE_SSPP_PCC) | BIT(SDE_SSPP_IGC))
+
+/* Cursor layer capability */
+#define CURSOR_17X_MASK (BIT(SDE_SSPP_SRC) | BIT(SDE_SSPP_CURSOR))
+
+#define MIXER_17X_MASK (BIT(SDE_MIXER_SOURCESPLIT) |\
+ BIT(SDE_MIXER_GC))
+
+#define DSPP_17X_MASK \
+ (BIT(SDE_DSPP_IGC) | BIT(SDE_DSPP_PCC) |\
+ BIT(SDE_DSPP_GC) | BIT(SDE_DSPP_PA) | BIT(SDE_DSPP_GAMUT) |\
+ BIT(SDE_DSPP_DITHER) | BIT(SDE_DSPP_HIST))
+
+#define PINGPONG_17X_MASK \
+ (BIT(SDE_PINGPONG_TE) | BIT(SDE_PINGPONG_DSC))
+
+#define PINGPONG_17X_SPLIT_MASK \
+ (PINGPONG_17X_MASK | BIT(SDE_PINGPONG_SPLIT) |\
+ BIT(SDE_PINGPONG_TE2))
+
+#define WB01_17X_MASK \
+ (BIT(SDE_WB_LINE_MODE) | BIT(SDE_WB_BLOCK_MODE) |\
+ BIT(SDE_WB_CSC) | BIT(SDE_WB_CHROMA_DOWN) | BIT(SDE_WB_DOWNSCALE) |\
+ BIT(SDE_WB_DITHER) | BIT(SDE_WB_TRAFFIC_SHAPER) |\
+ BIT(SDE_WB_UBWC_1_0))
+
+#define WB2_17X_MASK \
+ (BIT(SDE_WB_LINE_MODE) | BIT(SDE_WB_TRAFFIC_SHAPER))
+
+/**
+ * set_cfg_1xx_init(): populate sde sub-blocks reg offsets and instance counts
+ */
+static inline int set_cfg_1xx_init(struct sde_mdss_cfg *cfg)
+{
+ /* Layer capability */
+ static const struct sde_sspp_sub_blks layer = {
+ .maxlinewidth = 2560,
+ .danger_lut = 0xFFFF,
+ .safe_lut = 0xFF00,
+ .maxdwnscale = 4, .maxupscale = 20,
+ .src_blk = {.id = SDE_SSPP_SRC,
+ .base = 0x00, .len = 0x150,},
+ .scalar_blk = {.id = SDE_SSPP_SCALAR_QSEED2,
+ .base = 0x200, .len = 0x70,},
+ .csc_blk = {.id = SDE_SSPP_CSC,
+ .base = 0x320, .len = 0x44,},
+ .pa_blk = {.id = SDE_SSPP_PA_V1,
+ .base = 0x200, .len = 0x0,},
+ .hist_lut = {.id = SDE_SSPP_HIST_V1,
+ .base = 0xA00, .len = 0x400,},
+ .pcc_blk = {.id = SDE_SSPP_PCC,
+ .base = 0x1780, .len = 0x64,},
+ };
+
+ static const struct sde_sspp_sub_blks dma = {
+ .maxlinewidth = 2560,
+ .danger_lut = 0xFFFF,
+ .safe_lut = 0xFF00,
+ .maxdwnscale = 0, .maxupscale = 0,
+ .src_blk = {.id = SDE_SSPP_SRC, .base = 0x00, .len = 0x0,},
+ .scalar_blk = {.id = 0, .base = 0x00, .len = 0x0,},
+ .csc_blk = {.id = 0, .base = 0x00, .len = 0x0,},
+ .pa_blk = {.id = 0, .base = 0x00, .len = 0x0,},
+ .hist_lut = {.id = 0, .base = 0x00, .len = 0x0,},
+ .pcc_blk = {.id = SDE_SSPP_PCC, .base = 0x01780, .len = 0x64,},
+ };
+
+ static const struct sde_sspp_sub_blks cursor = {
+ .maxlinewidth = 128,
+ .danger_lut = 0xFFFF,
+ .safe_lut = 0xFF00,
+ .maxdwnscale = 0, .maxupscale = 0,
+ .src_blk = {.id = SDE_SSPP_SRC, .base = 0x00, .len = 0x0,},
+ .scalar_blk = {.id = 0, .base = 0x00, .len = 0x0,},
+ .csc_blk = {.id = 0, .base = 0x00, .len = 0x0,},
+ .pa_blk = {.id = 0, .base = 0x00, .len = 0x0,},
+ .hist_lut = {.id = 0, .base = 0x00, .len = 0x0,},
+ .pcc_blk = {.id = 0, .base = 0x00, .len = 0x0,},
+ };
+
+ /* MIXER capability */
+ static const struct sde_lm_sub_blks lm = {
+ .maxwidth = 2560,
+ .maxblendstages = 7, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x50, 0x80, 0xB0, 0x230, 0x260, 0x290 }
+ };
+
+ /* DSPP capability */
+ static const struct sde_dspp_sub_blks pp = {
+		.igc = {.id = SDE_DSPP_IGC, .base = 0x17c0, .len = 0x0,
+ .version = 0x1},
+ .pcc = {.id = SDE_DSPP_PCC, .base = 0x00, .len = 0x0,
+ .version = 0x1},
+ .gamut = {.id = SDE_DSPP_GAMUT, .base = 0x01600, .len = 0x0,
+ .version = 0x1},
+ .dither = {.id = SDE_DSPP_DITHER, .base = 0x00, .len = 0x0,
+ .version = 0x1},
+ .pa = {.id = SDE_DSPP_PA, .base = 0x00, .len = 0x0,
+ .version = 0x1},
+ .hist = {.id = SDE_DSPP_HIST, .base = 0x00, .len = 0x0,
+ .version = 0x1},
+ };
+
+ /* PINGPONG capability */
+ static const struct sde_pingpong_sub_blks p_p = {
+ .te = {.id = SDE_PINGPONG_TE, .base = 0x0000, .len = 0x0,
+ .version = 0x1},
+ .te2 = {.id = SDE_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+ .version = 0x1},
+ .dsc = {.id = SDE_PINGPONG_DSC, .base = 0x10000, .len = 0x0,
+ .version = 0x1},
+ };
+
+ /* Setup Register maps and defaults */
+ *cfg = (struct sde_mdss_cfg){
+ .mdss_count = 1,
+ .mdss = {
+ {.id = MDP_TOP, .base = 0x00000000, .features = 0}
+ },
+ .mdp_count = 1,
+ .mdp = {
+ {.id = MDP_TOP, .base = 0x00001000, .features = 0,
+ .highest_bank_bit = 0x2},
+ },
+ .ctl_count = 5,
+ .ctl = {
+ {.id = CTL_0, .base = 0x00002000},
+ {.id = CTL_1, .base = 0x00002200},
+ {.id = CTL_2, .base = 0x00002400},
+ {.id = CTL_3, .base = 0x00002600},
+ {.id = CTL_4, .base = 0x00002800},
+ },
+ /* 4 VIG, + 4 RGB + 2 DMA + 2 CURSOR */
+ .sspp_count = 12,
+ .sspp = {
+ {.id = SSPP_VIG0, .base = 0x00005000,
+ .features = VIG_17X_MASK, .sblk = &layer},
+ {.id = SSPP_VIG1, .base = 0x00007000,
+ .features = VIG_17X_MASK, .sblk = &layer},
+ {.id = SSPP_VIG2, .base = 0x00009000,
+ .features = VIG_17X_MASK, .sblk = &layer},
+ {.id = SSPP_VIG3, .base = 0x0000b000,
+ .features = VIG_17X_MASK, .sblk = &layer},
+
+ {.id = SSPP_RGB0, .base = 0x00015000,
+ .features = RGB_17X_MASK, .sblk = &layer},
+ {.id = SSPP_RGB1, .base = 0x00017000,
+ .features = RGB_17X_MASK, .sblk = &layer},
+ {.id = SSPP_RGB2, .base = 0x00019000,
+ .features = RGB_17X_MASK, .sblk = &layer},
+ {.id = SSPP_RGB3, .base = 0x0001B000,
+ .features = RGB_17X_MASK, .sblk = &layer},
+
+ {.id = SSPP_DMA0, .base = 0x00025000,
+ .features = DMA_17X_MASK, .sblk = &dma},
+ {.id = SSPP_DMA1, .base = 0x00027000,
+ .features = DMA_17X_MASK, .sblk = &dma},
+
+ {.id = SSPP_CURSOR0, .base = 0x00035000,
+ .features = CURSOR_17X_MASK, .sblk = &cursor},
+ {.id = SSPP_CURSOR1, .base = 0x00037000,
+ .features = CURSOR_17X_MASK, .sblk = &cursor},
+ },
+ .mixer_count = 6,
+ .mixer = {
+ {.id = LM_0, .base = 0x00045000,
+ .features = MIXER_17X_MASK,
+ .sblk = &lm},
+ {.id = LM_1, .base = 0x00046000,
+ .features = MIXER_17X_MASK,
+ .sblk = &lm},
+ {.id = LM_2, .base = 0x00047000,
+ .features = MIXER_17X_MASK,
+ .sblk = &lm},
+ {.id = LM_3, .base = 0x00048000,
+ .features = MIXER_17X_MASK,
+ .sblk = &lm},
+ {.id = LM_4, .base = 0x00049000,
+ .features = MIXER_17X_MASK,
+ .sblk = &lm},
+ {.id = LM_5, .base = 0x0004a000,
+ .features = MIXER_17X_MASK,
+ .sblk = &lm},
+ },
+ .dspp_count = 2,
+ .dspp = {
+ {.id = DSPP_0, .base = 0x00055000,
+ .features = DSPP_17X_MASK,
+ .sblk = &pp},
+ {.id = DSPP_1, .base = 0x00057000,
+ .features = DSPP_17X_MASK,
+ .sblk = &pp},
+ },
+ .pingpong_count = 4,
+ .pingpong = {
+ {.id = PINGPONG_0, .base = 0x00071000,
+ .features = PINGPONG_17X_SPLIT_MASK,
+ .sblk = &p_p},
+ {.id = PINGPONG_1, .base = 0x00071800,
+ .features = PINGPONG_17X_SPLIT_MASK,
+ .sblk = &p_p},
+ {.id = PINGPONG_2, .base = 0x00072000,
+ .features = PINGPONG_17X_MASK,
+ .sblk = &p_p},
+ {.id = PINGPONG_3, .base = 0x00072800,
+ .features = PINGPONG_17X_MASK,
+ .sblk = &p_p},
+ },
+ .cdm_count = 1,
+ .cdm = {
+ {.id = CDM_0, .base = 0x0007A200, .features = 0,
+ .intf_connect = { BIT(INTF_3)},
+ .wb_connect = { BIT(WB_2)},}
+ },
+ .intf_count = 4,
+ .intf = {
+ {.id = INTF_0, .base = 0x0006B000,
+ .type = INTF_NONE, .controller_id = 0,
+ .prog_fetch_lines_worst_case = 21},
+ {.id = INTF_1, .base = 0x0006B800,
+ .type = INTF_DSI, .controller_id = 0,
+ .prog_fetch_lines_worst_case = 21},
+ {.id = INTF_2, .base = 0x0006C000,
+ .type = INTF_DSI, .controller_id = 1,
+ .prog_fetch_lines_worst_case = 21},
+ {.id = INTF_3, .base = 0x0006C800,
+ .type = INTF_HDMI, .controller_id = 0,
+ .prog_fetch_lines_worst_case = 21},
+ },
+ .wb_count = 3,
+ .wb = {
+ {.id = WB_0, .base = 0x00065000,
+ .features = WB01_17X_MASK},
+ {.id = WB_1, .base = 0x00065800,
+ .features = WB01_17X_MASK},
+ {.id = WB_2, .base = 0x00066000,
+ .features = WB2_17X_MASK},
+ },
+ .ad_count = 2,
+ .ad = {
+ {.id = AD_0, .base = 0x00079000},
+ {.id = AD_1, .base = 0x00079800},
+ },
+ };
+ return 0;
+}
+
+/**
+ * sde_mdss_cfg_170_init(): Populate the sde sub-blocks catalog information
+ */
+struct sde_mdss_cfg *sde_mdss_cfg_170_init(u32 step)
+{
+ struct sde_mdss_cfg *m = NULL;
+
+ /*
+	 * For each sub-block this sets the instance count, IO regions,
+	 * default capabilities, the capabilities of this version and
+	 * any additional catalog items.
+ */
+
+ m = kzalloc(sizeof(*m), GFP_KERNEL);
+ if (!m)
+		return ERR_PTR(-ENOMEM);
+
+ set_cfg_1xx_init(m);
+ m->hwversion = SDE_HW_VER(1, 7, step);
+
+ return m;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
new file mode 100644
index 000000000000..25fc55191045
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
@@ -0,0 +1,296 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_cdm.h"
+
+#define CDM_CSC_10_OPMODE 0x000
+#define CDM_CSC_10_BASE 0x004
+
+#define CDM_CDWN2_OP_MODE 0x100
+#define CDM_CDWN2_CLAMP_OUT 0x104
+#define CDM_CDWN2_PARAMS_3D_0 0x108
+#define CDM_CDWN2_PARAMS_3D_1 0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
+#define CDM_CDWN2_COEFF_COSITE_V 0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
+#define CDM_CDWN2_OUT_SIZE 0x130
+
+#define CDM_HDMI_PACK_OP_MODE 0x200
+#define CDM_CSC_10_MATRIX_COEFF_0 0x204
+
+/**
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/**
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/**
+ * Vertical coefficients for cosite chroma downscale
+ */
+static u32 cosite_v_coeff[] = {0x00080004};
+/**
+ * Vertical coefficients for offsite chroma downscale
+ */
+static u32 offsite_v_coeff[] = {0x00060002};
+
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+static struct sde_csc_cfg rgb2yuv_cfg = {
+ {
+ 0x0083, 0x0102, 0x0032,
+ 0x1fb5, 0x1f6c, 0x00e1,
+ 0x00e1, 0x1f45, 0x1fdc
+ },
+ { 0x00, 0x00, 0x00 },
+ { 0x0040, 0x0200, 0x0200 },
+ { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+ { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
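+
+/*
+ * The 3x3 coefficients above appear to be signed fixed point with nine
+ * fractional bits, e.g. 0x0083 = 131 ~= 0.257 * 512, the BT.601
+ * limited-range luma coefficient for R.
+ */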
+
+static struct sde_cdm_cfg *_cdm_offset(enum sde_cdm cdm,
+ struct sde_mdss_cfg *m,
+ void __iomem *addr,
+ struct sde_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->cdm_count; i++) {
+ if (cdm == m->cdm[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->cdm[i].base;
+ b->hwversion = m->hwversion;
+ return &m->cdm[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void sde_hw_cdm_setup_csc_10bit(struct sde_hw_cdm *ctx,
+ struct sde_csc_cfg *data)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+ sde_hw_csc_setup(c, CDM_CSC_10_MATRIX_COEFF_0, data);
+}
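+
+/*
+ * CDM_CDWN2_OP_MODE bit layout, as derived from the field manipulation
+ * in sde_hw_cdm_setup_cdwn() below:
+ *	BIT(0)    EN        enable the chroma down module
+ *	BIT(1)    DWNS_H    enable horizontal downscale
+ *	BIT(2)    DWNS_V    enable vertical downscale
+ *	bits 4:3  METHOD_H  0 pixel drop, 1 average, 2 cosite, 3 offsite
+ *	bits 6:5  METHOD_V  same encoding as METHOD_H
+ *	BIT(7)    output depth, set for 8-bit, clear for 10-bit
+ */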
+
+int sde_hw_cdm_setup_cdwn(struct sde_hw_cdm *ctx,
+ struct sde_hw_cdm_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 opmode = 0;
+ u32 out_size = 0;
+
+ if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
+ opmode &= ~BIT(7);
+ else
+ opmode |= BIT(7);
+
+ /* ENABLE DWNS_H bit */
+ opmode |= BIT(1);
+
+ switch (cfg->h_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* CLEAR METHOD_H field */
+ opmode &= ~(0x18);
+ /* CLEAR DWNS_H bit */
+ opmode &= ~BIT(1);
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ /* Clear METHOD_H field (pixel drop is 0) */
+ opmode &= ~(0x18);
+ break;
+ case CDM_CDWN_AVG:
+ /* Clear METHOD_H field (Average is 0x1) */
+ opmode &= ~(0x18);
+ opmode |= (0x1 << 0x3);
+ break;
+ case CDM_CDWN_COSITE:
+		/* Clear METHOD_H field (Cosite is 0x2) */
+ opmode &= ~(0x18);
+ opmode |= (0x2 << 0x3);
+ /* Co-site horizontal coefficients */
+ SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+ cosite_h_coeff[0]);
+ SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+ cosite_h_coeff[1]);
+ SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+ cosite_h_coeff[2]);
+ break;
+ case CDM_CDWN_OFFSITE:
+		/* Clear METHOD_H field (Offsite is 0x3) */
+ opmode &= ~(0x18);
+ opmode |= (0x3 << 0x3);
+
+ /* Off-site horizontal coefficients */
+ SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+ offsite_h_coeff[0]);
+ SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+ offsite_h_coeff[1]);
+ SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+ offsite_h_coeff[2]);
+ break;
+ default:
+ pr_err("%s invalid horz down sampling type\n", __func__);
+ return -EINVAL;
+ }
+
+ /* ENABLE DWNS_V bit */
+ opmode |= BIT(2);
+
+ switch (cfg->v_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* CLEAR METHOD_V field */
+ opmode &= ~(0x60);
+ /* CLEAR DWNS_V bit */
+ opmode &= ~BIT(2);
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ /* Clear METHOD_V field (pixel drop is 0) */
+ opmode &= ~(0x60);
+ break;
+ case CDM_CDWN_AVG:
+ /* Clear METHOD_V field (Average is 0x1) */
+ opmode &= ~(0x60);
+ opmode |= (0x1 << 0x5);
+ break;
+ case CDM_CDWN_COSITE:
+		/* Clear METHOD_V field (Cosite is 0x2) */
+ opmode &= ~(0x60);
+ opmode |= (0x2 << 0x5);
+ /* Co-site vertical coefficients */
+ SDE_REG_WRITE(c,
+ CDM_CDWN2_COEFF_COSITE_V,
+ cosite_v_coeff[0]);
+ break;
+ case CDM_CDWN_OFFSITE:
+		/* Clear METHOD_V field (Offsite is 0x3) */
+ opmode &= ~(0x60);
+ opmode |= (0x3 << 0x5);
+
+ /* Off-site vertical coefficients */
+ SDE_REG_WRITE(c,
+ CDM_CDWN2_COEFF_OFFSITE_V,
+ offsite_v_coeff[0]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+ opmode |= BIT(0); /* EN CDWN module */
+ else
+ opmode &= ~BIT(0);
+
+ out_size = (cfg->output_width & 0xFFFF) |
+ ((cfg->output_height & 0xFFFF) << 16);
+ SDE_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+ SDE_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
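+	/* clamp the downscaler output: max 0x3FF (10-bit), min 0 */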
+ SDE_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
+ ((0x3FF << 16) | 0x0));
+
+ return 0;
+}
+
+int sde_hw_cdm_enable(struct sde_hw_cdm *ctx,
+ struct sde_hw_cdm_cfg *cdm)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ struct sde_mdp_format_params *fmt = cdm->output_fmt;
+ u32 opmode = 0;
+ u32 cdm_enable = 0;
+ u32 csc = 0;
+
+ if (!fmt->is_yuv)
+ return 0;
+
+ if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+ if (fmt->chroma_sample != SDE_MDP_CHROMA_H1V2)
+ return -EINVAL; /* unsupported format */
+ opmode = BIT(0);
+ opmode |= (fmt->chroma_sample << 1);
+ cdm_enable |= BIT(19);
+ } else {
+ opmode = 0;
+ cdm_enable = BIT(24);
+ }
+
+ csc |= BIT(2);
+ csc &= ~BIT(1);
+ csc |= BIT(0);
+
+ /* This register lives in the MDP TOP block, hence the different offset */
+ SDE_REG_WRITE(c, MDP_OUT_CTL_0, cdm_enable);
+
+ SDE_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+ SDE_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+ return 0;
+}
+
+void sde_hw_cdm_disable(struct sde_hw_cdm *ctx)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+ /* mdp top block */
+ SDE_REG_WRITE(c, MDP_OUT_CTL_0, 0); /* bypass mode */
+}
+
+static void _setup_cdm_ops(struct sde_hw_cdm_ops *ops,
+ unsigned long features)
+{
+ ops->setup_csc_data = sde_hw_cdm_setup_csc_10bit;
+ ops->setup_cdwn = sde_hw_cdm_setup_cdwn;
+ ops->enable = sde_hw_cdm_enable;
+ ops->disable = sde_hw_cdm_disable;
+}
+
+struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_cdm *c;
+ struct sde_cdm_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _cdm_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->cdm_hw_cap = cfg;
+ _setup_cdm_ops(&c->ops, c->cdm_hw_cap->features);
+
+ /*
+ * Perform any default initialization for the chroma down module:
+ * set up the default CSC coefficients.
+ */
+ sde_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
+
+ return c;
+}
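+
+/*
+ * Typical bring-up flow (illustrative sketch; error handling trimmed,
+ * CDM_0 is assumed to be a valid enum sde_cdm value, and "cdm_cfg" is a
+ * populated struct sde_hw_cdm_cfg supplied by the caller):
+ *
+ *	struct sde_hw_cdm *cdm;
+ *
+ *	cdm = sde_hw_cdm_init(CDM_0, mmio, catalog);
+ *	if (IS_ERR(cdm))
+ *		return PTR_ERR(cdm);
+ *
+ *	cdm->ops.setup_cdwn(cdm, &cdm_cfg);
+ *	cdm->ops.enable(cdm, &cdm_cfg);
+ */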
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.h b/drivers/gpu/drm/msm/sde/sde_hw_cdm.h
new file mode 100644
index 000000000000..ea19dc208c7f
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.h
@@ -0,0 +1,115 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_CDM_H
+#define _SDE_HW_CDM_H
+
+#include "sde_hw_mdss.h"
+
+struct sde_hw_cdm;
+
+struct sde_hw_cdm_cfg {
+ u32 output_width;
+ u32 output_height;
+ u32 output_bit_depth;
+ u32 h_cdwn_type;
+ u32 v_cdwn_type;
+ struct sde_mdp_format_params *output_fmt;
+ u32 output_type;
+ int flags;
+};
+
+enum sde_hw_cdwn_type {
+ CDM_CDWN_DISABLE,
+ CDM_CDWN_PIXEL_DROP,
+ CDM_CDWN_AVG,
+ CDM_CDWN_COSITE,
+ CDM_CDWN_OFFSITE,
+};
+
+enum sde_hw_cdwn_output_type {
+ CDM_CDWN_OUTPUT_HDMI,
+ CDM_CDWN_OUTPUT_WB,
+};
+
+enum sde_hw_cdwn_output_bit_depth {
+ CDM_CDWN_OUTPUT_8BIT,
+ CDM_CDWN_OUTPUT_10BIT,
+};
+
+/**
+ * struct sde_hw_cdm_ops : Interface to the chroma down Hw driver functions
+ * Assumption is these functions will be called after
+ * clocks are enabled
+ * @setup_csc_data: Programs the CSC matrix
+ * @setup_cdwn: Sets up the chroma down sub module
+ * @enable: Enables the output to interface and programs the
+ * output packer
+ * @disable: Puts the cdm in bypass mode
+ */
+struct sde_hw_cdm_ops {
+ /**
+ * Programs the CSC matrix for conversion from RGB space to YUV space.
+ * Calling this function is optional, as a default matrix is programmed
+ * automatically during initialization; call it only to program a
+ * matrix other than the default one.
+ * @cdm: Pointer to the chroma down context structure
+ * @data: Pointer to CSC configuration data
+ */
+ void (*setup_csc_data)(struct sde_hw_cdm *cdm,
+ struct sde_csc_cfg *data);
+
+ /**
+ * Programs the chroma downsample block.
+ * @cdm: Pointer to chroma down context
+ * @cfg: Pointer to chroma down configuration
+ */
+ int (*setup_cdwn)(struct sde_hw_cdm *cdm,
+ struct sde_hw_cdm_cfg *cfg);
+
+ /**
+ * Enable the CDM module
+ * @cdm: Pointer to chroma down context
+ * @cfg: Pointer to chroma down configuration
+ */
+ int (*enable)(struct sde_hw_cdm *cdm,
+ struct sde_hw_cdm_cfg *cfg);
+
+ /**
+ * Disable the CDM module
+ * @cdm: Pointer to chroma down context
+ */
+ void (*disable)(struct sde_hw_cdm *cdm);
+};
+
+struct sde_hw_cdm {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* chroma down */
+ const struct sde_cdm_cfg *cdm_hw_cap;
+ enum sde_cdm idx;
+
+ /* ops */
+ struct sde_hw_cdm_ops ops;
+};
+
+/**
+ * sde_hw_cdm_init(): Initializes the cdm hw driver object.
+ * should be called once before accessing every cdm.
+ * @idx: cdm index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+#endif /*_SDE_HW_CDM_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
new file mode 100644
index 000000000000..e87ca9570443
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -0,0 +1,105 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_dspp.h"
+
+static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp,
+ struct sde_mdss_cfg *m,
+ void __iomem *addr,
+ struct sde_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->dspp_count; i++) {
+ if (dspp == m->dspp[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->dspp[i].base;
+ b->hwversion = m->hwversion;
+ return &m->dspp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+void sde_dspp_setup_histogram(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_read_histogram(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_update_igc(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_setup_pa(struct sde_hw_dspp *dspp, void *cfg)
+{
+}
+
+void sde_dspp_setup_pcc(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_setup_sharpening(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_setup_pa_memcolor(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_setup_sixzone(struct sde_hw_dspp *dspp)
+{
+}
+
+void sde_dspp_setup_danger_safe(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_setup_dither(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+static void _setup_dspp_ops(struct sde_hw_dspp_ops *ops,
+ unsigned long features)
+{
+}
+
+struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_dspp *c;
+ struct sde_dspp_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _dspp_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_dspp_ops(&c->ops, c->cap->features);
+
+ return c;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
new file mode 100644
index 000000000000..eef4fbdff2a7
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -0,0 +1,127 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_DSPP_H
+#define _SDE_HW_DSPP_H
+
+#include "sde_hw_mdss.h"
+
+struct sde_hw_dspp;
+
+/**
+ * struct sde_hw_dspp_ops - interface to the dspp hardware driver functions
+ * Caller must call the init function to get the dspp context for each dspp
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_dspp_ops {
+ /**
+ * setup_histogram - setup dspp histogram
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_histogram)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * read_histogram - read dspp histogram
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*read_histogram)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * update_igc - update dspp igc
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*update_igc)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_pa - setup dspp pa
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pa)(struct sde_hw_dspp *dspp, void *cfg);
+
+ /**
+ * setup_pcc - setup dspp pcc
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pcc)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_sharpening - setup dspp sharpening
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_sharpening)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_pa_memcolor - setup dspp memcolor
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pa_memcolor)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_sixzone - setup dspp six zone
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_sixzone)(struct sde_hw_dspp *dspp);
+
+ /**
+ * setup_danger_safe - setup danger safe LUTS
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_danger_safe)(struct sde_hw_dspp *ctx, void *cfg);
+ /**
+ * setup_dither - setup dspp dither
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_dither)(struct sde_hw_dspp *ctx, void *cfg);
+};
+
+/**
+ * struct sde_hw_dspp - dspp description
+ * @hw: Block register map
+ * @idx: DSPP index
+ * @cap: Pointer to DSPP catalog capabilities
+ * @ops: Operations supported by this DSPP
+ */
+struct sde_hw_dspp {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* dspp */
+ enum sde_dspp idx;
+ const struct sde_dspp_cfg *cap;
+
+ /* Ops */
+ struct sde_hw_dspp_ops ops;
+};
+
+/**
+ * sde_hw_dspp_init - initializes the dspp hw driver object.
+ * should be called once before accessing every dspp.
+ * @idx: DSPP index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ */
+struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+#endif /*_SDE_HW_DSPP_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
new file mode 100644
index 000000000000..99aa2e59dd85
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -0,0 +1,969 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "sde_kms.h"
+#include "sde_hw_interrupts.h"
+#include "sde_hw_mdp_util.h"
+#include "sde_hw_mdss.h"
+
+/**
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. the MDSS base
+ */
+#define HW_INTR_STATUS 0x0010
+#define MDP_SSPP_TOP0_OFF 0x1000
+#define MDP_INTF_0_OFF 0x6B000
+#define MDP_INTF_1_OFF 0x6B800
+#define MDP_INTF_2_OFF 0x6C000
+#define MDP_INTF_3_OFF 0x6C800
+#define MDP_INTF_4_OFF 0x6D000
+
+/**
+ * WB interrupt status bit definitions
+ */
+#define SDE_INTR_WB_0_DONE BIT(0)
+#define SDE_INTR_WB_1_DONE BIT(1)
+#define SDE_INTR_WB_2_DONE BIT(4)
+
+/**
+ * WDOG timer interrupt status bit definitions
+ */
+#define SDE_INTR_WD_TIMER_0_DONE BIT(2)
+#define SDE_INTR_WD_TIMER_1_DONE BIT(3)
+#define SDE_INTR_WD_TIMER_2_DONE BIT(5)
+#define SDE_INTR_WD_TIMER_3_DONE BIT(6)
+#define SDE_INTR_WD_TIMER_4_DONE BIT(7)
+
+/**
+ * Pingpong interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_DONE BIT(8)
+#define SDE_INTR_PING_PONG_1_DONE BIT(9)
+#define SDE_INTR_PING_PONG_2_DONE BIT(10)
+#define SDE_INTR_PING_PONG_3_DONE BIT(11)
+#define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/**
+ * Interface interrupt status bit definitions
+ */
+#define SDE_INTR_INTF_0_UNDERRUN BIT(24)
+#define SDE_INTR_INTF_1_UNDERRUN BIT(26)
+#define SDE_INTR_INTF_2_UNDERRUN BIT(28)
+#define SDE_INTR_INTF_3_UNDERRUN BIT(30)
+#define SDE_INTR_INTF_0_VSYNC BIT(25)
+#define SDE_INTR_INTF_1_VSYNC BIT(27)
+#define SDE_INTR_INTF_2_VSYNC BIT(29)
+#define SDE_INTR_INTF_3_VSYNC BIT(31)
+
+/**
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/**
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define SDE_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define SDE_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define SDE_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/**
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define SDE_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define SDE_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/**
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define SDE_INTR_CWB_2_OVERFLOW BIT(14)
+#define SDE_INTR_CWB_3_OVERFLOW BIT(15)
+
+/**
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_VIG_0_DONE BIT(0)
+#define SDE_INTR_HIST_VIG_1_DONE BIT(4)
+#define SDE_INTR_HIST_VIG_2_DONE BIT(8)
+#define SDE_INTR_HIST_VIG_3_DONE BIT(10)
+
+/**
+ * Histogram VIG reset Sequence done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/**
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
+#define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
+#define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
+#define SDE_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/**
+ * Histogram DSPP reset Sequence done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/**
+ * INTF interrupt status bit definitions
+ */
+#define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
+#define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define SDE_INTR_PROG_LINE BIT(8)
+
+/**
+ * struct sde_intr_reg - offsets for one SDE interrupt register set
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ */
+struct sde_intr_reg {
+ u32 clr_off;
+ u32 en_off;
+ u32 status_off;
+};
+
+/**
+ * struct sde_irq_type - maps each irq to its HW block i/f
+ * @intr_type: type of interrupt listed in sde_intr_type
+ * @instance_idx: instance index of the associated HW block in SDE
+ * @irq_mask: corresponding bit in the interrupt status reg
+ * @reg_idx: which reg set to use
+ */
+struct sde_irq_type {
+ u32 intr_type;
+ u32 instance_idx;
+ u32 irq_mask;
+ u32 reg_idx;
+};
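+
+/*
+ * Example (drawn from the map below): the INTF_0 VSYNC interrupt sits at
+ * irq_idx 25, i.e. { SDE_IRQ_TYPE_INTF_VSYNC, INTF_0,
+ * SDE_INTR_INTF_0_VSYNC, 0 }: status bit 25 of register set 0. The array
+ * position doubles as the irq_idx handed back by irq_idx_lookup().
+ */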
+
+/**
+ * List of SDE interrupt registers
+ */
+static const struct sde_intr_reg sde_intr_set[] = {
+ {
+ MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR_EN,
+ MDP_SSPP_TOP0_OFF+INTR_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR2_EN,
+ MDP_SSPP_TOP0_OFF+INTR2_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+ },
+ {
+ MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_0_OFF+INTF_INTR_EN,
+ MDP_INTF_0_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_1_OFF+INTF_INTR_EN,
+ MDP_INTF_1_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF+INTF_INTR_EN,
+ MDP_INTF_2_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF+INTF_INTR_EN,
+ MDP_INTF_3_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF+INTF_INTR_EN,
+ MDP_INTF_4_OFF+INTF_INTR_STATUS
+ }
+};
+
+/**
+ * IRQ mapping table - used to look up an irq_idx in this table that has
+ * a matching interrupt type and instance index.
+ */
+static const struct sde_irq_type sde_irq_map[] = {
+ /* BEGIN MAP_RANGE: 0-31, INTR */
+ /* irq_idx: 0-3 */
+ { SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, 0},
+ { SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, 0},
+ /* irq_idx: 4-7 */
+ { SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, 0},
+ /* irq_idx: 8-11 */
+ { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_DONE, 0},
+ /* irq_idx: 12-15 */
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_RD_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_RD_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_RD_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_RD_PTR, 0},
+ /* irq_idx: 16-19 */
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_WR_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_WR_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_WR_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_WR_PTR, 0},
+ /* irq_idx: 20-23 */
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+ /* irq_idx: 24-27 */
+ { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, 0},
+ { SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, 0},
+ { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, 0},
+ { SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, 0},
+ /* irq_idx: 28-31 */
+ { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, 0},
+ { SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, 0},
+ { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, 0},
+ { SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, 0},
+
+ /* BEGIN MAP_RANGE: 32-63, INTR2 */
+ /* irq_idx: 32-35 */
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 36-39 */
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_WR_PTR, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 40-43 */
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_RD_PTR, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 44-47 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, 1},
+ { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, 1},
+ /* irq_idx: 48-51 */
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_TEAR_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_TEAR_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_TEAR_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_TEAR_DETECTED, 1},
+ /* irq_idx: 52-55 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 56-59 */
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_TE_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_TE_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_TE_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_TE_DETECTED, 1},
+ /* irq_idx: 60-63 */
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_TE_DETECTED, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+
+ /* BEGIN MAP_RANGE: 64-95 HIST */
+ /* irq_idx: 64-67 */
+ { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+ SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 68-71 */
+ { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+ SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 72-75 */
+ { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+ SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+ SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 76-79 */
+ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+ SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 80-83 */
+ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+ SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 84-87 */
+ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+ SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+ SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 88-91 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 92-95 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+
+ /* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
+ /* irq_idx: 96-99 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+ SDE_INTR_VIDEO_INTO_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 3},
+ /* irq_idx: 100-103 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 3},
+ /* irq_idx: 104-107 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_0, SDE_INTR_PROG_LINE, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 108-111 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 112-115 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 116-119 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 120-123 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 124-127 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+
+ /* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
+ /* irq_idx: 128-131 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+ SDE_INTR_VIDEO_INTO_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 4},
+ /* irq_idx: 132-135 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 4},
+ /* irq_idx: 136-139 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_1, SDE_INTR_PROG_LINE, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 140-143 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 144-147 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 148-151 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 152-155 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 156-159 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+
+ /* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
+ /* irq_idx: 160-163 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+ SDE_INTR_VIDEO_INTO_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 5},
+ /* irq_idx: 164-167 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 5},
+ /* irq_idx: 168-171 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_2, SDE_INTR_PROG_LINE, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 172-175 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 176-179 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 180-183 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 184-187 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 188-191 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+
+ /* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
+ /* irq_idx: 192-195 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+ SDE_INTR_VIDEO_INTO_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 6},
+ /* irq_idx: 196-199 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 6},
+ /* irq_idx: 200-203 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_3, SDE_INTR_PROG_LINE, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 204-207 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 208-211 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 212-215 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 216-219 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 220-223 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+
+ /* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
+ /* irq_idx: 224-227 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+ SDE_INTR_VIDEO_INTO_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 7},
+ /* irq_idx: 228-231 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 7},
+ /* irq_idx: 232-235 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_4, SDE_INTR_PROG_LINE, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 236-239 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 240-243 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 244-247 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 248-251 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 252-255 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+};
+
+static int sde_hw_intr_irqidx_lookup(enum sde_intr_type intr_type,
+ u32 instance_idx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sde_irq_map); i++) {
+ if (intr_type == sde_irq_map[i].intr_type &&
+ instance_idx == sde_irq_map[i].instance_idx)
+ return i;
+ }
+
+ pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
+ intr_type, instance_idx);
+ return -EINVAL;
+}
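+
+/*
+ * Minimal caller-side sketch (illustrative; assumes "intr" was obtained
+ * from sde_hw_intr_init()):
+ *
+ *	int irq_idx;
+ *
+ *	irq_idx = intr->ops.irq_idx_lookup(SDE_IRQ_TYPE_INTF_VSYNC, INTF_0);
+ *	if (irq_idx >= 0)
+ *		intr->ops.enable_irq(intr, irq_idx);
+ */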
+
+static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off,
+ uint32_t mask)
+{
+ SDE_REG_WRITE(&intr->hw, reg_off, mask);
+}
+
+static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
+ void (*cbfunc)(void *, int),
+ void *arg)
+{
+ int reg_idx;
+ int irq_idx;
+ int start_idx;
+ int end_idx;
+ u32 irq_status;
+ unsigned long irq_flags;
+
+ /*
+ * The dispatcher will save the IRQ status before calling here.
+ * Now go through each saved status register and find the
+ * matching irq lookup index.
+ */
+ spin_lock_irqsave(&intr->status_lock, irq_flags);
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(sde_intr_set); reg_idx++) {
+ irq_status = intr->save_irq_status[reg_idx];
+
+ /*
+ * Each interrupt register covers a fixed range of
+ * 32 indexes in sde_irq_map.
+ */
+ start_idx = reg_idx * 32;
+ end_idx = start_idx + 32;
+
+ /*
+ * Search through matching intr status from irq map.
+ * start_idx and end_idx define the search range in
+ * the sde_irq_map.
+ */
+ for (irq_idx = start_idx;
+ (irq_idx < end_idx) && irq_status;
+ irq_idx++)
+ if ((irq_status & sde_irq_map[irq_idx].irq_mask) &&
+ (sde_irq_map[irq_idx].reg_idx == reg_idx)) {
+ /*
+ * Once the irq mask matches, invoke the given
+ * cbfunc. The cbfunc is expected to clear the
+ * interrupt status. If no cbfunc is provided,
+ * the interrupt is cleared here instead.
+ */
+ if (cbfunc)
+ cbfunc(arg, irq_idx);
+ else
+ intr->ops.clear_interrupt_status(
+ intr, irq_idx);
+
+ /*
+ * When the callback finishes, clear the handled
+ * bit from irq_status. Once irq_status is fully
+ * cleared, the search can stop early.
+ */
+ irq_status &= ~sde_irq_map[irq_idx].irq_mask;
+ }
+ }
+ spin_unlock_irqrestore(&intr->status_lock, irq_flags);
+}
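+
+/*
+ * A matching callback might look like the sketch below (hypothetical
+ * helper shown for illustration; the real consumers are wired up in
+ * sde_irq.c). When a cbfunc is provided, clearing the status is its
+ * responsibility:
+ *
+ *	static void example_irq_cb(void *arg, int irq_idx)
+ *	{
+ *		struct sde_hw_intr *intr = arg;
+ *
+ *		intr->ops.clear_interrupt_status(intr, irq_idx);
+ *	}
+ */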
+
+static int sde_hw_intr_enable_irq(struct sde_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ const struct sde_intr_reg *reg;
+ const struct sde_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &sde_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &sde_intr_set[reg_idx];
+
+ spin_lock_irqsave(&intr->mask_lock, irq_flags);
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if (cache_irq_mask & irq->irq_mask) {
+ dbgstr = "SDE IRQ already set:";
+ } else {
+ dbgstr = "SDE IRQ enabled:";
+
+ cache_irq_mask |= irq->irq_mask;
+ /* Clear any pending interrupts */
+ SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+ /* Enabling interrupts with the new mask */
+ SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+ spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int sde_hw_intr_disable_irq(struct sde_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ const struct sde_intr_reg *reg;
+ const struct sde_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &sde_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &sde_intr_set[reg_idx];
+
+ spin_lock_irqsave(&intr->mask_lock, irq_flags);
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if ((cache_irq_mask & irq->irq_mask) == 0) {
+ dbgstr = "SDE IRQ is already cleared:";
+ } else {
+ dbgstr = "SDE IRQ mask disable:";
+
+ cache_irq_mask &= ~irq->irq_mask;
+ /* Disable interrupts based on the new mask */
+ SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+ /* Clear any pending interrupts */
+ SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+ spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, 0xffffffff);
+
+ return 0;
+}
+
+static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[i].en_off, 0x00000000);
+
+ return 0;
+}
+
+static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr,
+ uint32_t *mask)
+{
+ *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+ | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+ return 0;
+}
+
+static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
+ uint32_t *sources)
+{
+ *sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);
+ return 0;
+}
+
+static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr)
+{
+ int i;
+ u32 enable_mask;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&intr->status_lock, irq_flags);
+ for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++) {
+ /* Read interrupt status */
+ intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
+ sde_intr_set[i].status_off);
+
+ /* Read enable mask */
+ enable_mask = SDE_REG_READ(&intr->hw, sde_intr_set[i].en_off);
+
+ /* and clear the interrupt */
+ if (intr->save_irq_status[i])
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off,
+ intr->save_irq_status[i]);
+
+ /* Finally update IRQ status based on enable mask */
+ intr->save_irq_status[i] &= enable_mask;
+ }
+ spin_unlock_irqrestore(&intr->status_lock, irq_flags);
+}
+
+static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
+ int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&intr->mask_lock, irq_flags);
+
+ reg_idx = sde_irq_map[irq_idx].reg_idx;
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
+ sde_irq_map[irq_idx].irq_mask);
+
+ spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+}
+
+static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
+{
+ ops->set_mask = sde_hw_intr_set_mask;
+ ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
+ ops->enable_irq = sde_hw_intr_enable_irq;
+ ops->disable_irq = sde_hw_intr_disable_irq;
+ ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
+ ops->clear_all_irqs = sde_hw_intr_clear_irqs;
+ ops->disable_all_irqs = sde_hw_intr_disable_irqs;
+ ops->get_valid_interrupts = sde_hw_intr_get_valid_interrupts;
+ ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
+ ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
+ ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
+}
+
+static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
+ void __iomem *addr, struct sde_hw_blk_reg_map *hw)
+{
+ if (m->mdp_count == 0)
+ return NULL;
+
+ hw->base_off = addr;
+ hw->blk_off = m->mdss[0].base;
+ hw->hwversion = m->hwversion;
+ return &m->mdss[0];
+}
+
+struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_intr *intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ struct sde_mdss_base_cfg *cfg;
+
+ if (!intr)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = __intr_offset(m, addr, &intr->hw);
+ if (!cfg) {
+ kfree(intr);
+ return ERR_PTR(-EINVAL);
+ }
+ __setup_intr_ops(&intr->ops);
+
+ intr->irq_idx_tbl_size = ARRAY_SIZE(sde_irq_map);
+
+ intr->cache_irq_mask = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->cache_irq_mask == NULL) {
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ intr->save_irq_status = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->save_irq_status == NULL) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&intr->mask_lock);
+ spin_lock_init(&intr->status_lock);
+
+ return intr;
+}
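+
+/*
+ * Probe-time sketch (illustrative): quiesce the hardware right after the
+ * handle is created, before requesting the kernel IRQ line.
+ *
+ *	intr = sde_hw_intr_init(mmio, catalog);
+ *	if (IS_ERR(intr))
+ *		return PTR_ERR(intr);
+ *
+ *	intr->ops.disable_all_irqs(intr);
+ *	intr->ops.clear_all_irqs(intr);
+ */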
+
+void sde_hw_intr_destroy(struct sde_hw_intr *intr)
+{
+ if (intr) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr->save_irq_status);
+ kfree(intr);
+ }
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
new file mode 100644
index 000000000000..0ddb1e78a953
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
@@ -0,0 +1,245 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_INTERRUPTS_H
+#define _SDE_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdp_util.h"
+#include "sde_hw_mdss.h"
+
+#define IRQ_SOURCE_MDP BIT(0)
+#define IRQ_SOURCE_DSI0 BIT(4)
+#define IRQ_SOURCE_DSI1 BIT(5)
+#define IRQ_SOURCE_HDMI BIT(8)
+#define IRQ_SOURCE_EDP BIT(12)
+#define IRQ_SOURCE_MHL BIT(16)
+
+/**
+ * sde_intr_type - HW Interrupt Type
+ * @SDE_IRQ_TYPE_WB_ROT_COMP: WB rotator done
+ * @SDE_IRQ_TYPE_WB_WFD_COMP: WB WFD done
+ * @SDE_IRQ_TYPE_PING_PONG_COMP: PingPong done
+ * @SDE_IRQ_TYPE_PING_PONG_RD_PTR: PingPong read pointer
+ * @SDE_IRQ_TYPE_PING_PONG_WR_PTR: PingPong write pointer
+ * @SDE_IRQ_TYPE_PING_PONG_AUTO_REF: PingPong auto refresh
+ * @SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK: PingPong Tear check
+ * @SDE_IRQ_TYPE_PING_PONG_TE_CHECK: PingPong TE detection
+ * @SDE_IRQ_TYPE_INTF_UNDER_RUN: INTF underrun
+ * @SDE_IRQ_TYPE_INTF_VSYNC: INTF VSYNC
+ * @SDE_IRQ_TYPE_CWB_OVERFLOW: Concurrent WB overflow
+ * @SDE_IRQ_TYPE_HIST_VIG_DONE: VIG Histogram done
+ * @SDE_IRQ_TYPE_HIST_VIG_RSTSEQ: VIG Histogram reset
+ * @SDE_IRQ_TYPE_HIST_DSPP_DONE: DSPP Histogram done
+ * @SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ: DSPP Histogram reset
+ * @SDE_IRQ_TYPE_WD_TIMER: Watchdog timer
+ * @SDE_IRQ_TYPE_SFI_VIDEO_IN: Video static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_VIDEO_OUT: Video static frame INTR out-of static
+ * @SDE_IRQ_TYPE_SFI_CMD_0_IN: DSI CMD0 static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_CMD_0_OUT: DSI CMD0 static frame INTR out-of static
+ * @SDE_IRQ_TYPE_SFI_CMD_1_IN: DSI CMD1 static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_CMD_1_OUT: DSI CMD1 static frame INTR out-of static
+ * @SDE_IRQ_TYPE_SFI_CMD_2_IN: DSI CMD2 static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_CMD_2_OUT: DSI CMD2 static frame INTR out-of static
+ * @SDE_IRQ_TYPE_PROG_LINE: Programmable Line interrupt
+ * @SDE_IRQ_TYPE_RESERVED: Reserved for expansion
+ */
+enum sde_intr_type {
+ SDE_IRQ_TYPE_WB_ROT_COMP,
+ SDE_IRQ_TYPE_WB_WFD_COMP,
+ SDE_IRQ_TYPE_PING_PONG_COMP,
+ SDE_IRQ_TYPE_PING_PONG_RD_PTR,
+ SDE_IRQ_TYPE_PING_PONG_WR_PTR,
+ SDE_IRQ_TYPE_PING_PONG_AUTO_REF,
+ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK,
+ SDE_IRQ_TYPE_PING_PONG_TE_CHECK,
+ SDE_IRQ_TYPE_INTF_UNDER_RUN,
+ SDE_IRQ_TYPE_INTF_VSYNC,
+ SDE_IRQ_TYPE_CWB_OVERFLOW,
+ SDE_IRQ_TYPE_HIST_VIG_DONE,
+ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ,
+ SDE_IRQ_TYPE_HIST_DSPP_DONE,
+ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ,
+ SDE_IRQ_TYPE_WD_TIMER,
+ SDE_IRQ_TYPE_SFI_VIDEO_IN,
+ SDE_IRQ_TYPE_SFI_VIDEO_OUT,
+ SDE_IRQ_TYPE_SFI_CMD_0_IN,
+ SDE_IRQ_TYPE_SFI_CMD_0_OUT,
+ SDE_IRQ_TYPE_SFI_CMD_1_IN,
+ SDE_IRQ_TYPE_SFI_CMD_1_OUT,
+ SDE_IRQ_TYPE_SFI_CMD_2_IN,
+ SDE_IRQ_TYPE_SFI_CMD_2_OUT,
+ SDE_IRQ_TYPE_PROG_LINE,
+ SDE_IRQ_TYPE_RESERVED,
+};
+
+struct sde_hw_intr;
+
+/**
+ * Interrupt operations.
+ */
+struct sde_hw_intr_ops {
+ /**
+ * set_mask - Programs the given interrupt register with the
+ * given interrupt mask. Register value will get overwritten.
+ * @intr: HW interrupt handle
+ * @reg_off: MDSS HW register offset
+ * @irqmask: IRQ mask value
+ */
+ void (*set_mask)(
+ struct sde_hw_intr *intr,
+ uint32_t reg_off,
+ uint32_t irqmask);
+
+ /**
+ * irq_idx_lookup - Lookup IRQ index on the HW interrupt type
+ * Used for all irq related ops
+ * @intr_type: Interrupt type defined in sde_intr_type
+ * @instance_idx: HW interrupt block instance
+ * @return: irq_idx or -EINVAL for lookup fail
+ */
+ int (*irq_idx_lookup)(
+ enum sde_intr_type intr_type,
+ u32 instance_idx);
+
+ /**
+ * enable_irq - Enable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*enable_irq)(
+ struct sde_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * disable_irq - Disable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_irq)(
+ struct sde_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
+ * any asserted IRQs). Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*clear_all_irqs)(
+ struct sde_hw_intr *intr);
+
+ /**
+ * disable_all_irqs - Disables all the interrupts. Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_all_irqs)(
+ struct sde_hw_intr *intr);
+
+ /**
+ * dispatch_irqs - IRQ dispatcher will call the given callback
+ * function when a matching interrupt status bit is
+ * found in the irq mapping table.
+ * @intr: HW interrupt handle
+ * @cbfunc: Callback function pointer
+ * @arg: Argument to pass back during callback
+ */
+ void (*dispatch_irqs)(
+ struct sde_hw_intr *intr,
+ void (*cbfunc)(void *arg, int irq_idx),
+ void *arg);
+
+ /**
+ * get_interrupt_statuses - Gets and store value from all interrupt
+ * status registers that are currently fired.
+ * @intr: HW interrupt handle
+ */
+ void (*get_interrupt_statuses)(
+ struct sde_hw_intr *intr);
+
+ /**
+ * clear_interrupt_status - Clears HW interrupt status based on given
+ * lookup IRQ index.
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ */
+ void (*clear_interrupt_status)(
+ struct sde_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * get_valid_interrupts - Gets a mask of all valid interrupt sources
+ * within SDE. These are status bits within the
+ * interrupt registers that identify the source
+ * of an IRQ. For example,
+ * valid interrupt sources can be MDP, DSI,
+ * HDMI etc.
+ * @intr: HW interrupt handle
+ * @mask: Returning the interrupt source MASK
+ * @return: 0 for success, otherwise failure
+ */
+ int (*get_valid_interrupts)(
+ struct sde_hw_intr *intr,
+ uint32_t *mask);
+
+ /**
+ * get_interrupt_sources - Gets the bitmask of the SDE interrupt
+ * sources that are currently fired.
+ * @intr: HW interrupt handle
+ * @sources: Returning the SDE interrupt source status bit mask
+ * @return: 0 for success, otherwise failure
+ */
+ int (*get_interrupt_sources)(
+ struct sde_hw_intr *intr,
+ uint32_t *sources);
+};
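+
+/*
+ * Expected top-level ISR flow built on these ops (a sketch, not mandated
+ * by this header): latch all fired status registers, then dispatch to the
+ * registered callbacks.
+ *
+ *	intr->ops.get_interrupt_statuses(intr);
+ *	intr->ops.dispatch_irqs(intr, cbfunc, arg);
+ */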
+
+/**
+ * struct sde_hw_intr: hw interrupts handling data structure
+ * @hw: virtual address mapping
+ * @ops: function pointer mapping for IRQ handling
+ * @cache_irq_mask: array of IRQ enable masks reg storage created during init
+ * @save_irq_status: array of IRQ status reg storage created during init
+ * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
+ * @mask_lock: spinlock for accessing IRQ mask
+ * @status_lock: spinlock for accessing IRQ status
+ */
+struct sde_hw_intr {
+ struct sde_hw_blk_reg_map hw;
+ struct sde_hw_intr_ops ops;
+ u32 *cache_irq_mask;
+ u32 *save_irq_status;
+ u32 irq_idx_tbl_size;
+ spinlock_t mask_lock;
+ spinlock_t status_lock;
+};
+
+/**
+ * sde_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_intr_destroy(): Cleans up the interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void sde_hw_intr_destroy(struct sde_hw_intr *intr);
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
new file mode 100644
index 000000000000..072cb6770bc8
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -0,0 +1,381 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_intf.h"
+
+#define INTF_TIMING_ENGINE_EN 0x000
+#define INTF_CONFIG 0x004
+#define INTF_HSYNC_CTL 0x008
+#define INTF_VSYNC_PERIOD_F0 0x00C
+#define INTF_VSYNC_PERIOD_F1 0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0 0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1 0x018
+#define INTF_DISPLAY_V_START_F0 0x01C
+#define INTF_DISPLAY_V_START_F1 0x020
+#define INTF_DISPLAY_V_END_F0 0x024
+#define INTF_DISPLAY_V_END_F1 0x028
+#define INTF_ACTIVE_V_START_F0 0x02C
+#define INTF_ACTIVE_V_START_F1 0x030
+#define INTF_ACTIVE_V_END_F0 0x034
+#define INTF_ACTIVE_V_END_F1 0x038
+#define INTF_DISPLAY_HCTL 0x03C
+#define INTF_ACTIVE_HCTL 0x040
+#define INTF_BORDER_COLOR 0x044
+#define INTF_UNDERFLOW_COLOR 0x048
+#define INTF_HSYNC_SKEW 0x04C
+#define INTF_POLARITY_CTL 0x050
+#define INTF_TEST_CTL 0x054
+#define INTF_TP_COLOR0 0x058
+#define INTF_TP_COLOR1 0x05C
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_DEFLICKER_CONFIG 0x0F0
+#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
+#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
+
+#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
+#define INTF_PANEL_FORMAT 0x090
+#define INTF_TPG_ENABLE 0x100
+#define INTF_TPG_MAIN_CONTROL 0x104
+#define INTF_TPG_VIDEO_CONFIG 0x108
+#define INTF_TPG_COMPONENT_LIMITS 0x10C
+#define INTF_TPG_RECTANGLE 0x110
+#define INTF_TPG_INITIAL_VALUE 0x114
+#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
+#define INTF_TPG_RGB_MAPPING 0x11C
+#define INTF_PROG_FETCH_START 0x170
+
+static struct sde_intf_cfg *_intf_offset(enum sde_intf intf,
+ struct sde_mdss_cfg *m,
+ void __iomem *addr,
+ struct sde_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->intf_count; i++) {
+ if ((intf == m->intf[i].id) &&
+ (m->intf[i].type != INTF_NONE)) {
+ b->base_off = addr;
+ b->blk_off = m->intf[i].base;
+ b->hwversion = m->hwversion;
+ return &m->intf[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
+ struct intf_timing_params *p,
+ struct sde_mdp_format_params *fmt)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 hsync_period, vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 active_h_start, active_h_end;
+ u32 active_v_start, active_v_end;
+ u32 active_hctl, display_hctl, hsync_ctl;
+ u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+ u32 panel_format;
+ u32 intf_cfg;
+
+ /* read interface_cfg */
+ intf_cfg = SDE_REG_READ(c, INTF_CONFIG);
+ hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+ p->h_front_porch;
+ vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+ p->v_front_porch;
+
+ display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+ hsync_period) + p->hsync_skew;
+ display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+ p->hsync_skew - 1;
+
+ if (ctx->cap->type == INTF_EDP) {
+ display_v_start += p->hsync_pulse_width + p->h_back_porch;
+ display_v_end -= p->h_front_porch;
+ }
+
+ hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+ hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+ if (p->width != p->xres) {
+ active_h_start = hsync_start_x;
+ active_h_end = active_h_start + p->xres - 1;
+ } else {
+ active_h_start = 0;
+ active_h_end = 0;
+ }
+
+ if (p->height != p->yres) {
+ active_v_start = display_v_start;
+ active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+ } else {
+ active_v_start = 0;
+ active_v_end = 0;
+ }
+
+ if (active_h_end) {
+ active_hctl = (active_h_end << 16) | active_h_start;
+ intf_cfg |= BIT(29); /* ACTIVE_H_ENABLE */
+ } else {
+ active_hctl = 0;
+ }
+
+ if (active_v_end)
+ intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+
+ hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ den_polarity = 0;
+ if (ctx->cap->type == INTF_HDMI) {
+ hsync_polarity = p->yres >= 720 ? 0 : 1;
+ vsync_polarity = p->yres >= 720 ? 0 : 1;
+ } else {
+ hsync_polarity = 0;
+ vsync_polarity = 0;
+ }
+ polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
+ (vsync_polarity << 1) | /* VSYNC Polarity */
+ (hsync_polarity << 0); /* HSYNC Polarity */
+
+ if (!fmt->is_yuv)
+ panel_format = (fmt->bits[C0_G_Y] |
+ (fmt->bits[C1_B_Cb] << 2) |
+ (fmt->bits[C2_R_Cr] << 4) |
+ (0x21 << 8));
+ else
+ /* Interface treats all the pixel data in RGB888 format */
+ panel_format = (COLOR_8BIT |
+ (COLOR_8BIT << 2) |
+ (COLOR_8BIT << 4) |
+ (0x21 << 8));
+
+ SDE_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+ SDE_REG_WRITE(c, INTF_VSYNC_PERIOD_F0,
+ vsync_period * hsync_period);
+ SDE_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+ p->vsync_pulse_width * hsync_period);
+ SDE_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+ SDE_REG_WRITE(c, INTF_DISPLAY_V_START_F0,
+ display_v_start);
+ SDE_REG_WRITE(c, INTF_DISPLAY_V_END_F0,
+ display_v_end);
+ SDE_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl);
+ SDE_REG_WRITE(c, INTF_ACTIVE_V_START_F0,
+ active_v_start);
+ SDE_REG_WRITE(c, INTF_ACTIVE_V_END_F0,
+ active_v_end);
+
+ SDE_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+ SDE_REG_WRITE(c, INTF_UNDERFLOW_COLOR,
+ p->underflow_clr);
+ SDE_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+ SDE_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+ SDE_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+ SDE_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+ SDE_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
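
[Editor's note] For reference, the timing arithmetic above can be checked in user space. A minimal sketch, assuming an illustrative 1920x1080 mode; the mode numbers are not taken from this patch:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                /* hypothetical 1080p60 mode */
                uint32_t width = 1920, height = 1080;
                uint32_t hfp = 88, hbp = 148, hsw = 44;
                uint32_t vfp = 4, vbp = 36, vsw = 5;
                uint32_t hsync_skew = 0;

                uint32_t hsync_period = hsw + hbp + width + hfp;  /* 2200 px */
                uint32_t vsync_period = vsw + vbp + height + vfp; /* 1125 lines */

                /* v_start/v_end are pixel-clock counts, hence the multiply */
                uint32_t display_v_start = (vsw + vbp) * hsync_period + hsync_skew;
                uint32_t display_v_end =
                        (vsync_period - vfp) * hsync_period + hsync_skew - 1;

                printf("hsync_period=%u vsync_period=%u lines\n",
                       hsync_period, vsync_period);
                printf("INTF_VSYNC_PERIOD_F0=%u\n", vsync_period * hsync_period);
                printf("display_v_start=%u display_v_end=%u\n",
                       display_v_start, display_v_end);
                return 0;
        }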
+
+static void sde_hw_intf_enable_timing_engine(
+ struct sde_hw_intf *intf,
+ u8 enable)
+{
+ struct sde_hw_blk_reg_map *c = &intf->hw;
+ u32 intf_sel;
+
+ /* Display interface select */
+ if (enable) {
+ intf_sel = SDE_REG_READ(c, DISP_INTF_SEL);
+
+ intf_sel |= (intf->cap->type << ((intf->idx - INTF_0) * 8));
+ SDE_REG_WRITE(c, DISP_INTF_SEL, intf_sel);
+ }
+
+ SDE_REG_WRITE(c, INTF_TIMING_ENGINE_EN,
+ enable & 0x1);
+}
+
+static void sde_hw_intf_setup_prg_fetch(
+ struct sde_hw_intf *intf,
+ struct intf_prog_fetch *fetch)
+{
+ struct sde_hw_blk_reg_map *c = &intf->hw;
+ int fetch_enable;
+
+ /*
+ * Fetch should always be outside the active lines. If the fetching
+ * is programmed within active region, hardware behavior is unknown.
+ */
+
+ fetch_enable = SDE_REG_READ(c, INTF_CONFIG);
+ if (fetch->enable) {
+ fetch_enable |= BIT(31);
+ SDE_REG_WRITE(c, INTF_PROG_FETCH_START,
+ fetch->fetch_start);
+ } else {
+ fetch_enable &= ~BIT(31);
+ }
+
+ SDE_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
+static void sde_hw_intf_get_timing_config(
+ struct sde_hw_intf *intf,
+ struct intf_timing_params *cfg)
+{
+ struct sde_hw_blk_reg_map *c = &intf->hw;
+ u32 vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 active_v_start, active_v_end;
+ u32 active_hctl, display_hctl, hsync_ctl;
+ u32 polarity_ctl;
+ u32 pulse_width;
+ u32 htotal, vtotal;
+ u32 intf_cfg;
+
+ hsync_ctl = SDE_REG_READ(c, INTF_HSYNC_CTL);
+ vsync_period = SDE_REG_READ(c, INTF_VSYNC_PERIOD_F0);
+ pulse_width = SDE_REG_READ(c, INTF_VSYNC_PULSE_WIDTH_F0);
+ display_hctl = SDE_REG_READ(c, INTF_DISPLAY_HCTL);
+ display_v_start = SDE_REG_READ(c, INTF_DISPLAY_V_START_F0);
+ display_v_end = SDE_REG_READ(c, INTF_DISPLAY_V_END_F0);
+ active_hctl = SDE_REG_READ(c, INTF_ACTIVE_HCTL);
+ active_v_start = SDE_REG_READ(c, INTF_ACTIVE_V_START_F0);
+ active_v_end = SDE_REG_READ(c, INTF_ACTIVE_V_END_F0);
+ intf_cfg = SDE_REG_READ(c, INTF_CONFIG);
+ cfg->border_clr = SDE_REG_READ(c, INTF_BORDER_COLOR);
+ cfg->underflow_clr = SDE_REG_READ(c, INTF_UNDERFLOW_COLOR);
+ cfg->hsync_skew = SDE_REG_READ(c, INTF_HSYNC_SKEW);
+ polarity_ctl = SDE_REG_READ(c, INTF_POLARITY_CTL);
+
+ hsync_start_x = (display_hctl & 0xffff);
+ hsync_end_x = (display_hctl & 0xffff0000) >> 16;
+ cfg->hsync_pulse_width = (hsync_ctl & 0xffff);
+ htotal = (hsync_ctl & 0xffff0000) >> 16;
+
+ if (htotal != 0) {
+ vtotal = vsync_period / htotal;
+ cfg->vsync_pulse_width = pulse_width / htotal;
+
+ /* porches */
+ cfg->h_front_porch = htotal - hsync_end_x - 1;
+ cfg->h_back_porch = hsync_start_x - cfg->hsync_pulse_width;
+ cfg->v_front_porch = vsync_period - display_v_end;
+ cfg->v_back_porch = display_v_start - cfg->vsync_pulse_width;
+
+ /* active resolution */
+ cfg->width = htotal - cfg->hsync_pulse_width -
+ cfg->h_back_porch -
+ cfg->h_front_porch;
+ cfg->height = vtotal - cfg->vsync_pulse_width -
+ cfg->v_back_porch - cfg->v_front_porch;
+
+ /* display panel resolution */
+ if (intf_cfg & BIT(29))
+ cfg->xres = ((active_hctl & 0xffff0000) >> 16) -
+ (active_hctl & 0xffff) + 1;
+ else
+ cfg->xres = cfg->width;
+
+ if (intf_cfg & BIT(30))
+ cfg->yres = (active_v_end - active_v_start + 1) / htotal;
+ else
+ cfg->yres = cfg->height;
+ } else {
+ cfg->vsync_pulse_width = 0;
+ cfg->h_front_porch = 0;
+ cfg->h_back_porch = 0;
+ cfg->v_front_porch = 0;
+ cfg->v_back_porch = 0;
+ cfg->width = 0;
+ cfg->height = 0;
+ }
+
+ cfg->hsync_polarity = polarity_ctl & 1;
+ cfg->vsync_polarity = (polarity_ctl & 2) >> 1;
+}
+
+static void sde_hw_intf_get_status(
+ struct sde_hw_intf *intf,
+ struct intf_status *s)
+{
+ struct sde_hw_blk_reg_map *c = &intf->hw;
+
+ s->is_en = SDE_REG_READ(c, INTF_TIMING_ENGINE_EN);
+ if (s->is_en) {
+ s->frame_count = SDE_REG_READ(c, INTF_FRAME_COUNT);
+ s->line_count = SDE_REG_READ(c, INTF_LINE_COUNT);
+ } else {
+ s->line_count = 0;
+ s->frame_count = 0;
+ }
+}
+
+static void _setup_intf_ops(struct sde_hw_intf_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_timing_gen = sde_hw_intf_setup_timing_engine;
+ ops->setup_prg_fetch = sde_hw_intf_setup_prg_fetch;
+ ops->get_timing_gen = sde_hw_intf_get_timing_config;
+ ops->get_status = sde_hw_intf_get_status;
+ ops->enable_timing = sde_hw_intf_enable_timing_engine;
+}
+
+struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_intf *c;
+ struct sde_intf_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _intf_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("Error Panic\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_intf_ops(&c->ops, c->cap->features);
+
+ /*
+ * Perform any default initialization for the intf
+ */
+ return c;
+}
+
+void sde_hw_intf_deinit(struct sde_hw_intf *intf)
+{
+ kfree(intf);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
new file mode 100644
index 000000000000..2dc8c52209f0
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
@@ -0,0 +1,106 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_INTF_H
+#define _SDE_HW_INTF_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_mdp_util.h"
+
+struct sde_hw_intf;
+
+/* intf timing settings */
+struct intf_timing_params {
+ u32 width; /* active width */
+ u32 height; /* active height */
+ u32 xres; /* Display panel width */
+ u32 yres; /* Display panel height */
+
+ u32 h_back_porch;
+ u32 h_front_porch;
+ u32 v_back_porch;
+ u32 v_front_porch;
+ u32 hsync_pulse_width;
+ u32 vsync_pulse_width;
+ u32 hsync_polarity;
+ u32 vsync_polarity;
+ u32 border_clr;
+ u32 underflow_clr;
+ u32 hsync_skew;
+};
+
+struct intf_prog_fetch {
+ u8 enable;
+ /* vsync counter for the front porch pixel line */
+ u32 fetch_start;
+};
+
+struct intf_status {
+ u8 is_en; /* interface timing engine is enabled or not */
+ u32 frame_count; /* frame count since timing engine enabled */
+ u32 line_count; /* current line count including blanking */
+};
+
+/**
+ * struct sde_hw_intf_ops : Interface to the interface Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_timing_gen: programs the timing engine
+ * @setup_prg_fetch: enables/disables the programmable fetch logic
+ * @enable_timing: enable/disable timing engine
+ * @get_timing_gen: get timing generator programmed configuration
+ * @get_status: returns whether the timing engine is enabled or not
+ */
+struct sde_hw_intf_ops {
+ void (*setup_timing_gen)(struct sde_hw_intf *intf,
+ struct intf_timing_params *p,
+ struct sde_mdp_format_params *fmt);
+
+ void (*setup_prg_fetch)(struct sde_hw_intf *intf,
+ struct intf_prog_fetch *fetch);
+
+ void (*enable_timing)(struct sde_hw_intf *intf,
+ u8 enable);
+
+ void (*get_timing_gen)(struct sde_hw_intf *intf,
+ struct intf_timing_params *cfg);
+
+ void (*get_status)(struct sde_hw_intf *intf,
+ struct intf_status *status);
+};
+
+struct sde_hw_intf {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* intf */
+ enum sde_intf idx;
+ const struct sde_intf_cfg *cap;
+
+ /* ops */
+ struct sde_hw_intf_ops ops;
+};
+
+/**
+ * sde_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx: interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+void sde_hw_intf_deinit(struct sde_hw_intf *intf);
+
+#endif /*_SDE_HW_INTF_H */
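
[Editor's note] A sketch of the intended call sequence for this interface, assuming the caller's probe path supplies the mapped MDP base and a populated catalog (both hypothetical here); kernel context only, error handling trimmed:

        static int example_enable_intf(void __iomem *mmio, struct sde_mdss_cfg *cfg,
                                       struct intf_timing_params *timing,
                                       struct sde_mdp_format_params *fmt)
        {
                struct sde_hw_intf *intf;

                intf = sde_hw_intf_init(INTF_1, mmio, cfg);
                if (IS_ERR(intf))
                        return PTR_ERR(intf);

                /* program, then start, the timing engine */
                intf->ops.setup_timing_gen(intf, timing, fmt);
                intf->ops.enable_timing(intf, 1);
                return 0;
        }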
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
new file mode 100644
index 000000000000..03704ddf4980
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -0,0 +1,192 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_catalog.h"
+#include "sde_hwio.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_mdss.h"
+
+#define LM_OP_MODE 0x00
+#define LM_OUT_SIZE 0x04
+#define LM_BORDER_COLOR_0 0x08
+#define LM_BORDER_COLOR_1 0x010
+
+/* These registers are offsets from the mixer base + stage base */
+#define LM_BLEND0_OP 0x00
+#define LM_BLEND0_FG_ALPHA 0x04
+#define LM_BLEND0_BG_ALPHA 0x08
+
+static struct sde_lm_cfg *_lm_offset(enum sde_lm mixer,
+ struct sde_mdss_cfg *m,
+ void __iomem *addr,
+ struct sde_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->mixer_count; i++) {
+ if (mixer == m->mixer[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->mixer[i].base;
+ b->hwversion = m->hwversion;
+ return &m->mixer[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @ctx: mixer ctx containing the mixer to be programmed
+ * @stage: stage index to setup
+ */
+static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage)
+{
+ const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
+
+ if (WARN_ON(stage == SDE_STAGE_BASE))
+ return -EINVAL;
+
+ if ((stage - SDE_STAGE_0) < sblk->maxblendstages)
+ return sblk->blendstage_base[stage - 1];
+ else
+ return -EINVAL;
+}
+
+static void sde_hw_lm_setup_out(struct sde_hw_mixer *ctx,
+ struct sde_hw_mixer_cfg *mixer)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 outsize;
+ u32 opmode;
+
+ opmode = SDE_REG_READ(c, LM_OP_MODE);
+
+ outsize = mixer->out_height << 16 | mixer->out_width;
+ SDE_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+ /* SPLIT_LEFT_RIGHT */
+ opmode = (opmode & ~BIT(31)) | (mixer->right_mixer ? BIT(31) : 0);
+ SDE_REG_WRITE(c, LM_OP_MODE, opmode);
+}
+
+static void sde_hw_lm_setup_border_color(struct sde_hw_mixer *ctx,
+ struct sde_mdss_color *color,
+ u8 border_en)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+ if (border_en) {
+ SDE_REG_WRITE(c, LM_BORDER_COLOR_0,
+ (color->color_0 & 0xFFF) |
+ ((color->color_1 & 0xFFF) << 0x10));
+ SDE_REG_WRITE(c, LM_BORDER_COLOR_1,
+ (color->color_2 & 0xFFF) |
+ ((color->color_3 & 0xFFF) << 0x10));
+ }
+}
+
+static void sde_hw_lm_setup_blendcfg(struct sde_hw_mixer *ctx,
+ int stage,
+ struct sde_hw_blend_cfg *blend)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 blend_op;
+ struct sde_hw_alpha_cfg *fg, *bg;
+ int stage_off;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ fg = &(blend->fg);
+ bg = &(blend->bg);
+
+ /* fg */
+ blend_op = (fg->alpha_sel & 3);
+ blend_op |= (fg->inv_alpha_sel & 1) << 2;
+ blend_op |= (fg->mod_alpha & 1) << 3;
+ blend_op |= (fg->inv_mode_alpha & 1) << 4;
+
+ /* bg */
+ blend_op |= (bg->alpha_sel & 3) << 8;
+ blend_op |= (bg->inv_alpha_sel & 1) << 10;
+ blend_op |= (bg->mod_alpha & 1) << 11;
+ blend_op |= (bg->inv_mode_alpha & 1) << 12;
+
+ SDE_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off,
+ fg->const_alpha);
+ SDE_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off,
+ bg->const_alpha);
+ SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
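
[Editor's note] The resulting LM_BLENDn_OP word can be reproduced in isolation. A minimal standalone sketch of the bit packing for a common "foreground over background" setup, mirroring the shifts in sde_hw_lm_setup_blendcfg; the chosen alpha selectors are illustrative:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t fg_alpha_sel = 0; /* ALPHA_FG_CONST */
                uint32_t bg_alpha_sel = 2; /* ALPHA_FG_PIXEL */
                uint32_t blend_op = 0;

                blend_op |= (fg_alpha_sel & 3) << 0;
                blend_op |= (bg_alpha_sel & 3) << 8;
                blend_op |= 1 << 12; /* invert the fg pixel alpha used for bg */

                printf("LM_BLEND0_OP = 0x%08x\n", blend_op); /* 0x00001200 */
                return 0;
        }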
+
+static void sde_hw_lm_setup_color3(struct sde_hw_mixer *ctx,
+ struct sde_hw_color3_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ int maxblendstages = ctx->cap->sblk->maxblendstages;
+ int i;
+ int op_mode;
+
+ /* read the existing op_mode configuration */
+ op_mode = SDE_REG_READ(c, LM_OP_MODE);
+
+ for (i = 0; i < maxblendstages; i++)
+ op_mode |= ((cfg->keep_fg[i] & 0x1) << i);
+
+ SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void sde_hw_lm_gammacorrection(struct sde_hw_mixer *mixer,
+ void *cfg)
+{
+}
+
+static void _setup_mixer_ops(struct sde_hw_lm_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_mixer_out = sde_hw_lm_setup_out;
+ ops->setup_blend_config = sde_hw_lm_setup_blendcfg;
+ ops->setup_alpha_out = sde_hw_lm_setup_color3;
+ ops->setup_border_color = sde_hw_lm_setup_border_color;
+ ops->setup_gammacorrection = sde_hw_lm_gammacorrection;
+}
+
+struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_mixer *c;
+ struct sde_lm_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _lm_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_mixer_ops(&c->ops, c->cap->features);
+
+ /*
+ * Perform any default initialization for the sspp blocks
+ */
+ return c;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.h b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
new file mode 100644
index 000000000000..b48e8e1b8e94
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
@@ -0,0 +1,96 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_LM_H
+#define _SDE_HW_LM_H
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_mdp_util.h"
+
+struct sde_hw_mixer;
+
+struct sde_hw_mixer_cfg {
+ u32 out_width;
+ u32 out_height;
+ bool right_mixer;
+ int flags;
+};
+
+struct sde_hw_color3_cfg {
+ u8 keep_fg[SDE_STAGE_MAX];
+};
+
+/**
+ *
+ * struct sde_hw_lm_ops : Interface to the mixer Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_lm_ops {
+ /*
+ * Sets up mixer output width and height
+ * and border color if enabled
+ */
+ void (*setup_mixer_out)(struct sde_hw_mixer *ctx,
+ struct sde_hw_mixer_cfg *cfg);
+
+ /*
+ * Alpha blending configuration
+ * for the specified stage
+ */
+ void (*setup_blend_config)(struct sde_hw_mixer *ctx,
+ int stage,
+ struct sde_hw_blend_cfg *blend);
+
+ /*
+ * Alpha color component selection from either fg or bg
+ */
+ void (*setup_alpha_out)(struct sde_hw_mixer *ctx,
+ struct sde_hw_color3_cfg *cfg);
+
+ /**
+ * setup_border_color : enable/disable border color
+ */
+ void (*setup_border_color)(struct sde_hw_mixer *ctx,
+ struct sde_mdss_color *color,
+ u8 border_en);
+
+ void (*setup_gammacorrection)(struct sde_hw_mixer *mixer,
+ void *cfg);
+
+};
+
+struct sde_hw_mixer {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* lm */
+ enum sde_lm idx;
+ const struct sde_lm_cfg *cap;
+ const struct sde_mdp_cfg *mdp;
+ const struct sde_ctl_cfg *ctl;
+
+ /* ops */
+ struct sde_hw_lm_ops ops;
+};
+
+/**
+ * sde_hw_lm_init(): Initializes the mixer hw driver object.
+ * should be called once before accessing every mixer.
+ * @idx: mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+#endif /*_SDE_HW_LM_H */
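
[Editor's note] A sketch of bringing up one mixer through this interface, assuming mmio and cfg come from a hypothetical probe path; kernel context only:

        static int example_setup_mixer(void __iomem *mmio, struct sde_mdss_cfg *cfg)
        {
                struct sde_hw_mixer *lm;
                struct sde_hw_mixer_cfg mixer_cfg = {
                        .out_width = 1080,   /* illustrative output size */
                        .out_height = 1920,
                        .right_mixer = false,
                };

                lm = sde_hw_lm_init(LM_0, mmio, cfg);
                if (IS_ERR(lm))
                        return PTR_ERR(lm);

                lm->ops.setup_mixer_out(lm, &mixer_cfg);
                return 0;
        }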
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c
new file mode 100644
index 000000000000..f7181ab9ed3d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.c
@@ -0,0 +1,364 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include "sde_hwio.h"
+#include "sde_hw_mdp_ctl.h"
+
+#define CTL_LAYER(lm) \
+ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT(lm) \
+ (0x40 + (((lm) - LM_0) * 0x004))
+#define CTL_TOP 0x014
+#define CTL_FLUSH 0x018
+#define CTL_START 0x01C
+#define CTL_PACK_3D 0x020
+#define CTL_SW_RESET 0x030
+#define CTL_LAYER_EXTN_OFFSET 0x40
+
+#define SDE_REG_RESET_TIMEOUT_COUNT 20
+
+static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
+ struct sde_mdss_cfg *m,
+ void __iomem *addr,
+ struct sde_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->ctl_count; i++) {
+ if (ctl == m->ctl[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->ctl[i].base;
+ b->hwversion = m->hwversion;
+ return &m->ctl[i];
+ }
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static int _mixer_stages(const struct sde_lm_cfg *mixer, int count,
+ enum sde_lm lm)
+{
+ int i;
+ int stages = -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ if (lm == mixer[i].id) {
+ stages = mixer[i].sblk->maxblendstages;
+ break;
+ }
+ }
+
+ return stages;
+}
+
+static inline void sde_hw_ctl_force_start(struct sde_hw_ctl *ctx)
+{
+ SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline void sde_hw_ctl_setup_flush(struct sde_hw_ctl *ctx, u32 flushbits)
+{
+ SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, flushbits);
+}
+
+static inline int sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx,
+ u32 *flushbits, enum sde_sspp sspp)
+{
+ switch (sspp) {
+ case SSPP_VIG0:
+ *flushbits |= BIT(0);
+ break;
+ case SSPP_VIG1:
+ *flushbits |= BIT(1);
+ break;
+ case SSPP_VIG2:
+ *flushbits |= BIT(2);
+ break;
+ case SSPP_VIG3:
+ *flushbits |= BIT(18);
+ break;
+ case SSPP_RGB0:
+ *flushbits |= BIT(3);
+ break;
+ case SSPP_RGB1:
+ *flushbits |= BIT(4);
+ break;
+ case SSPP_RGB2:
+ *flushbits |= BIT(5);
+ break;
+ case SSPP_RGB3:
+ *flushbits |= BIT(19);
+ break;
+ case SSPP_DMA0:
+ *flushbits |= BIT(11);
+ break;
+ case SSPP_DMA1:
+ *flushbits |= BIT(12);
+ break;
+ case SSPP_CURSOR0:
+ *flushbits |= BIT(22);
+ break;
+ case SSPP_CURSOR1:
+ *flushbits |= BIT(23);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int sde_hw_ctl_get_bitmask_mixer(struct sde_hw_ctl *ctx,
+ u32 *flushbits, enum sde_lm lm)
+{
+ switch (lm) {
+ case LM_0:
+ *flushbits |= BIT(6);
+ break;
+ case LM_1:
+ *flushbits |= BIT(7);
+ break;
+ case LM_2:
+ *flushbits |= BIT(8);
+ break;
+ case LM_3:
+ *flushbits |= BIT(9);
+ break;
+ case LM_4:
+ *flushbits |= BIT(10);
+ break;
+ case LM_5:
+ *flushbits |= BIT(20);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *flushbits |= BIT(17); /* CTL */
+ return 0;
+}
+
+static inline int sde_hw_ctl_get_bitmask_dspp(struct sde_hw_ctl *ctx,
+ u32 *flushbits, enum sde_dspp dspp)
+{
+ switch (dspp) {
+ case DSPP_0:
+ *flushbits |= BIT(13);
+ break;
+ case DSPP_1:
+ *flushbits |= BIT(14);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int sde_hw_ctl_get_bitmask_intf(struct sde_hw_ctl *ctx,
+ u32 *flushbits, enum sde_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ *flushbits |= BIT(31);
+ break;
+ case INTF_1:
+ *flushbits |= BIT(30);
+ break;
+ case INTF_2:
+ *flushbits |= BIT(29);
+ break;
+ case INTF_3:
+ *flushbits |= BIT(28);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int sde_hw_ctl_get_bitmask_cdm(struct sde_hw_ctl *ctx,
+ u32 *flushbits, enum sde_cdm cdm)
+{
+ switch (cdm) {
+ case CDM_0:
+ *flushbits |= BIT(26);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ int count = SDE_REG_RESET_TIMEOUT_COUNT;
+ int reset;
+
+ SDE_REG_WRITE(c, CTL_SW_RESET, 0x1);
+
+ for (; count > 0; count--) {
+ /* insert small delay to avoid spinning the cpu while waiting */
+ usleep_range(20, 50);
+ reset = SDE_REG_READ(c, CTL_SW_RESET);
+ if (reset == 0)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
+ enum sde_lm lm,
+ struct sde_hw_stage_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 mixercfg, mixercfg_ext = 0;
+ int i, j;
+ int stages;
+ int pipes_per_stage;
+
+ stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+ if (WARN_ON(stages < 0))
+ return;
+
+ if (test_bit(SDE_MIXER_SOURCESPLIT,
+ &ctx->mixer_hw_caps->features))
+ pipes_per_stage = PIPES_PER_STAGE;
+ else
+ pipes_per_stage = 1;
+
+ mixercfg = cfg->border_enable << 24; /* BORDER_OUT */
+
+ for (i = 0; i <= stages; i++) {
+ for (j = 0; j < pipes_per_stage; j++) {
+ switch (cfg->stage[i][j]) {
+ case SSPP_VIG0:
+ mixercfg |= (i + 1) << 0;
+ mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 0;
+ break;
+ case SSPP_VIG1:
+ mixercfg |= (i + 1) << 3;
+ mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 2;
+ break;
+ case SSPP_VIG2:
+ mixercfg |= (i + 1) << 6;
+ mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 4;
+ break;
+ case SSPP_VIG3:
+ mixercfg |= (i + 1) << 26;
+ mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 6;
+ break;
+ case SSPP_RGB0:
+ mixercfg |= (i + 1) << 9;
+ mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 8;
+ break;
+ case SSPP_RGB1:
+ mixercfg |= (i + 1) << 12;
+ mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 10;
+ break;
+ case SSPP_RGB2:
+ mixercfg |= (i + 1) << 15;
+ mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 12;
+ break;
+ case SSPP_RGB3:
+ mixercfg |= (i + 1) << 29;
+ mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 14;
+ break;
+ case SSPP_DMA0:
+ mixercfg |= (i + 1) << 18;
+ mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 16;
+ break;
+ case SSPP_DMA1:
+ mixercfg |= (i + 1) << 21;
+ mixercfg_ext |= ((i > SDE_STAGE_5) ? 1:0) << 18;
+ break;
+ case SSPP_CURSOR0:
+ mixercfg_ext |= (i + 1) << 20;
+ break;
+ case SSPP_CURSOR1:
+ mixercfg_ext |= (i + 1) << 26;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+ SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+}
+
+static void sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
+ struct sde_hw_intf_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_cfg = 0;
+
+ intf_cfg |= (cfg->intf & 0xF) << 4;
+
+ if (cfg->wb)
+ intf_cfg |= (cfg->wb & 0x3) + 2;
+
+ if (cfg->mode_3d) {
+ intf_cfg |= BIT(19);
+ intf_cfg |= (cfg->mode_3d - 1) << 20;
+ }
+
+ SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_flush = sde_hw_ctl_setup_flush;
+ ops->setup_start = sde_hw_ctl_force_start;
+ ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
+ ops->reset = sde_hw_ctl_reset_control;
+ ops->setup_blendstage = sde_hw_ctl_setup_blendstage;
+ ops->get_bitmask_sspp = sde_hw_ctl_get_bitmask_sspp;
+ ops->get_bitmask_mixer = sde_hw_ctl_get_bitmask_mixer;
+ ops->get_bitmask_dspp = sde_hw_ctl_get_bitmask_dspp;
+ ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf;
+ ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
+}
+
+struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_ctl *c;
+ struct sde_ctl_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _ctl_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("Error Panic\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->caps = cfg;
+ _setup_ctl_ops(&c->ops, c->caps->features);
+ c->idx = idx;
+ c->mixer_count = m->mixer_count;
+ c->mixer_hw_caps = m->mixer;
+
+ return c;
+}
+
+void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx)
+{
+ kfree(ctx);
+}
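
[Editor's note] A sketch of a control-path kickoff built from the ops above, assuming the caller already holds a ctl from sde_hw_ctl_init() and knows which blocks changed this frame; the chosen block ids are illustrative:

        static void example_ctl_kickoff(struct sde_hw_ctl *ctl)
        {
                u32 flushbits = 0;

                /* collect flush bits for the blocks touched this frame */
                ctl->ops.get_bitmask_sspp(ctl, &flushbits, SSPP_VIG0);
                ctl->ops.get_bitmask_mixer(ctl, &flushbits, LM_0);
                ctl->ops.get_bitmask_intf(ctl, &flushbits, INTF_1);

                /* latch the double-buffered registers, then kick off */
                ctl->ops.setup_flush(ctl, flushbits);
                ctl->ops.setup_start(ctl);
        }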
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h
new file mode 100644
index 000000000000..d46064c57ba4
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_ctl.h
@@ -0,0 +1,137 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_MDP_CTL_H
+#define _SDE_HW_MDP_CTL_H
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_catalog.h"
+
+struct sde_hw_ctl;
+/**
+ * struct sde_hw_stage_cfg - blending stage cfg
+ * @stage : array of pipes staged per blend stage
+ * @border_enable : enable border color while blending
+ */
+struct sde_hw_stage_cfg {
+ enum sde_sspp stage[SDE_STAGE_MAX][PIPES_PER_STAGE];
+ u8 border_enable;
+};
+
+/**
+ * struct sde_hw_intf_cfg : Describes how the mdp writes data to
+ * output interface
+ * @intf : Interface id
+ * @wb: writeback id
+ * @mode_3d: 3d mux configuration
+ */
+struct sde_hw_intf_cfg {
+ enum sde_intf intf;
+ enum sde_wb wb;
+ enum sde_3d_blend_mode mode_3d;
+};
+
+/**
+ * struct sde_hw_ctl_ops - Interface to the wb Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_ctl_ops {
+ /**
+ * kickoff hw operation for Sw controlled interfaces
+ * DSI cmd mode and WB interface are SW controlled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*setup_start)(struct sde_hw_ctl *ctx);
+
+ /**
+ * FLUSH the modules for this control path
+ * @ctx : ctl path ctx pointer
+ * @flushbits : module flushmask
+ */
+ void (*setup_flush)(struct sde_hw_ctl *ctx,
+ u32 flushbits);
+
+ /**
+ * Setup ctl_path interface config
+ * @ctx
+ * @cfg : interface config structure pointer
+ */
+ void (*setup_intf_cfg)(struct sde_hw_ctl *ctx,
+ struct sde_hw_intf_cfg *cfg);
+
+ int (*reset)(struct sde_hw_ctl *c);
+
+ int (*get_bitmask_sspp)(struct sde_hw_ctl *ctx,
+ u32 *flushbits,
+ enum sde_sspp blk);
+
+ int (*get_bitmask_mixer)(struct sde_hw_ctl *ctx,
+ u32 *flushbits,
+ enum sde_lm blk);
+
+ int (*get_bitmask_dspp)(struct sde_hw_ctl *ctx,
+ u32 *flushbits,
+ enum sde_dspp blk);
+
+ int (*get_bitmask_intf)(struct sde_hw_ctl *ctx,
+ u32 *flushbits,
+ enum sde_intf blk);
+
+ int (*get_bitmask_cdm)(struct sde_hw_ctl *ctx,
+ u32 *flushbits,
+ enum sde_cdm blk);
+
+ void (*setup_blendstage)(struct sde_hw_ctl *ctx,
+ enum sde_lm lm,
+ struct sde_hw_stage_cfg *cfg);
+};
+
+/**
+ * struct sde_hw_ctl : CTL PATH driver object
+ * @hw: block hw io map
+ * @idx: control path index
+ * @caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @ops: operation list
+ */
+struct sde_hw_ctl {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* ctl path */
+ int idx;
+ const struct sde_ctl_cfg *caps;
+ int mixer_count;
+ const struct sde_lm_cfg *mixer_hw_caps;
+
+ /* ops */
+ struct sde_hw_ctl_ops ops;
+};
+
+/**
+ * sde_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * should be called before accessing every ctl path registers.
+ * @idx: ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_ctl_destroy(): Destroys ctl driver context
+ * should be called to free the context
+ */
+void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx);
+
+#endif /*_SDE_HW_MDP_CTL_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_hwio.h b/drivers/gpu/drm/msm/sde/sde_hw_mdp_hwio.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_hwio.h
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_util.c b/drivers/gpu/drm/msm/sde/sde_hw_mdp_util.c
new file mode 100644
index 000000000000..6be57b06a7cf
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_util.c
@@ -0,0 +1,73 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdp_util.h"
+
+void sde_hw_reg_write(void __iomem *base, u32 blk_off, u32 reg_off, u32 val)
+{
+ writel_relaxed(val, base + blk_off + reg_off);
+}
+
+u32 sde_hw_reg_read(void __iomem *base, u32 blk_off, u32 reg_off)
+{
+ return readl_relaxed(base + blk_off + reg_off);
+}
+
+void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ struct sde_csc_cfg *data)
+{
+ u32 val;
+
+ /* Matrix coeff */
+ val = (data->csc_mv[0] & 0x1FF) |
+ ((data->csc_mv[1] & 0x1FF) << 16);
+ SDE_REG_WRITE(c, csc_reg_off, val);
+ val = (data->csc_mv[2] & 0x1FF) |
+ ((data->csc_mv[3] & 0x1FF) << 16);
+ SDE_REG_WRITE(c, csc_reg_off + 0x4, val);
+ val = (data->csc_mv[4] & 0x1FF) |
+ ((data->csc_mv[5] & 0x1FF) << 16);
+ SDE_REG_WRITE(c, csc_reg_off + 0x8, val);
+ val = (data->csc_mv[6] & 0x1FF) |
+ ((data->csc_mv[7] & 0x1FF) << 16);
+ SDE_REG_WRITE(c, csc_reg_off + 0xc, val);
+ val = data->csc_mv[8] & 0x1FF;
+ SDE_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+ /* Pre clamp */
+ val = (data->csc_pre_lv[0] << 8) | data->csc_pre_lv[1];
+ SDE_REG_WRITE(c, csc_reg_off + 0x14, val);
+ val = (data->csc_pre_lv[2] << 8) | data->csc_pre_lv[3];
+ SDE_REG_WRITE(c, csc_reg_off + 0x18, val);
+ val = (data->csc_pre_lv[4] << 8) | data->csc_pre_lv[5];
+ SDE_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+ /* Post clamp */
+ val = (data->csc_post_lv[0] << 8) | data->csc_post_lv[1];
+ SDE_REG_WRITE(c, csc_reg_off + 0x20, val);
+ val = (data->csc_post_lv[2] << 8) | data->csc_post_lv[3];
+ SDE_REG_WRITE(c, csc_reg_off + 0x24, val);
+ val = (data->csc_post_lv[4] << 8) | data->csc_post_lv[5];
+ SDE_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+ /* Pre-Bias */
+ SDE_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+ SDE_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+ SDE_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+ /* Post-Bias */
+ SDE_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+ SDE_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+ SDE_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
+
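
[Editor's note] The two-coefficients-per-register packing above is easy to verify standalone. A minimal sketch, assuming an s.8 fixed-point identity matrix; the coefficient format is an assumption for the example:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                /* identity matrix, 1.0 == 0x100 in the assumed s.8 format */
                uint32_t csc_mv[9] = { 0x100, 0, 0, 0, 0x100, 0, 0, 0, 0x100 };

                /* two 9-bit entries per register: low half, then high half */
                uint32_t reg0 = (csc_mv[0] & 0x1FF) | ((csc_mv[1] & 0x1FF) << 16);
                uint32_t reg1 = (csc_mv[2] & 0x1FF) | ((csc_mv[3] & 0x1FF) << 16);

                printf("MATRIX_COEFF_0 = 0x%08x\n", reg0); /* 0x00000100 */
                printf("MATRIX_COEFF_1 = 0x%08x\n", reg1); /* 0x00000000 */
                return 0;
        }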
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdp_util.h b/drivers/gpu/drm/msm/sde/sde_hw_mdp_util.h
new file mode 100644
index 000000000000..b57e64eba423
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdp_util.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_MDP_UTIL_H
+#define _SDE_HW_MDP_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "sde_hw_mdss.h"
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absolute IO address
+ * @base_off: mdp register mapped offset
+ * @blk_off: pipe offset relative to mdss offset
+ * @length: length of register block offset
+ * @hwversion: mdss hw version number
+ */
+struct sde_hw_blk_reg_map {
+ void __iomem *base_off;
+ u32 blk_off;
+ u32 length;
+ u32 hwversion;
+};
+
+void sde_hw_reg_write(void __iomem *base, u32 blk_offset, u32 reg, u32 val);
+
+u32 sde_hw_reg_read(void __iomem *base, u32 blk_offset, u32 reg);
+
+static inline void SDE_REG_WRITE(struct sde_hw_blk_reg_map *c, u32 reg_off,
+ u32 val)
+{
+ sde_hw_reg_write(c->base_off, c->blk_off, reg_off, val);
+}
+
+static inline u32 SDE_REG_READ(struct sde_hw_blk_reg_map *c, u32 reg_off)
+{
+ return sde_hw_reg_read(c->base_off, c->blk_off, reg_off);
+}
+
+void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ struct sde_csc_cfg *data);
+
+#endif /* _SDE_HW_MDP_UTIL_H */
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
new file mode 100644
index 000000000000..075e78042f17
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -0,0 +1,358 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_MDSS_H
+#define _SDE_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#define SDE_NONE 0
+#define SDE_CSC_MATRIX_COEFF_SIZE 9
+#define SDE_CSC_CLAMP_SIZE 6
+#define SDE_CSC_BIAS_SIZE 3
+
+#define SDE_MAX_PLANES 4
+#define PIPES_PER_STAGE 2
+#define VALID_ROT_WB_FORMAT BIT(0)
+
+enum sde_mdp {
+ MDP_TOP = 0x1,
+ MDP_MAX,
+};
+
+enum sde_sspp {
+ SSPP_NONE,
+ SSPP_VIG0,
+ SSPP_VIG1,
+ SSPP_VIG2,
+ SSPP_VIG3,
+ SSPP_RGB0,
+ SSPP_RGB1,
+ SSPP_RGB2,
+ SSPP_RGB3,
+ SSPP_DMA0,
+ SSPP_DMA1,
+ SSPP_DMA2,
+ SSPP_DMA3,
+ SSPP_CURSOR0,
+ SSPP_CURSOR1,
+ SSPP_MAX
+};
+
+enum sde_sspp_type {
+ SSPP_TYPE_VIG,
+ SSPP_TYPE_RGB,
+ SSPP_TYPE_DMA,
+ SSPP_TYPE_CURSOR,
+ SSPP_TYPE_MAX
+};
+
+enum sde_lm {
+ LM_0 = 1,
+ LM_1,
+ LM_2,
+ LM_3,
+ LM_4,
+ LM_5,
+ LM_6,
+ LM_MAX
+};
+
+enum sde_stage {
+ SDE_STAGE_BASE = 0,
+ SDE_STAGE_0,
+ SDE_STAGE_1,
+ SDE_STAGE_2,
+ SDE_STAGE_3,
+ SDE_STAGE_4,
+ SDE_STAGE_5,
+ SDE_STAGE_6,
+ SDE_STAGE_MAX
+};
+enum sde_dspp {
+ DSPP_0 = 1,
+ DSPP_1,
+ DSPP_2,
+ DSPP_3,
+ DSPP_MAX
+};
+
+enum sde_ctl {
+ CTL_0 = 1,
+ CTL_1,
+ CTL_2,
+ CTL_3,
+ CTL_4,
+ CTL_MAX
+};
+
+enum sde_cdm {
+ CDM_0 = 1,
+ CDM_1,
+ CDM_MAX
+};
+
+enum sde_pingpong {
+ PINGPONG_0 = 1,
+ PINGPONG_1,
+ PINGPONG_2,
+ PINGPONG_3,
+ PINGPONG_4,
+ PINGPONG_S0,
+ PINGPONG_MAX
+};
+
+enum sde_intf {
+ INTF_0 = 1,
+ INTF_1,
+ INTF_2,
+ INTF_3,
+ INTF_4,
+ INTF_5,
+ INTF_6,
+ INTF_MAX
+};
+
+enum sde_intf_type {
+ INTF_NONE = 0x0,
+ INTF_DSI = 0x1,
+ INTF_HDMI = 0x3,
+ INTF_LCDC = 0x5,
+ INTF_EDP = 0x9,
+ INTF_TYPE_MAX
+};
+
+enum sde_intf_mode {
+ INTF_MODE_NONE = 0,
+ INTF_MODE_CMD,
+ INTF_MODE_VIDEO,
+ INTF_MODE_WB_BLOCK,
+ INTF_MODE_WB_LINE,
+ INTF_MODE_MAX
+};
+
+enum sde_wb {
+ WB_0 = 1,
+ WB_1,
+ WB_2,
+ WB_3,
+ WB_MAX
+};
+
+enum sde_ad {
+ AD_0 = 0x1,
+ AD_1,
+ AD_MAX
+};
+
+enum sde_cwb {
+ CWB_0 = 0x1,
+ CWB_1,
+ CWB_2,
+ CWB_3,
+ CWB_MAX
+};
+
+enum sde_wd_timer {
+ WD_TIMER_0 = 0x1,
+ WD_TIMER_1,
+ WD_TIMER_2,
+ WD_TIMER_3,
+ WD_TIMER_4,
+ WD_TIMER_5,
+ WD_TIMER_MAX
+};
+
+/**
+ * MDP HW,Component order color map
+ */
+enum {
+ C0_G_Y = 0,
+ C1_B_Cb = 1,
+ C2_R_Cr = 2,
+ C3_ALPHA = 3
+};
+
+/**
+ * enum sde_mdp_plane_type - defines how the color components are packed
+ * @SDE_MDP_PLANE_INTERLEAVED : Color components in single plane
+ * @SDE_MDP_PLANE_PLANAR : Color component in separate planes
+ * @SDE_MDP_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate
+ * plane
+ */
+enum sde_mdp_plane_type {
+ SDE_MDP_PLANE_INTERLEAVED,
+ SDE_MDP_PLANE_PLANAR,
+ SDE_MDP_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum sde_mdp_chroma_samp_type - chroma sub-samplng type
+ * @SDE_MDP_CHROMA_RGB : no chroma subsampling
+ * @SDE_MDP_CHROMA_H2V1 : chroma pixels are horizontally subsampled
+ * @SDE_MDP_CHROMA_H1V2 : chroma pixels are vertically subsampled
+ * @SDE_MDP_CHROMA_420 : 420 subsampling
+ */
+enum sde_mdp_chroma_samp_type {
+ SDE_MDP_CHROMA_RGB,
+ SDE_MDP_CHROMA_H2V1,
+ SDE_MDP_CHROMA_H1V2,
+ SDE_MDP_CHROMA_420
+};
+
+/**
+ * enum sde_mdp_fetch_type - defines how the MDP HW fetches data
+ * @SDE_MDP_FETCH_LINEAR : fetch is line by line
+ * @SDE_MDP_FETCH_TILE : fetches data in Z order from a tile
+ * @SDE_MDP_FETCH_UBWC : fetch and decompress data
+ */
+enum sde_mdp_fetch_type {
+ SDE_MDP_FETCH_LINEAR,
+ SDE_MDP_FETCH_TILE,
+ SDE_MDP_FETCH_UBWC
+};
+
+/**
+ * Value of enum chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+ COLOR_1BIT = 0,
+ COLOR_5BIT = 1,
+ COLOR_6BIT = 2,
+ COLOR_8BIT = 3,
+};
+
+enum sde_alpha_blend_type {
+ ALPHA_FG_CONST = 0,
+ ALPHA_BG_CONST,
+ ALPHA_FG_PIXEL,
+ ALPHA_BG_PIXEL,
+ ALPHA_MAX
+};
+
+
+/**
+ * enum sde_3d_blend_mode
+ * Describes how the 3d data is blended
+ * @BLEND_3D_NONE : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : vertical row interleaving
+ * @BLEND_3D_COL_INT : column interleaving
+ * @BLEND_3D_MAX :
+ */
+enum sde_3d_blend_mode {
+ BLEND_3D_NONE = 0,
+ BLEND_3D_FRAME_INT,
+ BLEND_3D_H_ROW_INT,
+ BLEND_3D_V_ROW_INT,
+ BLEND_3D_COL_INT,
+ BLEND_3D_MAX
+};
+
+struct addr_info {
+ u32 plane[SDE_MAX_PLANES];
+};
+
+/**
+ * struct sde_mdp_format_params - defines the format configuration which
+ * allows MDP HW to correctly fetch and decode the format
+ * @format : format id, used by drm-driver only to map drm fourcc
+ * @flag
+ * @chroma_sample
+ * @fetch_planes
+ * @unpack_align_msb
+ * @unpack_tight
+ * @unpack_count
+ * @bpp
+ * @alpha_enable
+ * @fetch_mode
+ * @bits
+ * @element
+ */
+struct sde_mdp_format_params {
+ u32 format;
+ enum sde_mdp_plane_type fetch_planes;
+ u8 element[SDE_MAX_PLANES];
+ u8 bits[SDE_MAX_PLANES];
+ enum sde_mdp_chroma_samp_type chroma_sample;
+ u8 unpack_align_msb; /* 0 to LSB, 1 to MSB */
+ u8 unpack_tight; /* 0 for loose, 1 for tight */
+ u8 unpack_count; /* 0 = 1 component, 1 = 2 component ... */
+ u8 bpp; /* Bytes per pixel */
+ u8 alpha_enable; /* source has alpha */
+ enum sde_mdp_fetch_type fetch_mode;
+ u8 is_yuv;
+ u32 flag;
+};
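
[Editor's note] An illustrative entry showing how these fields combine for an interleaved ARGB8888 source; the element order and the zero format id are assumptions for the example, not values from this patch:

        #include "sde_hw_mdss.h"

        static const struct sde_mdp_format_params example_argb8888 = {
                .format = 0, /* caller maps this from the drm fourcc */
                .fetch_planes = SDE_MDP_PLANE_INTERLEAVED,
                .element = { C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb },
                .bits = { COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT },
                .chroma_sample = SDE_MDP_CHROMA_RGB,
                .unpack_align_msb = 0,
                .unpack_tight = 1,
                .unpack_count = 3, /* 4 components, per the 0-based convention */
                .bpp = 4,
                .alpha_enable = 1,
                .fetch_mode = SDE_MDP_FETCH_LINEAR,
                .is_yuv = 0,
        };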
+
+/**
+ * struct sde_hw_source_info - format information of the source pixel data
+ * @format : pixel format parameters
+ * @width : image width
+ * @height : image height
+ * @num_planes : number of planes including the meta data planes for the
+ * compressed formats
+ * @ystride : per plane stride information
+ */
+struct sde_hw_source_info {
+ struct sde_mdp_format_params *format;
+ u32 width;
+ u32 height;
+ u32 num_planes;
+ u32 ystride[SDE_MAX_PLANES];
+};
+
+struct sde_rect {
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+};
+
+struct sde_hw_alpha_cfg {
+ u32 const_alpha;
+ enum sde_alpha_blend_type alpha_sel;
+ u8 inv_alpha_sel;
+ u8 mod_alpha;
+ u8 inv_mode_alpha;
+};
+
+struct sde_hw_blend_cfg {
+ struct sde_hw_alpha_cfg fg;
+ struct sde_hw_alpha_cfg bg;
+};
+
+struct sde_csc_cfg {
+ uint32_t csc_mv[SDE_CSC_MATRIX_COEFF_SIZE];
+ uint32_t csc_pre_bv[SDE_CSC_BIAS_SIZE];
+ uint32_t csc_post_bv[SDE_CSC_BIAS_SIZE];
+ uint32_t csc_pre_lv[SDE_CSC_CLAMP_SIZE];
+ uint32_t csc_post_lv[SDE_CSC_CLAMP_SIZE];
+};
+
+/**
+ * struct sde_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct sde_mdss_color {
+ u32 color_0;
+ u32 color_1;
+ u32 color_2;
+ u32 color_3;
+};
+
+#endif /* _SDE_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
new file mode 100644
index 000000000000..6bee52fd670d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
@@ -0,0 +1,159 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_pingpong.h"
+
+#define PP_TEAR_CHECK_EN 0x000
+#define PP_SYNC_CONFIG_VSYNC 0x004
+#define PP_SYNC_CONFIG_HEIGHT 0x008
+#define PP_SYNC_WRCOUNT 0x00C
+#define PP_VSYNC_INIT_VAL 0x010
+#define PP_INT_COUNT_VAL 0x014
+#define PP_SYNC_THRESH 0x018
+#define PP_START_POS 0x01C
+#define PP_RD_PTR_IRQ 0x020
+#define PP_WR_PTR_IRQ 0x024
+#define PP_OUT_LINE_COUNT 0x028
+#define PP_LINE_COUNT 0x02C
+#define PP_AUTOREFRESH_CONFIG 0x030
+
+#define PP_FBC_MODE 0x034
+#define PP_FBC_BUDGET_CTL 0x038
+#define PP_FBC_LOSSY_MODE 0x03C
+#define PP_DSC_MODE 0x0a0
+#define PP_DCE_DATA_IN_SWAP 0x0ac
+#define PP_DCE_DATA_OUT_SWAP 0x0c8
+
+static struct sde_pingpong_cfg *_pingpong_offset(enum sde_pingpong pp,
+ struct sde_mdss_cfg *m,
+ void __iomem *addr,
+ struct sde_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->pingpong_count; i++) {
+ if (pp == m->pingpong[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->pingpong[i].base;
+ b->hwversion = m->hwversion;
+ return &m->pingpong[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int sde_hw_pp_setup_te_config(struct sde_hw_pingpong *pp,
+ struct sde_hw_tear_check *te)
+{
+ struct sde_hw_blk_reg_map *c = &pp->hw;
+ int cfg;
+
+ cfg = BIT(19); /* VSYNC_COUNTER_EN */
+ if (te->hw_vsync_mode)
+ cfg |= BIT(20);
+
+ cfg |= te->vsync_count;
+
+ SDE_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ SDE_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+ SDE_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+ SDE_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+ SDE_REG_WRITE(c, PP_START_POS, te->start_pos);
+ SDE_REG_WRITE(c, PP_SYNC_THRESH,
+ ((te->sync_threshold_continue << 16) |
+ te->sync_threshold_start));
+ SDE_REG_WRITE(c, PP_SYNC_WRCOUNT,
+ (te->start_pos + te->sync_threshold_start + 1));
+
+ return 0;
+}
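
[Editor's note] The vsync counter written above is derived from the vsync clock, refresh rate and line count. A minimal standalone sketch with illustrative numbers; the 19.2 MHz clock and 2000-line total are assumptions for the example:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t vsync_hz = 19200000; /* assumed MDP vsync clock */
                uint32_t fps = 60, total_lines = 2000;

                /* clock ticks per line */
                uint32_t vsync_count = vsync_hz / (fps * total_lines); /* 160 */
                uint32_t cfg = (1u << 19) | vsync_count; /* VSYNC_COUNTER_EN */

                printf("PP_SYNC_CONFIG_VSYNC = 0x%08x\n", cfg);
                return 0;
        }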
+
+int sde_hw_pp_setup_autorefresh_config(struct sde_hw_pingpong *pp,
+ struct sde_hw_autorefresh *cfg)
+{
+ struct sde_hw_blk_reg_map *c = &pp->hw;
+ u32 refresh_cfg;
+
+ if (cfg->enable)
+ refresh_cfg = BIT(31) | cfg->frame_count;
+ else
+ refresh_cfg = 0;
+
+ SDE_REG_WRITE(c, PP_AUTOREFRESH_CONFIG,
+ refresh_cfg);
+
+ return 0;
+}
+
+int sde_hw_pp_setup_dsc_compression(struct sde_hw_pingpong *pp,
+ struct sde_hw_dsc_cfg *cfg)
+{
+ return 0;
+}

+int sde_hw_pp_enable_te(struct sde_hw_pingpong *pp, bool enable)
+{
+ struct sde_hw_blk_reg_map *c = &pp->hw;
+
+ SDE_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+ return 0;
+}
+
+int sde_hw_pp_get_vsync_info(struct sde_hw_pingpong *pp,
+ struct sde_hw_pp_vsync_info *info)
+{
+ struct sde_hw_blk_reg_map *c = &pp->hw;
+
+ info->init_val = SDE_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xffff;
+ info->vsync_count = SDE_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xffff;
+ info->line_count = SDE_REG_READ(c, PP_INT_COUNT_VAL) & 0xffff;
+
+ return 0;
+}
+
+static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_tearcheck = sde_hw_pp_setup_te_config;
+ ops->enable_tearcheck = sde_hw_pp_enable_te;
+ ops->get_vsync_info = sde_hw_pp_get_vsync_info;
+ ops->setup_autorefresh = sde_hw_pp_setup_autorefresh_config;
+ ops->setup_dsc = sde_hw_pp_setup_dsc_compression;
+}
+
+struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_pingpong *c;
+ struct sde_pingpong_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _pingpong_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->pingpong_hw_cap = cfg;
+ _setup_pingpong_ops(&c->ops, c->pingpong_hw_cap->features);
+
+ return c;
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
new file mode 100644
index 000000000000..7cb4cf184e48
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
@@ -0,0 +1,115 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_PINGPONG_H
+#define _SDE_HW_PINGPONG_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_mdp_util.h"
+
+struct sde_hw_pingpong;
+
+struct sde_hw_tear_check {
+ /*
+ * Ratio of the MDP vsync clock frequency (Hz) to the panel
+ * refresh rate, divided by the number of lines
+ */
+ u32 vsync_count;
+ u32 sync_cfg_height;
+ u32 vsync_init_val;
+ u32 sync_threshold_start;
+ u32 sync_threshold_continue;
+ u32 start_pos;
+ u32 rd_ptr_irq;
+ u8 hw_vsync_mode;
+};
+
+struct sde_hw_autorefresh {
+ bool enable;
+ u32 frame_count;
+};
+
+struct sde_hw_pp_vsync_info {
+ u32 init_val; /* value of rd pointer at vsync edge */
+ u32 vsync_count; /* mdp clocks to complete one line */
+ u32 line_count; /* current line count */
+};
+
+struct sde_hw_dsc_cfg {
+ u8 enable;
+};
+
+/**
+ *
+ * struct sde_hw_pingpong_ops : Interface to the pingpong Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_tearcheck :
+ * @enable_tearcheck :
+ * @get_vsync_info :
+ * @setup_autorefresh :
+ * @setup_dsc :
+ */
+struct sde_hw_pingpong_ops {
+ /**
+ * enables vsync generation, sets up the init value of the
+ * read pointer and programs the tear check configuration
+ */
+ int (*setup_tearcheck)(struct sde_hw_pingpong *pp,
+ struct sde_hw_tear_check *cfg);
+
+ /**
+ * enables tear check block
+ */
+ int (*enable_tearcheck)(struct sde_hw_pingpong *pp,
+ bool enable);
+
+ /**
+ * provides the programmed and current
+ * line_count
+ */
+ int (*get_vsync_info)(struct sde_hw_pingpong *pp,
+ struct sde_hw_pp_vsync_info *info);
+
+ /**
+ * configure and enable the autorefresh config
+ */
+ int (*setup_autorefresh)(struct sde_hw_pingpong *pp,
+ struct sde_hw_autorefresh *cfg);
+
+ /**
+ * Program the dsc compression block
+ */
+ int (*setup_dsc)(struct sde_hw_pingpong *pp,
+ struct sde_hw_dsc_cfg *cfg);
+};
+
+struct sde_hw_pingpong {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* pingpong */
+ enum sde_pingpong idx;
+ const struct sde_pingpong_cfg *pingpong_hw_cap;
+
+ /* ops */
+ struct sde_hw_pingpong_ops ops;
+};
+
+/**
+ * sde_hw_pingpong_init(): Initializes the pingpong driver for the passed
+ * pingpong idx.
+ * @idx: pingpong index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+#endif /*_SDE_HW_PINGPONG_H */
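
[Editor's note] A sketch of enabling tear check on one pingpong via this interface, assuming mmio, cfg and a populated tear-check config from a hypothetical caller; kernel context only:

        static int example_enable_tearcheck(void __iomem *mmio,
                                            struct sde_mdss_cfg *cfg,
                                            struct sde_hw_tear_check *tc)
        {
                struct sde_hw_pingpong *pp;

                pp = sde_hw_pingpong_init(PINGPONG_0, mmio, cfg);
                if (IS_ERR(pp))
                        return PTR_ERR(pp);

                pp->ops.setup_tearcheck(pp, tc);
                pp->ops.enable_tearcheck(pp, true);
                return 0;
        }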
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
new file mode 100644
index 000000000000..9a3d25423b8a
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -0,0 +1,589 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_sspp.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_lm.h"
+
+#define SDE_MDP_FETCH_CONFIG_RESET_VALUE 0x00000087
+
+/* SDE_SSPP_SRC */
+#define SSPP_SRC_SIZE 0x00
+#define SSPP_SRC_XY 0x08
+#define SSPP_OUT_SIZE 0x0c
+#define SSPP_OUT_XY 0x10
+#define SSPP_SRC0_ADDR 0x14
+#define SSPP_SRC1_ADDR 0x18
+#define SSPP_SRC2_ADDR 0x1C
+#define SSPP_SRC3_ADDR 0x20
+#define SSPP_SRC_YSTRIDE0 0x24
+#define SSPP_SRC_YSTRIDE1 0x28
+#define SSPP_SRC_FORMAT 0x30
+#define SSPP_SRC_UNPACK_PATTERN 0x34
+#define SSPP_SRC_OP_MODE 0x38
+#define MDSS_MDP_OP_DEINTERLACE BIT(22)
+
+#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
+#define MDSS_MDP_OP_IGC_EN BIT(16)
+#define MDSS_MDP_OP_FLIP_UD BIT(14)
+#define MDSS_MDP_OP_FLIP_LR BIT(13)
+#define MDSS_MDP_OP_BWC_EN BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR 0x3c
+#define SSPP_FETCH_CONFIG 0x048
+#define SSPP_DANGER_LUT 0x60
+#define SSPP_SAFE_LUT 0x64
+#define SSPP_CREQ_LUT 0x68
+#define SSPP_DECIMATION_CONFIG 0xB4
+#define SSPP_SRC_ADDR_SW_STATUS 0x70
+#define SSPP_SW_PIX_EXT_C0_LR 0x100
+#define SSPP_SW_PIX_EXT_C0_TB 0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR 0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB 0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118
+#define SSPP_SW_PIX_EXT_C3_LR 0x120
+#define SSPP_SW_PIX_EXT_C3_TB 0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
+#define SSPP_UBWC_ERROR_STATUS 0x138
+#define SSPP_VIG_OP_MODE 0x200
+
+/* SDE_SSPP_SCALAR_QSEED2 */
+#define SCALE_CONFIG 0x04
+#define COMP0_3_PHASE_STEP_X 0x10
+#define COMP0_3_PHASE_STEP_Y 0x14
+#define COMP1_2_PHASE_STEP_X 0x18
+#define COMP1_2_PHASE_STEP_Y 0x1c
+#define COMP0_3_INIT_PHASE_X 0x20
+#define COMP0_3_INIT_PHASE_Y 0x24
+#define COMP1_2_INIT_PHASE_X 0x28
+#define COMP1_2_INIT_PHASE_Y 0x2C
+#define VIG_0_QSEED2_SHARP 0x30
+
+#define VIG_0_CSC_1_MATRIX_COEFF_0 0x20
+#define VIG_0_CSC_1_COMP_0_PRE_CLAMP 0x34
+#define VIG_0_CSC_1_COMP_0_POST_CLAMP 0x40
+#define VIG_0_CSC_1_COMP_0_PRE_BIAS 0x4C
+#define VIG_0_CSC_1_COMP_0_POST_BIAS 0x60
+
+/*
+ * MDP Solid fill configuration
+ * argb8888
+ */
+#define SSPP_SOLID_FILL 0x4037ff
+
+enum {
+ CSC = 0x1,
+ PA,
+ HIST,
+ SKIN_COL,
+ FOIL,
+ SKY_COL,
+ MEM_PROT_HUE,
+ MEM_PROT_SAT,
+ MEM_PROT_VAL,
+ MEM_PROT_CONT,
+ MEM_PROT_BLEND,
+ PA_SAT_ADJ
+};
+
+static inline int _sspp_subblk_offset(struct sde_hw_pipe *ctx,
+ int s_id,
+ u32 *idx)
+{
+ int rc = 0;
+ const struct sde_sspp_sub_blks *sblk = ctx->cap->sblk;
+
+ switch (s_id) {
+ case SDE_SSPP_SRC:
+ *idx = sblk->src_blk.base;
+ break;
+ case SDE_SSPP_SCALAR_QSEED2:
+ case SDE_SSPP_SCALAR_QSEED3:
+ case SDE_SSPP_SCALAR_RGB:
+ *idx = sblk->scalar_blk.base;
+ break;
+ case SDE_SSPP_CSC:
+ *idx = sblk->csc_blk.base;
+ break;
+ case SDE_SSPP_PA_V1:
+ *idx = sblk->pa_blk.base;
+ break;
+ case SDE_SSPP_HIST_V1:
+ *idx = sblk->hist_lut.base;
+ break;
+ case SDE_SSPP_PCC:
+ *idx = sblk->pcc_blk.base;
+ break;
+ default:
+ rc = -EINVAL;
+ pr_err("Unsupported SSPP sub-blk for this hw\n");
+ }
+
+ return rc;
+}
+
+static void _sspp_setup_opmode(struct sde_hw_pipe *ctx,
+ u32 op, u8 en)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 idx;
+ u32 opmode;
+
+ if (test_bit(SDE_SSPP_PA_V1, &ctx->cap->features)) {
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ opmode = SDE_REG_READ(c, SSPP_VIG_OP_MODE + idx);
+
+ /* ops */
+ switch (op) {
+ case CSC:
+ if (en)
+ /* CSC_1_EN and CSC_SRC_DATA_FORMAT*/
+ opmode |= BIT(18) | BIT(17);
+ else
+ opmode &= ~BIT(17);
+ break;
+ default:
+ pr_err(" Unsupported operation\n");
+ }
+ SDE_REG_WRITE(c, SSPP_VIG_OP_MODE + idx, opmode);
+ }
+}
+
+/**
+ * Setup source pixel format and flip configuration
+ */
+static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *cfg,
+ u32 flags)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ struct sde_mdp_format_params *fmt;
+ u32 chroma_samp, unpack, src_format;
+ u32 secure = 0;
+ u32 opmode = 0;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ opmode = SDE_REG_READ(c, SSPP_SRC_OP_MODE + idx);
+
+ /* format info */
+ fmt = cfg->src.format;
+ if (WARN_ON(!fmt))
+ return;
+
+ if (flags & SDE_SSPP_SECURE_OVERLAY_SESSION)
+ secure = 0xF;
+
+ if (flags & SDE_SSPP_FLIP_LR)
+ opmode |= MDSS_MDP_OP_FLIP_LR;
+ if (flags & SDE_SSPP_FLIP_UD)
+ opmode |= MDSS_MDP_OP_FLIP_UD;
+
+ chroma_samp = fmt->chroma_sample;
+ if (flags & SDE_SSPP_SOURCE_ROTATED_90) {
+ if (chroma_samp == SDE_MDP_CHROMA_H2V1)
+ chroma_samp = SDE_MDP_CHROMA_H1V2;
+ else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
+ chroma_samp = SDE_MDP_CHROMA_H2V1;
+ }
+
+ src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+ (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+ (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+ if (flags & SDE_SSPP_ROT_90)
+ src_format |= BIT(11); /* ROT90 */
+
+ if (fmt->alpha_enable &&
+ fmt->fetch_planes != SDE_MDP_PLANE_INTERLEAVED)
+ src_format |= BIT(8); /* SRCC3_EN */
+
+ unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) | (fmt->element[0] << 0);
+ src_format |= ((fmt->unpack_count - 1) << 12) |
+ (fmt->unpack_tight << 17) |
+ (fmt->unpack_align_msb << 18) |
+ ((fmt->bpp - 1) << 9);
+
+ if (fmt->fetch_mode != SDE_MDP_FETCH_LINEAR) {
+ opmode |= MDSS_MDP_OP_BWC_EN;
+ src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
+ SDE_REG_WRITE(c, SSPP_FETCH_CONFIG,
+ SDE_MDP_FETCH_CONFIG_RESET_VALUE |
+ ctx->highest_bank_bit << 18);
+ }
+
+ /* if this is YUV pixel format, enable CSC */
+ if (fmt->is_yuv)
+ src_format |= BIT(15);
+ _sspp_setup_opmode(ctx, CSC, fmt->is_yuv);
+
+ opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+ SDE_REG_WRITE(c, SSPP_SRC_FORMAT + idx, src_format);
+ SDE_REG_WRITE(c, SSPP_SRC_UNPACK_PATTERN + idx, unpack);
+ SDE_REG_WRITE(c, SSPP_SRC_OP_MODE + idx, opmode);
+ SDE_REG_WRITE(c, SSPP_SRC_ADDR_SW_STATUS + idx, secure);
+
+ /* clear previous UBWC error */
+ SDE_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
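
[Editor's note] The SSPP_SRC_FORMAT word assembled above can be reproduced in isolation. A minimal standalone sketch for an interleaved, 8-bit, 4-component RGB source; the field values mirror the enums in sde_hw_mdss.h and the resulting word is an example, not a value asserted by the patch:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t chroma_samp = 0;  /* SDE_MDP_CHROMA_RGB */
                uint32_t fetch_planes = 0; /* SDE_MDP_PLANE_INTERLEAVED */
                uint32_t bits8 = 3;        /* COLOR_8BIT */
                uint32_t components = 4, unpack_tight = 1, align_msb = 0;
                uint32_t bpp = 4;

                uint32_t src_format = (chroma_samp << 23) | (fetch_planes << 19) |
                        (bits8 << 6) | (bits8 << 4) | (bits8 << 2) | (bits8 << 0);
                src_format |= ((components - 1) << 12) | (unpack_tight << 17) |
                        (align_msb << 18) | ((bpp - 1) << 9);

                printf("SSPP_SRC_FORMAT = 0x%08x\n", src_format); /* 0x000236ff */
                return 0;
        }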
+
+static void sde_hw_sspp_setup_pe_config(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *cfg,
+ struct sde_hw_pixel_ext *pe_ext)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u8 color;
+ u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+ const u32 bytemask = 0xff;
+ const u32 shortmask = 0xffff;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ /* program SW pixel extension override for all pipes */
+ for (color = 0; color < 4; color++) {
+ /* color 2 has the same set of registers as color 1 */
+ if (color == 2)
+ continue;
+
+ lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+ ((pe_ext->right_rpt[color] & bytemask) << 16)|
+ ((pe_ext->left_ftch[color] & bytemask) << 8)|
+ (pe_ext->left_rpt[color] & bytemask);
+
+ tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+ ((pe_ext->btm_rpt[color] & bytemask) << 16)|
+ ((pe_ext->top_ftch[color] & bytemask) << 8)|
+ (pe_ext->top_rpt[color] & bytemask);
+
+ tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+ pe_ext->num_ext_pxls_top[color] +
+ pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+ ((pe_ext->roi_w[color] +
+ pe_ext->num_ext_pxls_left[color] +
+ pe_ext->num_ext_pxls_right[color]) & shortmask);
+ }
+
+ /* color 0 */
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+ tot_req_pixels[0]);
+
+ /* color 1 and color 2 */
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+ tot_req_pixels[1]);
+
+ /* color 3 */
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+ tot_req_pixels[3]);
+}
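+
+/*
+ * A minimal sketch of the pixel-extension programming above for the
+ * 1:1 (no scaling) case: every color component fetches exactly its
+ * ROI, with no overfetch or repeat pixels. Illustrative only; real
+ * values are derived by the plane driver, and chroma-subsampled
+ * formats would reduce the C1/C2 ROI accordingly.
+ */
+static inline void sde_sspp_pe_identity_example(struct sde_hw_pipe *pipe,
+ struct sde_hw_pipe_cfg *cfg)
+{
+ struct sde_hw_pixel_ext pe = { 0 };
+ int i;
+
+ for (i = 0; i < SDE_MAX_PLANES; i++) {
+ pe.roi_w[i] = cfg->src_rect.w;
+ pe.roi_h[i] = cfg->src_rect.h;
+ /* all ftch/rpt/num_ext_pxls fields stay zero */
+ }
+
+ sde_hw_sspp_setup_pe_config(pipe, cfg, &pe);
+}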
+
+static void sde_hw_sspp_setup_scalar(struct sde_hw_pipe *ctx,
+ struct sde_hw_pixel_ext *pe_ext)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ int scale_config;
+ const u8 mask = 0x3;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALAR_QSEED2, &idx))
+ return;
+
+ scale_config = BIT(0) | BIT(1);
+ /* RGB/YUV config */
+ scale_config |= (pe_ext->horz_filter[SDE_SSPP_COMP_LUMA] & mask) << 8;
+ scale_config |= (pe_ext->vert_filter[SDE_SSPP_COMP_LUMA] & mask) << 10;
+ /* Alpha config */
+ scale_config |= (pe_ext->horz_filter[SDE_SSPP_COMP_ALPHA] & mask) << 16;
+ scale_config |= (pe_ext->vert_filter[SDE_SSPP_COMP_ALPHA] & mask) << 18;
+
+ SDE_REG_WRITE(c, SCALE_CONFIG + idx, scale_config);
+ SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_X + idx,
+ pe_ext->init_phase_x[SDE_SSPP_COMP_LUMA]);
+ SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_Y + idx,
+ pe_ext->init_phase_y[SDE_SSPP_COMP_LUMA]);
+ SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_X + idx,
+ pe_ext->phase_step_x[SDE_SSPP_COMP_LUMA]);
+ SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_Y + idx,
+ pe_ext->phase_step_y[SDE_SSPP_COMP_LUMA]);
+
+ SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_X + idx,
+ pe_ext->init_phase_x[SDE_SSPP_COMP_CHROMA]);
+ SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_Y + idx,
+ pe_ext->init_phase_y[SDE_SSPP_COMP_CHROMA]);
+ SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_X + idx,
+ pe_ext->phase_step_x[SDE_SSPP_COMP_CHROMA]);
+ SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_Y + idx,
+ pe_ext->phase_step_y[SDE_SSPP_COMP_CHROMA]);
+}
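+
+/*
+ * Sketch of the fixed-point phase step programmed above, assuming the
+ * 21-bit phase precision used by the plane driver (PHASE_STEP_SHIFT in
+ * sde_plane.c) and that <linux/math64.h> is available for div_u64().
+ * For a 1920 -> 960 horizontal downscale this yields
+ * (1920 << 21) / 960 == 2 << 21, i.e. two source pixels advanced per
+ * destination pixel.
+ */
+static inline u32 sde_sspp_phase_step_example(u32 src, u32 dst)
+{
+ return dst ? (u32)div_u64((u64)src << 21, dst) : 0;
+}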
+
+/**
+ * sde_hw_sspp_setup_rects() - programs the source/destination rectangles,
+ * strides, decimation and, when needed, the scaler and pixel extension
+ */
+static void sde_hw_sspp_setup_rects(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *cfg,
+ struct sde_hw_pixel_ext *pe_ext)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+ u32 decimation = 0;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ /* program pixel extension override */
+ if (pe_ext)
+ sde_hw_sspp_setup_pe_config(ctx, cfg, pe_ext);
+
+ /* src and dest rect programming */
+ src_xy = (cfg->src_rect.y << 16) |
+ (cfg->src_rect.x);
+ src_size = (cfg->src_rect.h << 16) |
+ (cfg->src_rect.w);
+ dst_xy = (cfg->dst_rect.y << 16) |
+ (cfg->dst_rect.x);
+ dst_size = (cfg->dst_rect.h << 16) |
+ (cfg->dst_rect.w);
+
+ ystride0 = (cfg->src.ystride[0]) |
+ (cfg->src.ystride[1] << 16);
+ ystride1 = (cfg->src.ystride[2]) |
+ (cfg->src.ystride[3] << 16);
+
+ /* program scaler and phase registers if the pipe supports scaling */
+ if (src_size != dst_size) {
+ if (test_bit(SDE_SSPP_SCALAR_RGB, &ctx->cap->features) ||
+ test_bit(SDE_SSPP_SCALAR_QSEED2, &ctx->cap->features)) {
+ /* program decimation */
+ decimation = ((1 << cfg->horz_decimation) - 1) << 8;
+ decimation |= ((1 << cfg->vert_decimation) - 1);
+
+ sde_hw_sspp_setup_scalar(ctx, pe_ext);
+ }
+ }
+
+ /* Rectangle Register programming */
+ SDE_REG_WRITE(c, SSPP_SRC_SIZE + idx, src_size);
+ SDE_REG_WRITE(c, SSPP_SRC_XY + idx, src_xy);
+ SDE_REG_WRITE(c, SSPP_OUT_SIZE + idx, dst_size);
+ SDE_REG_WRITE(c, SSPP_OUT_XY + idx, dst_xy);
+
+ SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+ SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+ SDE_REG_WRITE(c, SSPP_DECIMATION_CONFIG + idx, decimation);
+}
+
+static void sde_hw_sspp_setup_sourceaddress(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ int i;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ for (i = 0; i < cfg->src.num_planes; i++)
+ SDE_REG_WRITE(c, SSPP_SRC0_ADDR + idx + i*0x4,
+ cfg->addr.plane[i]);
+}
+
+static void sde_hw_sspp_setup_csc_8bit(struct sde_hw_pipe *ctx,
+ struct sde_csc_cfg *data)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+ sde_hw_csc_setup(c, VIG_0_CSC_1_MATRIX_COEFF_0, data);
+}
+
+static void sde_hw_sspp_setup_sharpening(struct sde_hw_pipe *ctx,
+ struct sde_hw_sharp_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx, cfg->strength);
+ SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0x4, cfg->edge_thr);
+ SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0x8, cfg->smooth_thr);
+ SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0xC, cfg->noise_thr);
+}
+
+static void sde_hw_sspp_setup_solidfill(struct sde_hw_pipe *ctx,
+ u32 const_color,
+ u32 flags)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 secure = 0;
+ u32 unpack, src_format, opmode = 0;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ /* format info */
+ src_format = SSPP_SOLID_FILL;
+ unpack = (C3_ALPHA << 24) | (C2_R_Cr << 16) |
+ (C1_B_Cb << 8) | (C0_G_Y << 0);
+ secure = (flags & SDE_SSPP_SECURE_OVERLAY_SESSION) ? 0xF : 0x00;
+ opmode = MDSS_MDP_OP_PE_OVERRIDE;
+
+ SDE_REG_WRITE(c, SSPP_SRC_FORMAT + idx, src_format);
+ SDE_REG_WRITE(c, SSPP_SRC_UNPACK_PATTERN + idx, unpack);
+ SDE_REG_WRITE(c, SSPP_SRC_ADDR_SW_STATUS + idx, secure);
+ SDE_REG_WRITE(c, SSPP_SRC_CONSTANT_COLOR + idx, const_color);
+ SDE_REG_WRITE(c, SSPP_SRC_OP_MODE + idx, opmode);
+}
+
+static void sde_hw_sspp_setup_histogram_v1(struct sde_hw_pipe *ctx,
+ void *cfg)
+{
+}
+
+static void sde_hw_sspp_setup_memcolor(struct sde_hw_pipe *ctx,
+ u32 memcolortype, u8 en)
+{
+}
+
+static void sde_hw_sspp_setup_igc(struct sde_hw_pipe *ctx)
+{
+}
+
+void sde_sspp_setup_pa(struct sde_hw_pipe *c)
+{
+}
+
+static void sde_hw_sspp_setup_danger_safe(struct sde_hw_pipe *ctx,
+ u32 danger_lut, u32 safe_lut)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ SDE_REG_WRITE(c, SSPP_DANGER_LUT + idx, danger_lut);
+ SDE_REG_WRITE(c, SSPP_SAFE_LUT + idx, safe_lut);
+}
+
+static void sde_hw_sspp_qseed2_coeff(void *ctx)
+{
+}
+
+static void _setup_layer_ops(struct sde_hw_sspp_ops *ops,
+ unsigned long features)
+{
+ if (test_bit(SDE_SSPP_SRC, &features)) {
+ ops->setup_sourceformat = sde_hw_sspp_setup_format;
+ ops->setup_rects = sde_hw_sspp_setup_rects;
+ ops->setup_sourceaddress = sde_hw_sspp_setup_sourceaddress;
+ ops->setup_solidfill = sde_hw_sspp_setup_solidfill;
+ ops->setup_danger_safe = sde_hw_sspp_setup_danger_safe;
+ }
+ if (test_bit(SDE_SSPP_CSC, &features))
+ ops->setup_csc = sde_hw_sspp_setup_csc_8bit;
+
+ if (test_bit(SDE_SSPP_PA_V1, &features)) {
+ ops->setup_sharpening = sde_hw_sspp_setup_sharpening;
+ ops->setup_pa_memcolor = sde_hw_sspp_setup_memcolor;
+ }
+ if (test_bit(SDE_SSPP_HIST_V1, &features))
+ ops->setup_histogram = sde_hw_sspp_setup_histogram_v1;
+
+ if (test_bit(SDE_SSPP_IGC, &features))
+ ops->setup_igc = sde_hw_sspp_setup_igc;
+}
+
+static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
+ struct sde_mdss_cfg *m,
+ void __iomem *addr,
+ struct sde_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->sspp_count; i++) {
+ if (sspp == m->sspp[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->sspp[i].base;
+ b->hwversion = m->hwversion;
+ return &m->sspp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_pipe *c;
+ struct sde_sspp_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _sspp_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_layer_ops(&c->ops, c->cap->features);
+ c->highest_bank_bit = m->mdp[0].highest_bank_bit;
+
+ /*
+ * Perform any default initialization for the sspp blocks
+ */
+ if (test_bit(SDE_SSPP_SCALAR_QSEED2, &cfg->features))
+ sde_hw_sspp_qseed2_coeff(c);
+
+ if (test_bit(SDE_MDP_PANIC_PER_PIPE, &m->mdp[0].features))
+ sde_hw_sspp_setup_danger_safe(c,
+ cfg->sblk->danger_lut,
+ cfg->sblk->safe_lut);
+
+ return c;
+}
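+
+/*
+ * Usage sketch for the init/ops/destroy lifecycle (illustrative only):
+ * program a solid fill on one pipe and release it. Assumes clocks are
+ * already enabled; SSPP_VIG0 is taken from the catalog's sde_sspp enum
+ * and the ARGB color and flags are arbitrary example values.
+ */
+static inline int sde_hw_sspp_solidfill_example(void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_pipe *pipe;
+
+ pipe = sde_hw_sspp_init(SSPP_VIG0, addr, m);
+ if (IS_ERR_OR_NULL(pipe))
+ return -EINVAL;
+
+ if (pipe->ops.setup_solidfill)
+ pipe->ops.setup_solidfill(pipe, 0xff0000ff, 0);
+
+ sde_hw_sspp_destroy(pipe);
+ return 0;
+}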
+
+void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx)
+{
+ kfree(ctx);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
new file mode 100644
index 000000000000..0e78c52cde56
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -0,0 +1,282 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_SSPP_H
+#define _SDE_HW_SSPP_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_mdp_util.h"
+
+struct sde_hw_pipe;
+
+/**
+ * Flags
+ */
+#define SDE_SSPP_SECURE_OVERLAY_SESSION 0x1
+#define SDE_SSPP_FLIP_LR 0x2
+#define SDE_SSPP_FLIP_UD 0x4
+#define SDE_SSPP_SOURCE_ROTATED_90 0x8
+#define SDE_SSPP_ROT_90 0x10
+
+/**
+ * Component indices
+ */
+enum {
+ SDE_SSPP_COMP_LUMA = 0,
+ SDE_SSPP_COMP_CHROMA = 1,
+ SDE_SSPP_COMP_ALPHA = 3
+};
+
+enum {
+ SDE_MDP_FRAME_LINEAR,
+ SDE_MDP_FRAME_TILE_A4X,
+ SDE_MDP_FRAME_TILE_A5X,
+};
+
+enum sde_hw_filter {
+ SDE_MDP_SCALE_FILTER_NEAREST = 0,
+ SDE_MDP_SCALE_FILTER_BIL,
+ SDE_MDP_SCALE_FILTER_PCMN,
+ SDE_MDP_SCALE_FILTER_CA,
+ SDE_MDP_SCALE_FILTER_MAX
+};
+
+struct sde_hw_sharp_cfg {
+ u32 strength;
+ u32 edge_thr;
+ u32 smooth_thr;
+ u32 noise_thr;
+};
+
+struct sde_hw_pixel_ext {
+ /* scaling factors are enabled for this input layer */
+ uint8_t enable_pxl_ext;
+
+ int init_phase_x[SDE_MAX_PLANES];
+ int phase_step_x[SDE_MAX_PLANES];
+ int init_phase_y[SDE_MAX_PLANES];
+ int phase_step_y[SDE_MAX_PLANES];
+
+ /*
+ * Number of pixels of extension in the left, right, top and bottom
+ * directions for all color components. The value for each color
+ * component should be the sum of fetch + repeat pixels.
+ */
+ int num_ext_pxls_left[SDE_MAX_PLANES];
+ int num_ext_pxls_right[SDE_MAX_PLANES];
+ int num_ext_pxls_top[SDE_MAX_PLANES];
+ int num_ext_pxls_btm[SDE_MAX_PLANES];
+
+ /*
+ * Number of pixels that need to be overfetched from the source
+ * image in the left, right, top and bottom directions for scaling.
+ */
+ int left_ftch[SDE_MAX_PLANES];
+ int right_ftch[SDE_MAX_PLANES];
+ int top_ftch[SDE_MAX_PLANES];
+ int btm_ftch[SDE_MAX_PLANES];
+
+ /*
+ * Number of pixels that need to be repeated in the left, right,
+ * top and bottom directions for scaling.
+ */
+ int left_rpt[SDE_MAX_PLANES];
+ int right_rpt[SDE_MAX_PLANES];
+ int top_rpt[SDE_MAX_PLANES];
+ int btm_rpt[SDE_MAX_PLANES];
+
+ uint32_t roi_w[SDE_MAX_PLANES];
+ uint32_t roi_h[SDE_MAX_PLANES];
+
+ /*
+ * Filter type to be used for scaling in horizontal and vertical
+ * directions
+ */
+ enum sde_hw_filter horz_filter[SDE_MAX_PLANES];
+ enum sde_hw_filter vert_filter[SDE_MAX_PLANES];
+};
+
+/**
+ * struct sde_hw_pipe_cfg - pipe description
+ * @src: source surface information
+ * @src_rect: src ROI, caller takes into account the different operations
+ * such as decimation, flip etc to program this field
+ * @dst_rect: destination ROI.
+ * @horz_decimation: horizontal decimation factor (0, 2, 4, 8, 16)
+ * @vert_decimation: vertical decimation factor (0, 2, 4, 8, 16)
+ * 2: read 1 line/pixel, drop 1 line/pixel
+ * 4: read 1 line/pixel, drop 3 lines/pixels
+ * 8: read 1 line/pixel, drop 7 lines/pixels
+ * 16: read 1 line/pixel, drop 15 lines/pixels
+ * @addr: source surface address
+ */
+struct sde_hw_pipe_cfg {
+ struct sde_hw_source_info src;
+ struct sde_rect src_rect;
+ struct sde_rect dst_rect;
+ u8 horz_decimation;
+ u8 vert_decimation;
+ struct addr_info addr;
+};
+
+/**
+ * struct danger_safe_cfg - danger/safe LUT settings
+ * @danger_lut: danger LUT value written to SSPP_DANGER_LUT
+ * @safe_lut: safe LUT value written to SSPP_SAFE_LUT
+ */
+struct danger_safe_cfg {
+ u32 danger_lut;
+ u32 safe_lut;
+};
+
+/**
+ * struct sde_hw_sspp_ops - interface to the SSPP Hw driver functions
+ * Caller must call the init function to get the pipe context for each pipe
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_sspp_ops {
+ /**
+ * setup_sourceformat - setup pixel format, cropping rectangle, flip
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @flags: Format flags
+ */
+ void (*setup_sourceformat)(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *cfg,
+ u32 flags);
+
+ /**
+ * setup_rects - setup pipe ROI rectangles
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @pe_ext: Pointer to pixel ext settings
+ */
+ void (*setup_rects)(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *cfg,
+ struct sde_hw_pixel_ext *pe_ext);
+
+ /**
+ * setup_sourceaddress - setup pipe source addresses
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ */
+ void (*setup_sourceaddress)(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *cfg);
+
+ /**
+ * setup_csc - setup color space conversion
+ * @ctx: Pointer to pipe context
+ * @data: Pointer to config structure
+ */
+ void (*setup_csc)(struct sde_hw_pipe *ctx,
+ struct sde_csc_cfg *data);
+
+ /**
+ * setup_solidfill - enable/disable colorfill
+ * @ctx: Pointer to pipe context
+ * @const_color: Fill color value
+ * @flags: Pipe flags
+ */
+ void (*setup_solidfill)(struct sde_hw_pipe *ctx,
+ u32 const_color,
+ u32 flags);
+
+ /**
+ * setup_sharpening - setup sharpening
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to config structure
+ */
+ void (*setup_sharpening)(struct sde_hw_pipe *ctx,
+ struct sde_hw_sharp_cfg *cfg);
+
+ /**
+ * setup_pa_memcolor - setup source color processing
+ * @ctx: Pointer to pipe context
+ * @memcolortype: Memcolor type
+ * @en: PA enable
+ */
+ void (*setup_pa_memcolor)(struct sde_hw_pipe *ctx,
+ u32 memcolortype, u8 en);
+
+ /**
+ * setup_igc - setup inverse gamma correction
+ * @ctx: Pointer to pipe context
+ */
+ void (*setup_igc)(struct sde_hw_pipe *ctx);
+
+ /**
+ * setup_danger_safe - setup danger safe LUTS
+ * @ctx: Pointer to pipe context
+ * @danger_lut: Danger LUT setting
+ * @safe_lut: Safe LUT setting
+ */
+ void (*setup_danger_safe)(struct sde_hw_pipe *ctx,
+ u32 danger_lut,
+ u32 safe_lut);
+
+ /**
+ * setup_histogram - setup histograms
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to histogram configuration
+ */
+ void (*setup_histogram)(struct sde_hw_pipe *ctx,
+ void *cfg);
+};
+
+/**
+ * struct sde_hw_pipe - pipe description
+ * @hw: MDSS register block map for this pipe (base_off, blk_off and
+ * hwversion live here)
+ * @idx: pipe index
+ * @cap: pointer to the pipe catalog entry; the pipe type
+ * (VIG/DMA/RGB/CURSOR) determines which operations are supported
+ * @highest_bank_bit: highest memory bank bit, used for fetch configuration
+ * @ops: pointer to operations possible for this pipe
+ */
+struct sde_hw_pipe {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* Pipe */
+ enum sde_sspp idx;
+ const struct sde_sspp_cfg *cap;
+ u32 highest_bank_bit;
+
+ /* Ops */
+ struct sde_hw_sspp_ops ops;
+};
+
+/**
+ * sde_hw_sspp_init - initializes the sspp hw driver object.
+ * Should be called once before accessing every pipe.
+ * @idx: Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: pointer to mdss catalog data
+ */
+struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_sspp_destroy(): Destroys SSPP driver context
+ * should be called during Hw pipe cleanup.
+ * @ctx: Pointer to SSPP driver context returned by sde_hw_sspp_init
+ */
+void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx);
+
+#endif /*_SDE_HW_SSPP_H */
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
new file mode 100644
index 000000000000..9ed3f8586fd1
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
@@ -0,0 +1,120 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_wb.h"
+
+static struct sde_wb_cfg *_wb_offset(enum sde_wb wb,
+ struct sde_mdss_cfg *m,
+ void __iomem *addr,
+ struct sde_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->wb_count; i++) {
+ if (wb == m->wb[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->wb[i].base;
+ b->hwversion = m->hwversion;
+ return &m->wb[i];
+ }
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static void sde_hw_wb_setup_csc_8bit(struct sde_hw_wb *ctx,
+ struct sde_csc_cfg *data)
+{
+}
+
+static void sde_hw_wb_setup_outaddress(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *data)
+{
+}
+
+static void sde_hw_wb_setup_format(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *data)
+{
+}
+
+static void sde_hw_wb_setup_rotator(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *data)
+{
+}
+
+static void sde_hw_setup_dither(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *data)
+{
+}
+
+static void sde_hw_wb_setup_cdwn(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *data)
+{
+}
+
+static void sde_hw_wb_traffic_shaper(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *data)
+{
+}
+
+static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
+ unsigned long features)
+{
+ if (test_bit(SDE_WB_CSC, &features))
+ ops->setup_csc_data = sde_hw_wb_setup_csc_8bit;
+
+ ops->setup_outaddress = sde_hw_wb_setup_outaddress;
+ ops->setup_outformat = sde_hw_wb_setup_format;
+
+ if (test_bit(SDE_WB_BLOCK_MODE, &features))
+ ops->setup_rotator = sde_hw_wb_setup_rotator;
+
+ if (test_bit(SDE_WB_DITHER, &features))
+ ops->setup_dither = sde_hw_setup_dither;
+
+ if (test_bit(SDE_WB_CHROMA_DOWN, &features))
+ ops->setup_cdwn = sde_hw_wb_setup_cdwn;
+
+ if (test_bit(SDE_WB_TRAFFIC_SHAPER, &features))
+ ops->setup_trafficshaper = sde_hw_wb_traffic_shaper;
+}
+
+struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_wb *c;
+ struct sde_wb_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _wb_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_wb_ops(&c->ops, c->caps->features);
+
+ /*
+ * Perform any default initialization for the chroma down module
+ */
+
+ return c;
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.h b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
new file mode 100644
index 000000000000..32e2ee87ef7f
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_WB_H
+#define _SDE_HW_WB_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_mdp_util.h"
+
+struct sde_hw_wb;
+
+struct sde_hw_wb_cfg {
+ struct sde_hw_source_info dest;
+};
+
+/**
+ * struct sde_hw_wb_ops - interface to the wb HW driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_wb_ops {
+ void (*setup_csc_data)(struct sde_hw_wb *ctx,
+ struct sde_csc_cfg *data);
+
+ void (*setup_outaddress)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_outformat)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_rotator)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_dither)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_cdwn)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_trafficshaper)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+};
+
+/**
+ * struct sde_hw_wb - WB driver object
+ * @hw: MDSS register block map for this writeback path
+ * @idx: writeback path index
+ * @caps: pointer to the writeback path catalog entry
+ * @ops: pointer to operations possible for this writeback path
+ */
+struct sde_hw_wb {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* wb path */
+ int idx;
+ const struct sde_wb_cfg *caps;
+
+ /* ops */
+ struct sde_hw_wb_ops ops;
+};
+
+/**
+ * sde_hw_wb_init(): Initializes the wb_path hw driver object.
+ * Should be called once before accessing every writeback path.
+ * @idx: wb_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+#endif /*_SDE_HW_WB_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hwio.h b/drivers/gpu/drm/msm/sde/sde_hwio.h
new file mode 100644
index 000000000000..2531463b654e
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hwio.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HWIO_H
+#define _SDE_HWIO_H
+
+#include "sde_hw_mdp_util.h"
+
+/**
+ * MDP TOP block Register and bit fields and defines
+ */
+#define DISP_INTF_SEL 0x004
+#define INTR_EN 0x010
+#define INTR_STATUS 0x014
+#define INTR_CLEAR 0x018
+#define INTR2_EN 0x008
+#define INTR2_STATUS 0x00c
+#define INTR2_CLEAR 0x02c
+#define HIST_INTR_EN 0x01c
+#define HIST_INTR_STATUS 0x020
+#define HIST_INTR_CLEAR 0x024
+#define INTF_INTR_EN 0x1C0
+#define INTF_INTR_STATUS 0x1C4
+#define INTF_INTR_CLEAR 0x1C8
+#define SPLIT_DISPLAY_EN 0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN 0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN 0x308
+#define PPB0_CNTL 0x330
+#define PPB0_CONFIG 0x334
+#define PPB1_CNTL 0x338
+#define PPB1_CONFIG 0x33C
+#define HW_EVENTS_CTL 0x37C
+#define CLK_CTRL3 0x3A8
+#define CLK_STATUS3 0x3AC
+#define CLK_CTRL4 0x3B0
+#define CLK_STATUS4 0x3B4
+#define CLK_CTRL5 0x3B8
+#define CLK_STATUS5 0x3BC
+#define CLK_CTRL7 0x3D0
+#define CLK_STATUS7 0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL 0x3F4
+#define INTF_SW_RESET_MASK 0x3FC
+#define MDP_OUT_CTL_0 0x410
+#define MDP_VSYNC_SEL 0x414
+#define DCE_SEL 0x450
+
+#endif /*_SDE_HWIO_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
new file mode 100644
index 000000000000..722845df3d0b
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -0,0 +1,350 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "msm_drv.h"
+#include "sde_kms.h"
+
+static void sde_irq_callback_handler(void *arg, int irq_idx)
+{
+ struct sde_kms *sde_kms = arg;
+ struct sde_irq *irq_obj = &sde_kms->irq_obj;
+
+ /*
+ * Perform registered function callback
+ */
+ if (irq_obj->irq_cb_tbl && irq_obj->irq_cb_tbl[irq_idx].func)
+ irq_obj->irq_cb_tbl[irq_idx].func(
+ irq_obj->irq_cb_tbl[irq_idx].arg,
+ irq_idx);
+
+ /*
+ * Clear pending interrupt status in HW.
+ * NOTE: sde_irq_callback_handler is protected by top-level
+ * spinlock, so it is safe to clear any interrupt status here.
+ */
+ sde_kms->hw_intr->ops.clear_interrupt_status(
+ sde_kms->hw_intr,
+ irq_idx);
+}
+
+static void sde_irq_intf_error_handler(void *arg, int irq_idx)
+{
+ DRM_ERROR("INTF underrun detected, irq_idx=%d\n", irq_idx);
+}
+
+void sde_set_irqmask(struct sde_kms *sde_kms, uint32_t reg, uint32_t irqmask)
+{
+ if (!sde_kms || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.set_mask)
+ return;
+
+ sde_kms->hw_intr->ops.set_mask(sde_kms->hw_intr, reg, irqmask);
+}
+
+int sde_irq_idx_lookup(struct sde_kms *sde_kms, enum sde_intr_type intr_type,
+ u32 instance_idx)
+{
+ if (!sde_kms || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.irq_idx_lookup)
+ return -EINVAL;
+
+ return sde_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+ instance_idx);
+}
+
+int sde_enable_irq(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
+{
+ int i;
+ int ret = 0;
+
+ if (!sde_kms || !irq_idxs || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.enable_irq)
+ return -EINVAL;
+
+ for (i = 0; i < irq_count; i++) {
+ ret = sde_kms->hw_intr->ops.enable_irq(
+ sde_kms->hw_intr,
+ irq_idxs[i]);
+ if (ret) {
+ DRM_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ irq_idxs[i]);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int sde_disable_irq(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
+{
+ int i;
+ int ret = 0;
+
+ if (!sde_kms || !irq_idxs || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.disable_irq)
+ return -EINVAL;
+
+ for (i = 0; i < irq_count; i++) {
+ ret = sde_kms->hw_intr->ops.disable_irq(
+ sde_kms->hw_intr,
+ irq_idxs[i]);
+ if (ret) {
+ DRM_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+ irq_idxs[i]);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int sde_register_irq_callback(struct sde_kms *sde_kms, int irq_idx,
+ struct sde_irq_callback *register_irq_cb)
+{
+ struct sde_irq_callback *irq_cb_tbl;
+ unsigned long irq_flags;
+
+ /*
+ * We allow NULL register_irq_cb as input for callback registration
+ */
+ if (!sde_kms || !sde_kms->irq_obj.irq_cb_tbl)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+ DRM_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq_cb_tbl = sde_kms->irq_obj.irq_cb_tbl;
+ spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+ irq_cb_tbl[irq_idx].func = register_irq_cb ?
+ register_irq_cb->func : NULL;
+ irq_cb_tbl[irq_idx].arg = register_irq_cb ?
+ register_irq_cb->arg : NULL;
+ spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
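+
+/*
+ * Registration sketch (illustrative only): look up an irq_idx, hook a
+ * placeholder handler and enable the interrupt. sde_irq_postinstall()
+ * below follows this same pattern for the real underrun callback.
+ */
+static void sde_irq_example_handler(void *arg, int irq_idx)
+{
+ /* invoked from sde_irq_callback_handler() in interrupt context */
+}
+
+static inline int sde_irq_register_example(struct sde_kms *sde_kms)
+{
+ struct sde_irq_callback cb = {
+ .func = sde_irq_example_handler,
+ .arg = sde_kms,
+ };
+ int irq_idx;
+
+ irq_idx = sde_irq_idx_lookup(sde_kms,
+ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0);
+ if (irq_idx < 0)
+ return irq_idx;
+
+ sde_register_irq_callback(sde_kms, irq_idx, &cb);
+ return sde_enable_irq(sde_kms, &irq_idx, 1);
+}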
+
+void sde_clear_all_irqs(struct sde_kms *sde_kms)
+{
+ if (!sde_kms || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.clear_all_irqs)
+ return;
+
+ sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);
+}
+
+void sde_disable_all_irqs(struct sde_kms *sde_kms)
+{
+ if (!sde_kms || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.disable_all_irqs)
+ return;
+
+ sde_kms->hw_intr->ops.disable_all_irqs(sde_kms->hw_intr);
+}
+
+void sde_irq_preinstall(struct msm_kms *kms)
+{
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+
+ sde_enable(sde_kms);
+ sde_clear_all_irqs(sde_kms);
+ sde_disable_all_irqs(sde_kms);
+ sde_disable(sde_kms);
+
+ spin_lock_init(&sde_kms->irq_obj.cb_lock);
+
+ /* Create irq callbacks for all possible irq_idx */
+ sde_kms->irq_obj.total_irqs = sde_kms->hw_intr->irq_idx_tbl_size;
+ sde_kms->irq_obj.irq_cb_tbl = kcalloc(sde_kms->irq_obj.total_irqs,
+ sizeof(struct sde_irq_callback), GFP_KERNEL);
+ if (!sde_kms->irq_obj.irq_cb_tbl)
+ DRM_ERROR("Fail to allocate memory of IRQ callback list\n");
+}
+
+int sde_irq_postinstall(struct msm_kms *kms)
+{
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+ struct sde_irq_callback irq_cb;
+ int irq_idx;
+ int i;
+
+ irq_cb.func = sde_irq_intf_error_handler;
+ irq_cb.arg = sde_kms;
+
+ /* Register interface underrun callback */
+ sde_enable(sde_kms);
+ for (i = 0; i < sde_kms->catalog->intf_count; i++) {
+ irq_idx = sde_irq_idx_lookup(sde_kms,
+ SDE_IRQ_TYPE_INTF_UNDER_RUN, i+INTF_0);
+ sde_register_irq_callback(sde_kms, irq_idx, &irq_cb);
+ sde_enable_irq(sde_kms, &irq_idx, 1);
+ }
+ sde_disable(sde_kms);
+
+ return 0;
+}
+
+void sde_irq_uninstall(struct msm_kms *kms)
+{
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+
+ sde_enable(sde_kms);
+ sde_clear_all_irqs(sde_kms);
+ sde_disable_all_irqs(sde_kms);
+ sde_disable(sde_kms);
+
+ kfree(sde_kms->irq_obj.irq_cb_tbl);
+}
+
+static void _sde_irq_mdp_done(struct sde_kms *sde_kms)
+{
+ /*
+ * Read interrupt status from all sources. Interrupt statuses are
+ * stored within hw_intr.
+ * Function will also clear the interrupt status after reading.
+ * Individual interrupt status bit will only get stored if it
+ * is enabled.
+ */
+ sde_kms->hw_intr->ops.get_interrupt_statuses(sde_kms->hw_intr);
+
+ /*
+ * Dispatch the fired interrupts to the HW driver for lookup. When a
+ * matching interrupt is located, the HW driver calls
+ * sde_irq_callback_handler with the irq_idx from the lookup table.
+ * sde_irq_callback_handler performs the registered function callback
+ * and clears the interrupt status once the callback has finished.
+ */
+ sde_kms->hw_intr->ops.dispatch_irqs(
+ sde_kms->hw_intr,
+ sde_irq_callback_handler,
+ sde_kms);
+}
+
+irqreturn_t sde_irq(struct msm_kms *kms)
+{
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+ u32 interrupts;
+
+ sde_kms->hw_intr->ops.get_interrupt_sources(sde_kms->hw_intr,
+ &interrupts);
+
+ /*
+ * Taking care of MDP interrupt
+ */
+ if (interrupts & IRQ_SOURCE_MDP) {
+ interrupts &= ~IRQ_SOURCE_MDP;
+ _sde_irq_mdp_done(sde_kms);
+ }
+
+ /*
+ * Routing all other interrupts to external drivers
+ */
+ while (interrupts) {
+ irq_hw_number_t hwirq = fls(interrupts) - 1;
+
+ generic_handle_irq(irq_find_mapping(
+ sde_kms->irqcontroller.domain, hwirq));
+ interrupts &= ~(1 << hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ return sde_crtc_vblank(crtc);
+}
+
+void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+}
+
+static void sde_hw_irq_mask(struct irq_data *irqd)
+{
+ struct sde_kms *sde_kms = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &sde_kms->irqcontroller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static void sde_hw_irq_unmask(struct irq_data *irqd)
+{
+ struct sde_kms *sde_kms = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &sde_kms->irqcontroller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip sde_hw_irq_chip = {
+ .name = "sde",
+ .irq_mask = sde_hw_irq_mask,
+ .irq_unmask = sde_hw_irq_unmask,
+};
+
+static int sde_hw_irqdomain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct sde_kms *sde_kms = d->host_data;
+ uint32_t valid_irqs;
+
+ sde_kms->hw_intr->ops.get_valid_interrupts(sde_kms->hw_intr,
+ &valid_irqs);
+
+ if (!(valid_irqs & (1 << hwirq)))
+ return -EPERM;
+
+ irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, sde_kms);
+
+ return 0;
+}
+
+static struct irq_domain_ops sde_hw_irqdomain_ops = {
+ .map = sde_hw_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+int sde_irq_domain_init(struct sde_kms *sde_kms)
+{
+ struct device *dev = sde_kms->dev->dev;
+ struct irq_domain *d;
+
+ d = irq_domain_add_linear(dev->of_node, 32,
+ &sde_hw_irqdomain_ops, sde_kms);
+
+ if (!d)
+ return -ENXIO;
+
+ sde_kms->irqcontroller.enabled_mask = 0;
+ sde_kms->irqcontroller.domain = d;
+
+ return 0;
+}
+
+int sde_irq_domain_fini(struct sde_kms *sde_kms)
+{
+ if (sde_kms->irqcontroller.domain) {
+ irq_domain_remove(sde_kms->irqcontroller.domain);
+ sde_kms->irqcontroller.domain = NULL;
+ }
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
new file mode 100644
index 000000000000..251003e5382c
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -0,0 +1,496 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_crtc.h>
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "sde_kms.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_intf.h"
+
+static const char * const iommu_ports[] = {
+ "mdp_0",
+};
+
+#define DEFAULT_MDP_SRC_CLK 200000000
+
+int sde_disable(struct sde_kms *sde_kms)
+{
+ DBG("");
+
+ clk_disable_unprepare(sde_kms->ahb_clk);
+ clk_disable_unprepare(sde_kms->axi_clk);
+ clk_disable_unprepare(sde_kms->core_clk);
+ if (sde_kms->lut_clk)
+ clk_disable_unprepare(sde_kms->lut_clk);
+
+ return 0;
+}
+
+int sde_enable(struct sde_kms *sde_kms)
+{
+ DBG("");
+
+ clk_prepare_enable(sde_kms->ahb_clk);
+ clk_prepare_enable(sde_kms->axi_clk);
+ clk_prepare_enable(sde_kms->core_clk);
+ if (sde_kms->lut_clk)
+ clk_prepare_enable(sde_kms->lut_clk);
+
+ return 0;
+}
+
+static void sde_prepare_commit(struct msm_kms *kms,
+ struct drm_atomic_state *state)
+{
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+
+ sde_enable(sde_kms);
+}
+
+static void sde_complete_commit(struct msm_kms *kms,
+ struct drm_atomic_state *state)
+{
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+
+ sde_disable(sde_kms);
+}
+
+static void sde_wait_for_crtc_commit_done(struct msm_kms *kms,
+ struct drm_crtc *crtc)
+{
+}
+
+static int modeset_init(struct sde_kms *sde_kms)
+{
+ struct msm_drm_private *priv = sde_kms->dev->dev_private;
+ int i;
+ int ret;
+ struct sde_mdss_cfg *catalog = sde_kms->catalog;
+ struct drm_device *dev = sde_kms->dev;
+ struct drm_plane *primary_planes[MAX_PLANES];
+ int primary_planes_idx = 0;
+
+ int num_private_planes = catalog->mixer_count;
+
+ ret = sde_irq_domain_init(sde_kms);
+ if (ret)
+ goto fail;
+
+ /* Create the planes */
+ for (i = 0; i < catalog->sspp_count; i++) {
+ struct drm_plane *plane;
+ bool primary = true;
+
+ if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
+ || !num_private_planes)
+ primary = false;
+
+ plane = sde_plane_init(dev, catalog->sspp[i].id, primary);
+ if (IS_ERR(plane)) {
+ pr_err("%s: sde_plane_init failed", __func__);
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ priv->planes[priv->num_planes++] = plane;
+
+ if (primary)
+ primary_planes[primary_planes_idx++] = plane;
+ if (primary && num_private_planes)
+ num_private_planes--;
+ }
+
+ /* Need enough primary planes to assign one per mixer (CRTC) */
+ if (primary_planes_idx < catalog->mixer_count) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /*
+ * Enumerate displays supported
+ */
+ sde_encoders_init(dev);
+
+ /* Create one CRTC per display */
+ for (i = 0; i < priv->num_encoders; i++) {
+ /*
+ * Each CRTC receives a private plane. We start
+ * with first RGB, and then DMA and then VIG.
+ */
+ struct drm_crtc *crtc;
+
+ crtc = sde_crtc_init(dev, priv->encoders[i],
+ primary_planes[i], i);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ }
+
+ /*
+ * Iterate through the list of encoders and
+ * set the possible CRTCs
+ */
+ for (i = 0; i < priv->num_encoders; i++)
+ priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+ return 0;
+fail:
+ return ret;
+}
+
+static int sde_hw_init(struct msm_kms *kms)
+{
+ return 0;
+}
+
+static long sde_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ return rate;
+}
+
+static void sde_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+}
+
+static void sde_destroy(struct msm_kms *kms)
+{
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+
+ sde_irq_domain_fini(sde_kms);
+ sde_hw_intr_destroy(sde_kms->hw_intr);
+ kfree(sde_kms);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+ .hw_init = sde_hw_init,
+ .irq_preinstall = sde_irq_preinstall,
+ .irq_postinstall = sde_irq_postinstall,
+ .irq_uninstall = sde_irq_uninstall,
+ .irq = sde_irq,
+ .prepare_commit = sde_prepare_commit,
+ .complete_commit = sde_complete_commit,
+ .wait_for_crtc_commit_done = sde_wait_for_crtc_commit_done,
+ .enable_vblank = sde_enable_vblank,
+ .disable_vblank = sde_disable_vblank,
+ .get_format = mdp_get_format,
+ .round_pixclk = sde_round_pixclk,
+ .preclose = sde_preclose,
+ .destroy = sde_destroy,
+};
+
+static int get_clk(struct platform_device *pdev, struct clk **clkp,
+ const char *name, bool mandatory)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk = devm_clk_get(dev, name);
+
+ if (IS_ERR(clk) && mandatory) {
+ dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+ if (IS_ERR(clk))
+ DBG("skipping %s", name);
+ else
+ *clkp = clk;
+
+ return 0;
+}
+
+struct sde_kms *sde_hw_setup(struct platform_device *pdev)
+{
+ struct sde_kms *sde_kms;
+ struct msm_kms *kms = NULL;
+ int ret;
+
+ sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
+ if (!sde_kms)
+ return NULL;
+
+ msm_kms_init(&sde_kms->base, &kms_funcs);
+
+ kms = &sde_kms->base;
+
+ sde_kms->mmio = msm_ioremap(pdev, "mdp_phys", "SDE");
+ if (IS_ERR(sde_kms->mmio)) {
+ ret = PTR_ERR(sde_kms->mmio);
+ goto fail;
+ }
+ pr_err("Mapped Mdp address space @%pK", sde_kms->mmio);
+
+ sde_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+ if (IS_ERR(sde_kms->vbif)) {
+ ret = PTR_ERR(sde_kms->vbif);
+ goto fail;
+ }
+
+ sde_kms->venus = devm_regulator_get_optional(&pdev->dev, "gdsc-venus");
+ if (IS_ERR(sde_kms->venus)) {
+ ret = PTR_ERR(sde_kms->venus);
+ DBG("failed to get Venus GDSC regulator: %d\n", ret);
+ sde_kms->venus = NULL;
+ }
+
+ if (sde_kms->venus) {
+ ret = regulator_enable(sde_kms->venus);
+ if (ret) {
+ DBG("failed to enable venus GDSC: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ sde_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(sde_kms->vdd)) {
+ ret = PTR_ERR(sde_kms->vdd);
+ goto fail;
+ }
+
+ ret = regulator_enable(sde_kms->vdd);
+ if (ret) {
+ DBG("failed to enable regulator vdd: %d\n", ret);
+ goto fail;
+ }
+
+ sde_kms->mmagic = devm_regulator_get_optional(&pdev->dev, "mmagic");
+ if (IS_ERR(sde_kms->mmagic)) {
+ ret = PTR_ERR(sde_kms->mmagic);
+ DBG("failed to get mmagic GDSC regulator: %d\n", ret);
+ sde_kms->mmagic = NULL;
+ }
+
+ /* mandatory clocks: */
+ ret = get_clk(pdev, &sde_kms->axi_clk, "bus_clk", true);
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &sde_kms->ahb_clk, "iface_clk", true);
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &sde_kms->src_clk, "core_clk_src", true);
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &sde_kms->core_clk, "core_clk", true);
+ if (ret)
+ goto fail;
+ ret = get_clk(pdev, &sde_kms->vsync_clk, "vsync_clk", true);
+ if (ret)
+ goto fail;
+
+ /* optional clocks: */
+ get_clk(pdev, &sde_kms->lut_clk, "lut_clk", false);
+ get_clk(pdev, &sde_kms->mmagic_clk, "mmagic_clk", false);
+ get_clk(pdev, &sde_kms->iommu_clk, "iommu_clk", false);
+
+ if (sde_kms->mmagic) {
+ ret = regulator_enable(sde_kms->mmagic);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to enable mmagic GDSC: %d\n", ret);
+ goto fail;
+ }
+ }
+ if (sde_kms->mmagic_clk) {
+ ret = clk_prepare_enable(sde_kms->mmagic_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable mmagic_clk\n");
+ goto undo_gdsc;
+ }
+ }
+
+ return sde_kms;
+
+undo_gdsc:
+ if (sde_kms->mmagic)
+ regulator_disable(sde_kms->mmagic);
+fail:
+ if (kms)
+ sde_destroy(kms);
+
+ return ERR_PTR(ret);
+}
+
+static int sde_translation_ctrl_pwr(struct sde_kms *sde_kms, bool on)
+{
+ struct device *dev = sde_kms->dev->dev;
+ int ret;
+
+ if (on) {
+ if (sde_kms->iommu_clk) {
+ ret = clk_prepare_enable(sde_kms->iommu_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable iommu_clk\n");
+ goto undo_mmagic_clk;
+ }
+ }
+ } else {
+ if (sde_kms->iommu_clk)
+ clk_disable_unprepare(sde_kms->iommu_clk);
+ if (sde_kms->mmagic_clk)
+ clk_disable_unprepare(sde_kms->mmagic_clk);
+ if (sde_kms->mmagic)
+ regulator_disable(sde_kms->mmagic);
+ }
+
+ return 0;
+
+undo_mmagic_clk:
+ if (sde_kms->mmagic_clk)
+ clk_disable_unprepare(sde_kms->mmagic_clk);
+
+ return ret;
+}
+
+int sde_mmu_init(struct sde_kms *sde_kms)
+{
+ struct sde_mdss_cfg *catalog = sde_kms->catalog;
+ struct sde_hw_intf *intf = NULL;
+ struct iommu_domain *iommu;
+ struct msm_mmu *mmu;
+ int i, ret;
+
+ /*
+ * Make sure things are off before attaching iommu (bootloader could
+ * have left things on, in which case we'll start getting faults if
+ * we don't disable):
+ */
+ sde_enable(sde_kms);
+ for (i = 0; i < catalog->intf_count; i++) {
+ intf = sde_hw_intf_init(catalog->intf[i].id,
+ sde_kms->mmio,
+ catalog);
+ if (!IS_ERR_OR_NULL(intf)) {
+ intf->ops.enable_timing(intf, 0x0);
+ sde_hw_intf_deinit(intf);
+ }
+ }
+ sde_disable(sde_kms);
+ msleep(20);
+
+ iommu = iommu_domain_alloc(&platform_bus_type);
+
+ if (!IS_ERR_OR_NULL(iommu)) {
+ mmu = msm_smmu_new(sde_kms->dev->dev, MSM_SMMU_DOMAIN_UNSECURE);
+ if (IS_ERR(mmu)) {
+ ret = PTR_ERR(mmu);
+ dev_err(sde_kms->dev->dev,
+ "failed to init iommu: %d\n", ret);
+ iommu_domain_free(iommu);
+ goto fail;
+ }
+
+ ret = sde_translation_ctrl_pwr(sde_kms, true);
+ if (ret) {
+ dev_err(sde_kms->dev->dev,
+ "failed to power iommu: %d\n", ret);
+ mmu->funcs->destroy(mmu);
+ goto fail;
+ }
+
+ ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret) {
+ dev_err(sde_kms->dev->dev,
+ "failed to attach iommu: %d\n", ret);
+ mmu->funcs->destroy(mmu);
+ goto fail;
+ }
+ } else {
+ dev_info(sde_kms->dev->dev,
+ "no iommu, fallback to phys contig buffers for scanout\n");
+ mmu = NULL;
+ }
+ sde_kms->mmu = mmu;
+
+ sde_kms->mmu_id = msm_register_mmu(sde_kms->dev, mmu);
+ if (sde_kms->mmu_id < 0) {
+ ret = sde_kms->mmu_id;
+ dev_err(sde_kms->dev->dev,
+ "failed to register sde iommu: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return ret;
+}
+
+struct msm_kms *sde_kms_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct sde_mdss_cfg *catalog;
+ struct sde_kms *sde_kms;
+ struct msm_kms *msm_kms = NULL;
+ int ret = 0;
+
+ sde_kms = sde_hw_setup(pdev);
+ if (IS_ERR(sde_kms)) {
+ ret = PTR_ERR(sde_kms);
+ goto fail;
+ }
+
+ sde_kms->dev = dev;
+ msm_kms = &sde_kms->base;
+
+ /*
+ * Currently hardcoding to MDSS version 1.7.0 (8996)
+ */
+ catalog = sde_hw_catalog_init(1, 7, 0);
+ if (IS_ERR_OR_NULL(catalog)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ sde_kms->catalog = catalog;
+
+ /*
+ * We need to set a default rate before enabling: a safe rate
+ * first, before initializing the catalog; a more optimal rate
+ * based on bandwidth/clock requirements is set later.
+ */
+
+ clk_set_rate(sde_kms->src_clk, DEFAULT_MDP_SRC_CLK);
+ sde_enable(sde_kms);
+
+ /*
+ * Attach the display MMU so that scanout buffers are mapped
+ * through the SMMU rather than requiring contiguous memory
+ */
+ ret = sde_mmu_init(sde_kms);
+ if (ret)
+ goto fail;
+
+ /*
+ * modeset_init creates the DRM-related objects, i.e. CRTCs,
+ * planes, encoders, connectors and so forth
+ */
+ ret = modeset_init(sde_kms);
+ if (ret)
+ goto fail;
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * we can assume the max crtc width is equal to the max supported
+ * by LM_0
+ * Also fixing the max height to 4k
+ */
+ dev->mode_config.max_width = catalog->mixer[0].sblk->maxwidth;
+ dev->mode_config.max_height = 4096;
+
+ sde_enable(sde_kms);
+ sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+ sde_disable(sde_kms);
+
+ if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ return msm_kms;
+
+fail:
+ if (msm_kms)
+ sde_destroy(msm_kms);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
new file mode 100644
index 000000000000..e56fa16423e5
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -0,0 +1,230 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_KMS_H__
+#define __SDE_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "mdp/mdp_kms.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_interrupts.h"
+
+/**
+ * struct sde_irq_callback - IRQ callback handlers
+ * @func: intr handler
+ * @arg: argument for the handler
+ */
+struct sde_irq_callback {
+ void (*func)(void *arg, int irq_idx);
+ void *arg;
+};
+
+/**
+ * struct sde_irq: IRQ structure contains callback registration info
+ * @total_irqs: total number of irq_idx obtained from HW interrupts mapping
+ * @irq_cb_tbl: array of IRQ callbacks setting
+ * @cb_lock: callback lock
+ */
+struct sde_irq {
+ u32 total_irqs;
+ struct sde_irq_callback *irq_cb_tbl;
+ spinlock_t cb_lock;
+};
+
+struct sde_kms {
+ struct msm_kms base;
+ struct drm_device *dev;
+ int rev;
+ struct sde_mdss_cfg *catalog;
+
+ struct msm_mmu *mmu;
+ int mmu_id;
+
+ /* io/register spaces: */
+ void __iomem *mmio, *vbif;
+
+ struct regulator *vdd;
+ struct regulator *mmagic;
+ struct regulator *venus;
+
+ struct clk *axi_clk;
+ struct clk *ahb_clk;
+ struct clk *src_clk;
+ struct clk *core_clk;
+ struct clk *lut_clk;
+ struct clk *mmagic_clk;
+ struct clk *iommu_clk;
+ struct clk *vsync_clk;
+
+ struct {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+ } irqcontroller;
+
+ struct sde_hw_intr *hw_intr;
+ struct sde_irq irq_obj;
+};
+
+struct vsync_info {
+ u32 frame_count;
+ u32 line_count;
+};
+
+#define to_sde_kms(x) container_of(x, struct sde_kms, base)
+
+struct sde_plane_state {
+ struct drm_plane_state base;
+
+ /* aligned with property */
+ uint8_t premultiplied;
+ uint8_t zpos;
+ uint8_t alpha;
+
+ /* assigned by crtc blender */
+ enum sde_stage stage;
+
+ /* some additional transactional status to help us know in the
+ * apply path whether we need to update SMP allocation, and
+ * whether current update is still pending:
+ */
+ bool mode_changed : 1;
+ bool pending : 1;
+};
+
+#define to_sde_plane_state(x) \
+ container_of(x, struct sde_plane_state, base)
+
+int sde_disable(struct sde_kms *sde_kms);
+int sde_enable(struct sde_kms *sde_kms);
+
+/**
+ * IRQ functions
+ */
+int sde_irq_domain_init(struct sde_kms *sde_kms);
+int sde_irq_domain_fini(struct sde_kms *sde_kms);
+void sde_irq_preinstall(struct msm_kms *kms);
+int sde_irq_postinstall(struct msm_kms *kms);
+void sde_irq_uninstall(struct msm_kms *kms);
+irqreturn_t sde_irq(struct msm_kms *kms);
+
+/**
+ * sde_set_irqmask - IRQ helper function for writing IRQ mask
+ * to SDE HW interrupt register.
+ * @sde_kms: SDE handle
+ * @reg_off: SDE HW interrupt register offset
+ * @irqmask: IRQ mask
+ */
+void sde_set_irqmask(
+ struct sde_kms *sde_kms,
+ uint32_t reg_off,
+ uint32_t irqmask);
+
+/**
+ * sde_irq_idx_lookup - IRQ helper function for looking up irq_idx in the HW
+ * interrupt mapping table.
+ * @sde_kms: SDE handle
+ * @intr_type: SDE HW interrupt type for lookup
+ * @instance_idx: SDE HW block instance defined in sde_hw_mdss.h
+ * @return: irq_idx, or -EINVAL when the lookup fails
+ */
+int sde_irq_idx_lookup(
+ struct sde_kms *sde_kms,
+ enum sde_intr_type intr_type,
+ uint32_t instance_idx);
+
+/**
+ * sde_enable_irq - IRQ helper function for enabling one or more IRQs
+ * @sde_kms: SDE handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 for success enabling IRQ, otherwise failure
+ */
+int sde_enable_irq(
+ struct sde_kms *sde_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * sde_disable_irq - IRQ helper function for disabling one or more IRQs
+ * @sde_kms: SDE handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 for success disabling IRQ, otherwise failure
+ */
+int sde_disable_irq(
+ struct sde_kms *sde_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * sde_register_irq_callback - For registering callback function on IRQ
+ * interrupt
+ * @sde_kms: SDE handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing callback function
+ * and argument. Passing NULL for irq_cb will unregister
+ * the callback for the given irq_idx
+ * @return: 0 for success registering callback, otherwise failure
+ */
+int sde_register_irq_callback(
+ struct sde_kms *sde_kms,
+ int irq_idx,
+ struct sde_irq_callback *irq_cb);
+
+/**
+ * sde_clear_all_irqs - Clearing all SDE IRQ interrupt status
+ * @sde_kms: SDE handle
+ */
+void sde_clear_all_irqs(struct sde_kms *sde_kms);
+
+/**
+ * sde_disable_all_irqs - Disabling all SDE IRQ interrupts
+ * @sde_kms: SDE handle
+ */
+void sde_disable_all_irqs(struct sde_kms *sde_kms);
+
+/**
+ * Vblank enable/disable functions
+ */
+int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+enum sde_sspp sde_plane_pipe(struct drm_plane *plane);
+struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe,
+ bool private_plane);
+
+uint32_t sde_crtc_vblank(struct drm_crtc *crtc);
+
+void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
+void sde_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
+void sde_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
+struct drm_crtc *sde_crtc_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ struct drm_plane *plane, int id);
+
+struct sde_encoder_hw_resources {
+ bool intfs[INTF_MAX];
+ bool pingpongs[PINGPONG_MAX];
+};
+void sde_encoder_get_hw_resources(struct drm_encoder *encoder,
+ struct sde_encoder_hw_resources *hw_res);
+void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
+ void (*cb)(void *), void *data);
+void sde_encoders_init(struct drm_device *dev);
+
+#endif /* __SDE_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_mdp_formats.c b/drivers/gpu/drm/msm/sde/sde_mdp_formats.c
new file mode 100644
index 000000000000..56b65d4bd45e
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_mdp_formats.c
@@ -0,0 +1,134 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "sde_mdp_formats.h"
+
+static struct sde_mdp_format_params sde_mdp_format_map[] = {
+ INTERLEAVED_RGB_FMT(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
+ true, 4, 0),
+
+ INTERLEAVED_RGB_FMT(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+ true, 4, 0),
+
+ INTERLEAVED_RGB_FMT(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ true, 4, 0),
+
+ INTERLEAVED_RGB_FMT(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ true, 4, 0),
+
+ INTERLEAVED_RGB_FMT(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
+ true, 4, 0),
+
+ INTERLEAVED_RGB_FMT(RGB888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0,
+ false, 3, 0),
+
+ INTERLEAVED_RGB_FMT(BGR888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0,
+ false, 3, 0),
+
+ INTERLEAVED_RGB_FMT(RGB565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0,
+ false, 2, 0),
+
+ INTERLEAVED_RGB_FMT(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0,
+ false, 2, 0),
+
+ PSEDUO_YUV_FMT(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ SDE_MDP_CHROMA_420, 0),
+
+ PSEDUO_YUV_FMT(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ SDE_MDP_CHROMA_420, 0),
+
+ PSEDUO_YUV_FMT(NV16,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ SDE_MDP_CHROMA_H2V1, 0),
+
+ PSEDUO_YUV_FMT(NV61,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ SDE_MDP_CHROMA_H2V1, 0),
+
+ INTERLEAVED_YUV_FMT(VYUY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+ false, SDE_MDP_CHROMA_H2V1, 4, 2,
+ 0),
+
+ INTERLEAVED_YUV_FMT(UYVY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+ false, SDE_MDP_CHROMA_H2V1, 4, 2,
+ 0),
+
+ INTERLEAVED_YUV_FMT(YUYV,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ false, SDE_MDP_CHROMA_H2V1, 4, 2,
+ 0),
+
+ INTERLEAVED_YUV_FMT(YVYU,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ false, SDE_MDP_CHROMA_H2V1, 4, 2,
+ 0),
+
+ PLANAR_YUV_FMT(YUV420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb, C0_G_Y,
+ false, SDE_MDP_CHROMA_420, 2,
+ 0),
+
+ PLANAR_YUV_FMT(YVU420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr, C0_G_Y,
+ false, SDE_MDP_CHROMA_420, 2,
+ 0),
+};
+
+struct sde_mdp_format_params *sde_mdp_get_format_params(u32 format,
+ u32 fmt_modifier)
+{
+ u32 i = 0;
+ struct sde_mdp_format_params *fmt = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(sde_mdp_format_map); i++)
+ if (format == sde_mdp_format_map[i].format) {
+ fmt = &sde_mdp_format_map[i];
+ break;
+ }
+
+ return fmt;
+}
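+
+/*
+ * Lookup sketch (illustrative only): fetch the description for an NV12
+ * buffer and use it to decide whether the pipe needs CSC. The
+ * fmt_modifier argument is not consulted by the lookup yet.
+ */
+static inline bool sde_mdp_format_needs_csc_example(void)
+{
+ struct sde_mdp_format_params *fmt;
+
+ fmt = sde_mdp_get_format_params(DRM_FORMAT_NV12, 0);
+
+ return fmt ? fmt->is_yuv : false;
+}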
+
diff --git a/drivers/gpu/drm/msm/sde/sde_mdp_formats.h b/drivers/gpu/drm/msm/sde/sde_mdp_formats.h
new file mode 100644
index 000000000000..e6f1c60aad11
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_mdp_formats.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_MDP_FORMATS_H
+#define _SDE_MDP_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "sde_hw_mdss.h"
+
+/**
+ * MDP supported format packing, bpp, and other format
+ * information.
+ * MDP currently only supports interleaved RGB formats.
+ * UBWC support for a pixel format is indicated by the flag;
+ * there is an additional metadata plane for such formats
+ */
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, alpha, bp, flg) \
+{ \
+ .format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { a, r, g, b}, \
+ .chroma_sample = SDE_MDP_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = (alpha == true) ? 4 : 3, \
+ .bpp = bp, \
+ .fetch_mode = SDE_MDP_FETCH_LINEAR, \
+ .is_yuv = false, \
+ .flag = flg \
+}
+
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
+alpha, chroma, count, bp, flg) \
+{ \
+ .format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3)}, \
+ .bits = { a, r, g, b}, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = count, \
+ .bpp = bp, \
+ .fetch_mode = SDE_MDP_FETCH_LINEAR, \
+ .is_yuv = true, \
+ .flag = flg \
+}
+
+#define PSEDUO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg) \
+{ \
+ .format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { a, r, g, b}, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = SDE_MDP_FETCH_LINEAR, \
+ .is_yuv = true, \
+ .flag = flg \
+}
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, flg)\
+{ \
+ .format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), 0 }, \
+ .bits = { a, r, g, b}, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 0, \
+ .bpp = bp, \
+ .fetch_mode = SDE_MDP_FETCH_LINEAR, \
+ .is_yuv = true, \
+ .flag = flg \
+}
+
+/**
+ * sde_mdp_get_format_params() - look up sde format parameters
+ * @format: DRM fourcc pixel format
+ * @fmt_modifier: DRM format modifier
+ *
+ * Return: pointer to the matching format entry, or NULL if unsupported
+ */
+struct sde_mdp_format_params *sde_mdp_get_format_params(u32 format,
+ u32 fmt_modifier);
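+
+/*
+ * Example lookup (a minimal sketch; assumes DRM_FORMAT_NV12 is present in
+ * the format map and that the modifier is unused for linear layouts):
+ *
+ *	struct sde_mdp_format_params *fmt;
+ *
+ *	fmt = sde_mdp_get_format_params(DRM_FORMAT_NV12, 0);
+ *	if (!fmt)
+ *		return -EINVAL;
+ */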
+
+#endif /*_SDE_MDP_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
new file mode 100644
index 000000000000..cf34de2f1e3d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -0,0 +1,767 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_kms.h"
+#include "sde_hwio.h"
+#include "sde_hw_mdp_ctl.h"
+#include "sde_mdp_formats.h"
+#include "sde_hw_sspp.h"
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define PHASE_STEP_SHIFT 21
+#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL 15
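+
+/*
+ * A decimation factor of n divides a dimension by 2^n, rounding up, e.g.
+ * DECIMATED_DIMENSION(1080, 1) == 540. Phase steps below use fixed point
+ * with PHASE_STEP_SHIFT (21) fractional bits, so PHASE_STEP_UNIT_SCALE
+ * represents a step of exactly one source pixel.
+ */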
+
+#define SDE_PLANE_FEATURE_SCALER \
+	(BIT(SDE_SSPP_SCALAR_QSEED2) | \
+	BIT(SDE_SSPP_SCALAR_QSEED3) | \
+ BIT(SDE_SSPP_SCALAR_RGB))
+
+#ifndef SDE_PLANE_DEBUG_START
+#define SDE_PLANE_DEBUG_START()
+#endif
+
+#ifndef SDE_PLANE_DEBUG_END
+#define SDE_PLANE_DEBUG_END()
+#endif
+
+struct sde_plane {
+ struct drm_plane base;
+ const char *name;
+
+ int mmu_id;
+
+ enum sde_sspp pipe;
+ uint32_t features; /* capabilities from catalog */
+ uint32_t flush_mask; /* used to commit pipe registers */
+ uint32_t nformats;
+ uint32_t formats[32];
+
+ struct sde_hw_pipe *pipe_hw;
+ struct sde_hw_pipe_cfg pipe_cfg;
+ struct sde_hw_pixel_ext pixel_ext;
+};
+#define to_sde_plane(x) container_of(x, struct sde_plane, base)
+
+static bool sde_plane_enabled(struct drm_plane_state *state)
+{
+ return state->fb && state->crtc;
+}
+
+static void sde_plane_set_scanout(struct drm_plane *plane,
+ struct sde_hw_pipe_cfg *pipe_cfg, struct drm_framebuffer *fb)
+{
+ struct sde_plane *psde = to_sde_plane(plane);
+ int i;
+
+ if (pipe_cfg && fb && psde->pipe_hw->ops.setup_sourceaddress) {
+ /* stride */
+ i = min_t(int, ARRAY_SIZE(fb->pitches), SDE_MAX_PLANES);
+ while (i) {
+ --i;
+ pipe_cfg->src.ystride[i] = fb->pitches[i];
+ }
+
+ /* address */
+ for (i = 0; i < ARRAY_SIZE(pipe_cfg->addr.plane); ++i)
+ pipe_cfg->addr.plane[i] = msm_framebuffer_iova(fb,
+ psde->mmu_id, i);
+
+ /* hw driver */
+ psde->pipe_hw->ops.setup_sourceaddress(psde->pipe_hw, pipe_cfg);
+ }
+}
+
+static void sde_plane_scale_helper(struct drm_plane *plane,
+ uint32_t src, uint32_t dst, uint32_t *phase_steps,
+ enum sde_hw_filter *filter, struct sde_mdp_format_params *fmt,
+ uint32_t chroma_subsampling)
+{
+	/* calculate phase steps, leave init phase as zero */
+ phase_steps[SDE_SSPP_COMP_LUMA] =
+ mult_frac(1 << PHASE_STEP_SHIFT, src, dst);
+ phase_steps[SDE_SSPP_COMP_CHROMA] =
+ phase_steps[SDE_SSPP_COMP_LUMA] / chroma_subsampling;
+
+ /* calculate scaler config, if necessary */
+ if (src != dst) {
+ filter[SDE_SSPP_COMP_ALPHA] = (src < dst) ?
+ SDE_MDP_SCALE_FILTER_BIL :
+ SDE_MDP_SCALE_FILTER_PCMN;
+
+ if (fmt->is_yuv)
+ filter[SDE_SSPP_COMP_LUMA] = SDE_MDP_SCALE_FILTER_CA;
+ else
+ filter[SDE_SSPP_COMP_LUMA] =
+ filter[SDE_SSPP_COMP_ALPHA];
+ }
+}
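+
+/*
+ * Worked example (illustrative numbers only): downscaling a 1920-wide
+ * source to a 960-wide destination yields a luma phase step of
+ * mult_frac(1 << PHASE_STEP_SHIFT, 1920, 960) == 2 * PHASE_STEP_UNIT_SCALE,
+ * i.e. the scaler advances two source pixels per destination pixel; with
+ * 2x chroma subsampling the chroma step is half of that.
+ */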
+
+/* CIFIX: clean up fmt/subsampling params once we're using fourcc formats */
+static void _sde_plane_pixel_ext_helper(struct drm_plane *plane,
+ uint32_t src, uint32_t dst, uint32_t decimated_src,
+ uint32_t *phase_steps, uint32_t *out_src, int *out_edge1,
+ int *out_edge2, struct sde_mdp_format_params *fmt,
+ uint32_t chroma_subsampling, bool post_compare)
+{
+ /* CIFIX: adapted from mdss_mdp_pipe_calc_pixel_extn() */
+ int64_t edge1, edge2, caf;
+ uint32_t src_work;
+ int i, tmp;
+
+ if (plane && phase_steps && out_src && out_edge1 && out_edge2 && fmt) {
+ /* enable CAF for YUV formats */
+ if (fmt->is_yuv)
+ caf = PHASE_STEP_UNIT_SCALE;
+ else
+ caf = 0;
+
+ for (i = 0; i < SDE_MAX_PLANES; i++) {
+ src_work = decimated_src;
+ if (i == 1 || i == 2)
+ src_work /= chroma_subsampling;
+ if (post_compare)
+ src = src_work;
+ if (!(fmt->is_yuv) && (src == dst)) {
+ /* unity */
+ edge1 = 0;
+ edge2 = 0;
+ } else if (dst >= src) {
+ /* upscale */
+ edge1 = (1 << PHASE_RESIDUAL);
+ edge1 -= caf;
+ edge2 = (1 << PHASE_RESIDUAL);
+ edge2 += (dst - 1) * *(phase_steps + i);
+ edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE;
+ edge2 += caf;
+ edge2 = -(edge2);
+ } else {
+ /* downscale */
+ edge1 = 0;
+ edge2 = (dst - 1) * *(phase_steps + i);
+ edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE;
+ edge2 += *(phase_steps + i);
+ edge2 = -(edge2);
+ }
+
+ /* only enable CAF for luma plane */
+ caf = 0;
+
+ /* populate output arrays */
+ *(out_src + i) = src_work;
+
+ /* edge updates taken from __pxl_extn_helper */
+ /* CIFIX: why are we casting first to uint32_t? */
+ if (edge1 >= 0) {
+ tmp = (uint32_t)edge1;
+ tmp >>= PHASE_STEP_SHIFT;
+ *(out_edge1 + i) = -tmp;
+ } else {
+ tmp = (uint32_t)(-edge1);
+ *(out_edge1 + i) = (tmp + PHASE_STEP_UNIT_SCALE
+ - 1) >> PHASE_STEP_SHIFT;
+ }
+ if (edge2 >= 0) {
+ tmp = (uint32_t)edge2;
+ tmp >>= PHASE_STEP_SHIFT;
+ *(out_edge2 + i) = -tmp;
+ } else {
+ tmp = (uint32_t)(-edge2);
+ *(out_edge2 + i) = (tmp + PHASE_STEP_UNIT_SCALE
+ - 1) >> PHASE_STEP_SHIFT;
+ }
+ }
+ }
+}
+
+static int sde_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct sde_plane *psde = to_sde_plane(plane);
+ struct sde_plane_state *pstate;
+ const struct mdp_format *format;
+ uint32_t nplanes, pix_format, tmp;
+ int i;
+ struct sde_mdp_format_params *fmt;
+ struct sde_hw_pixel_ext *pe;
+ int ret = 0;
+
+ SDE_PLANE_DEBUG_START();
+ nplanes = drm_format_num_planes(fb->pixel_format);
+
+ pstate = to_sde_plane_state(plane->state);
+
+ format = to_mdp_format(msm_framebuffer_format(fb));
+ pix_format = format->base.pixel_format;
+
+ /* src values are in Q16 fixed point, convert to integer */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", psde->name,
+ fb->base.id, src_x, src_y, src_w, src_h,
+ crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
+
+ /* update format configuration */
+ memset(&(psde->pipe_cfg), 0, sizeof(struct sde_hw_pipe_cfg));
+
+ psde->pipe_cfg.src.format = sde_mdp_get_format_params(pix_format,
+ 0/* CIFIX: fmt_modifier */);
+ psde->pipe_cfg.src.width = fb->width;
+ psde->pipe_cfg.src.height = fb->height;
+ psde->pipe_cfg.src.num_planes = nplanes;
+
+ sde_plane_set_scanout(plane, &psde->pipe_cfg, fb);
+
+ psde->pipe_cfg.src_rect.x = src_x;
+ psde->pipe_cfg.src_rect.y = src_y;
+ psde->pipe_cfg.src_rect.w = src_w;
+ psde->pipe_cfg.src_rect.h = src_h;
+
+ psde->pipe_cfg.dst_rect.x = crtc_x;
+ psde->pipe_cfg.dst_rect.y = crtc_y;
+ psde->pipe_cfg.dst_rect.w = crtc_w;
+ psde->pipe_cfg.dst_rect.h = crtc_h;
+
+ psde->pipe_cfg.horz_decimation = 0;
+ psde->pipe_cfg.vert_decimation = 0;
+
+ /* get sde pixel format definition */
+ fmt = psde->pipe_cfg.src.format;
+
+ /* update pixel extensions */
+ pe = &(psde->pixel_ext);
+ if (!pe->enable_pxl_ext) {
+ uint32_t chroma_subsample_h, chroma_subsample_v;
+
+ chroma_subsample_h = psde->pipe_cfg.horz_decimation ? 1 :
+ drm_format_horz_chroma_subsampling(pix_format);
+ chroma_subsample_v = psde->pipe_cfg.vert_decimation ? 1 :
+ drm_format_vert_chroma_subsampling(pix_format);
+
+ memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+
+ /* calculate phase steps */
+ sde_plane_scale_helper(plane, src_w, crtc_w,
+ pe->phase_step_x,
+ pe->horz_filter, fmt, chroma_subsample_h);
+ sde_plane_scale_helper(plane, src_h, crtc_h,
+ pe->phase_step_y,
+ pe->vert_filter, fmt, chroma_subsample_v);
+
+		/* calculate left/right/top/bottom pixel extensions */
+ tmp = DECIMATED_DIMENSION(src_w,
+ psde->pipe_cfg.horz_decimation);
+ if (fmt->is_yuv)
+ tmp &= ~0x1;
+ _sde_plane_pixel_ext_helper(plane, src_w, crtc_w, tmp,
+ pe->phase_step_x,
+ pe->roi_w,
+ pe->num_ext_pxls_left,
+ pe->num_ext_pxls_right, fmt,
+ chroma_subsample_h, 0);
+
+ tmp = DECIMATED_DIMENSION(src_h,
+ psde->pipe_cfg.vert_decimation);
+ _sde_plane_pixel_ext_helper(plane, src_h, crtc_h, tmp,
+ pe->phase_step_y,
+ pe->roi_h,
+ pe->num_ext_pxls_top,
+ pe->num_ext_pxls_btm, fmt,
+ chroma_subsample_v, 1);
+
+ /* CIFIX: port "Single pixel rgb scale adjustment"? */
+
+ for (i = 0; i < SDE_MAX_PLANES; i++) {
+ if (pe->num_ext_pxls_left[i] >= 0)
+ pe->left_rpt[i] =
+ pe->num_ext_pxls_left[i];
+ else
+ pe->left_ftch[i] =
+ pe->num_ext_pxls_left[i];
+
+ if (pe->num_ext_pxls_right[i] >= 0)
+ pe->right_rpt[i] =
+ pe->num_ext_pxls_right[i];
+ else
+ pe->right_ftch[i] =
+ pe->num_ext_pxls_right[i];
+
+ if (pe->num_ext_pxls_top[i] >= 0)
+ pe->top_rpt[i] =
+ pe->num_ext_pxls_top[i];
+ else
+ pe->top_ftch[i] =
+ pe->num_ext_pxls_top[i];
+
+ if (pe->num_ext_pxls_btm[i] >= 0)
+ pe->btm_rpt[i] =
+ pe->num_ext_pxls_btm[i];
+ else
+ pe->btm_ftch[i] =
+ pe->num_ext_pxls_btm[i];
+ }
+ }
+
+ if (psde->pipe_hw->ops.setup_sourceformat)
+ psde->pipe_hw->ops.setup_sourceformat(psde->pipe_hw,
+ &psde->pipe_cfg, 0 /* CIFIX: flags */);
+ if (psde->pipe_hw->ops.setup_rects)
+ psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
+ &psde->pipe_cfg, &psde->pixel_ext);
+
+ /* update csc */
+
+ SDE_PLANE_DEBUG_END();
+ return ret;
+}
+
+static int sde_plane_prepare_fb(struct drm_plane *plane,
+ const struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct sde_plane *psde = to_sde_plane(plane);
+
+ if (!new_state->fb)
+ return 0;
+
+ SDE_PLANE_DEBUG_START();
+ SDE_PLANE_DEBUG_END();
+ DBG("%s: prepare: FB[%u]", psde->name, fb->base.id);
+ return msm_framebuffer_prepare(fb, psde->mmu_id);
+}
+
+static void sde_plane_cleanup_fb(struct drm_plane *plane,
+ const struct drm_plane_state *old_state)
+{
+ struct drm_framebuffer *fb = old_state->fb;
+ struct sde_plane *psde = to_sde_plane(plane);
+
+ if (!fb)
+ return;
+
+ SDE_PLANE_DEBUG_START();
+ SDE_PLANE_DEBUG_END();
+ DBG("%s: cleanup: FB[%u]", psde->name, fb->base.id);
+ msm_framebuffer_cleanup(fb, psde->mmu_id);
+}
+
+static int sde_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct sde_plane *psde = to_sde_plane(plane);
+ struct drm_plane_state *old_state = plane->state;
+ const struct mdp_format *format;
+
+ SDE_PLANE_DEBUG_START();
+ SDE_PLANE_DEBUG_END();
+ DBG("%s: check (%d -> %d)", psde->name,
+ sde_plane_enabled(old_state), sde_plane_enabled(state));
+
+ if (sde_plane_enabled(state)) {
+ /* CIFIX: don't use mdp format? */
+ format = to_mdp_format(msm_framebuffer_format(state->fb));
+ if (MDP_FORMAT_IS_YUV(format) &&
+ (!(psde->features & SDE_PLANE_FEATURE_SCALER) ||
+ !(psde->features & BIT(SDE_SSPP_CSC)))) {
+ dev_err(plane->dev->dev,
+ "Pipe doesn't support YUV\n");
+
+ return -EINVAL;
+ }
+
+ if (!(psde->features & SDE_PLANE_FEATURE_SCALER) &&
+ (((state->src_w >> 16) != state->crtc_w) ||
+ ((state->src_h >> 16) != state->crtc_h))) {
+ dev_err(plane->dev->dev,
+ "Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
+ state->src_w >> 16, state->src_h >> 16,
+ state->crtc_w, state->crtc_h);
+
+ return -EINVAL;
+ }
+ }
+
+ if (sde_plane_enabled(state) && sde_plane_enabled(old_state)) {
+ /* we cannot change SMP block configuration during scanout: */
+ bool full_modeset = false;
+
+ if (state->fb->pixel_format != old_state->fb->pixel_format) {
+ DBG("%s: pixel_format change!", psde->name);
+ full_modeset = true;
+ }
+ if (state->src_w != old_state->src_w) {
+ DBG("%s: src_w change!", psde->name);
+ full_modeset = true;
+ }
+ if (to_sde_plane_state(old_state)->pending) {
+ DBG("%s: still pending!", psde->name);
+ full_modeset = true;
+ }
+ if (full_modeset) {
+			struct drm_crtc_state *crtc_state =
+					drm_atomic_get_crtc_state(state->state,
+							state->crtc);
+			if (IS_ERR(crtc_state))
+				return PTR_ERR(crtc_state);
+			crtc_state->mode_changed = true;
+ to_sde_plane_state(state)->mode_changed = true;
+ }
+ } else {
+ to_sde_plane_state(state)->mode_changed = true;
+ }
+
+ return 0;
+}
+
+static void sde_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct sde_plane *sde_plane = to_sde_plane(plane);
+ struct drm_plane_state *state = plane->state;
+
+ DBG("%s: update", sde_plane->name);
+
+ SDE_PLANE_DEBUG_START();
+ if (!sde_plane_enabled(state)) {
+ to_sde_plane_state(state)->pending = true;
+ } else if (to_sde_plane_state(state)->mode_changed) {
+ int ret;
+
+ to_sde_plane_state(state)->pending = true;
+ ret = sde_plane_mode_set(plane,
+ state->crtc, state->fb,
+ state->crtc_x, state->crtc_y,
+ state->crtc_w, state->crtc_h,
+ state->src_x, state->src_y,
+ state->src_w, state->src_h);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+ } else {
+ sde_plane_set_scanout(plane, &sde_plane->pipe_cfg, state->fb);
+ }
+ SDE_PLANE_DEBUG_END();
+}
+
+/* helper to install properties which are common to planes and crtcs */
+static void sde_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ struct drm_device *dev = plane->dev;
+ struct msm_drm_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+
+ SDE_PLANE_DEBUG_START();
+#define INSTALL_PROPERTY(name, NAME, init_val, fnc, ...) do { \
+ prop = dev_priv->plane_property[PLANE_PROP_##NAME]; \
+ if (!prop) { \
+ prop = drm_property_##fnc(dev, 0, #name, \
+ ##__VA_ARGS__); \
+ if (!prop) { \
+ dev_warn(dev->dev, \
+ "Create property %s failed\n", \
+ #name); \
+ return; \
+ } \
+ dev_priv->plane_property[PLANE_PROP_##NAME] = prop; \
+ } \
+ drm_object_attach_property(&plane->base, prop, init_val); \
+ } while (0)
+
+#define INSTALL_RANGE_PROPERTY(name, NAME, min, max, init_val) \
+ INSTALL_PROPERTY(name, NAME, init_val, \
+ create_range, min, max)
+
+#define INSTALL_ENUM_PROPERTY(name, NAME, init_val) \
+ INSTALL_PROPERTY(name, NAME, init_val, \
+ create_enum, name##_prop_enum_list, \
+ ARRAY_SIZE(name##_prop_enum_list))
+
+ INSTALL_RANGE_PROPERTY(zpos, ZPOS, 1, 255, 1);
+
+#undef INSTALL_RANGE_PROPERTY
+#undef INSTALL_ENUM_PROPERTY
+#undef INSTALL_PROPERTY
+ SDE_PLANE_DEBUG_END();
+}
+
+static int sde_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = plane->dev;
+ struct sde_plane_state *pstate;
+ struct msm_drm_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ SDE_PLANE_DEBUG_START();
+
+ pstate = to_sde_plane_state(state);
+
+#define SET_PROPERTY(name, NAME, type) do { \
+ if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
+ pstate->name = (type)val; \
+ DBG("Set property %s %d", #name, (type)val); \
+ goto done; \
+ } \
+ } while (0)
+
+ SET_PROPERTY(zpos, ZPOS, uint8_t);
+
+ dev_err(dev->dev, "Invalid property\n");
+ ret = -EINVAL;
+done:
+ SDE_PLANE_DEBUG_END();
+ return ret;
+#undef SET_PROPERTY
+}
+
+static int sde_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ int rc;
+
+ SDE_PLANE_DEBUG_START();
+ rc = sde_plane_atomic_set_property(plane, plane->state, property,
+ val);
+ SDE_PLANE_DEBUG_END();
+ return rc;
+}
+
+static int sde_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ struct drm_device *dev = plane->dev;
+ struct sde_plane_state *pstate;
+ struct msm_drm_private *dev_priv = dev->dev_private;
+ int ret = 0;
+
+ SDE_PLANE_DEBUG_START();
+ pstate = to_sde_plane_state(state);
+
+#define GET_PROPERTY(name, NAME, type) do { \
+ if (dev_priv->plane_property[PLANE_PROP_##NAME] == property) { \
+ *val = pstate->name; \
+ DBG("Get property %s %lld", #name, *val); \
+ goto done; \
+ } \
+ } while (0)
+
+ GET_PROPERTY(zpos, ZPOS, uint8_t);
+
+ dev_err(dev->dev, "Invalid property\n");
+ ret = -EINVAL;
+done:
+ SDE_PLANE_DEBUG_END();
+ return ret;
+#undef GET_PROPERTY
+}
+
+static void sde_plane_destroy(struct drm_plane *plane)
+{
+ struct sde_plane *psde = to_sde_plane(plane);
+
+ SDE_PLANE_DEBUG_START();
+
+ if (psde->pipe_hw)
+ sde_hw_sspp_destroy(psde->pipe_hw);
+
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
+
+ kfree(psde);
+
+ SDE_PLANE_DEBUG_END();
+}
+
+static void sde_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ SDE_PLANE_DEBUG_START();
+ if (state->fb)
+ drm_framebuffer_unreference(state->fb);
+
+ kfree(to_sde_plane_state(state));
+ SDE_PLANE_DEBUG_END();
+}
+
+static struct drm_plane_state *
+sde_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct sde_plane_state *pstate;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ SDE_PLANE_DEBUG_START();
+ pstate = kmemdup(to_sde_plane_state(plane->state),
+ sizeof(*pstate), GFP_KERNEL);
+
+	if (!pstate)
+		return NULL;
+
+	if (pstate->base.fb)
+		drm_framebuffer_reference(pstate->base.fb);
+
+	pstate->mode_changed = false;
+	pstate->pending = false;
+ SDE_PLANE_DEBUG_END();
+
+ return &pstate->base;
+}
+
+static void sde_plane_reset(struct drm_plane *plane)
+{
+ struct sde_plane_state *pstate;
+
+ SDE_PLANE_DEBUG_START();
+ if (plane->state && plane->state->fb)
+ drm_framebuffer_unreference(plane->state->fb);
+
+ kfree(to_sde_plane_state(plane->state));
+	pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+	if (!pstate)
+		return;
+
+ /* assign default blend parameters */
+ pstate->alpha = 255;
+ pstate->premultiplied = 0;
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ pstate->zpos = STAGE_BASE;
+ else
+ pstate->zpos = STAGE0 + drm_plane_index(plane);
+
+ pstate->base.plane = plane;
+
+ plane->state = &pstate->base;
+ SDE_PLANE_DEBUG_END();
+}
+
+static const struct drm_plane_funcs sde_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = sde_plane_destroy,
+ .set_property = sde_plane_set_property,
+ .atomic_set_property = sde_plane_atomic_set_property,
+ .atomic_get_property = sde_plane_atomic_get_property,
+ .reset = sde_plane_reset,
+ .atomic_duplicate_state = sde_plane_duplicate_state,
+ .atomic_destroy_state = sde_plane_destroy_state,
+};
+
+static const struct drm_plane_helper_funcs sde_plane_helper_funcs = {
+ .prepare_fb = sde_plane_prepare_fb,
+ .cleanup_fb = sde_plane_cleanup_fb,
+ .atomic_check = sde_plane_atomic_check,
+ .atomic_update = sde_plane_atomic_update,
+};
+
+enum sde_sspp sde_plane_pipe(struct drm_plane *plane)
+{
+ struct sde_plane *sde_plane = to_sde_plane(plane);
+
+ return sde_plane->pipe;
+}
+
+/* initialize plane */
+struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe,
+ bool private_plane)
+{
+ static const char tmp_name[] = "---";
+ struct drm_plane *plane = NULL;
+ struct sde_plane *psde;
+ struct sde_hw_ctl *sde_ctl;
+ struct msm_drm_private *priv;
+ struct sde_kms *kms;
+ struct sde_mdss_cfg *sde_cat;
+	int ret = -EINVAL;
+ enum drm_plane_type type;
+
+ priv = dev->dev_private;
+ if (!priv) {
+ DRM_ERROR("[%u]Private data is NULL\n", pipe);
+ goto exit;
+ }
+
+ if (!priv->kms) {
+ DRM_ERROR("[%u]Invalid KMS reference\n", pipe);
+ goto exit;
+ }
+ kms = to_sde_kms(priv->kms);
+
+ psde = kzalloc(sizeof(*psde), GFP_KERNEL);
+ if (!psde) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &psde->base;
+
+ psde->pipe = pipe;
+ psde->name = tmp_name;
+
+ if (kms) {
+ /* mmu id for buffer mapping */
+ psde->mmu_id = kms->mmu_id;
+
+ /* check catalog for features mask */
+ sde_cat = kms->catalog;
+ if (sde_cat)
+ psde->features = sde_cat->sspp[pipe].features;
+ }
+ psde->nformats = mdp_get_formats(psde->formats,
+ ARRAY_SIZE(psde->formats),
+ !(psde->features & BIT(SDE_SSPP_CSC)) ||
+ !(psde->features & SDE_PLANE_FEATURE_SCALER));
+
+ type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+ ret = drm_universal_plane_init(dev, plane, 0xff, &sde_plane_funcs,
+ psde->formats, psde->nformats,
+ type);
+ if (ret)
+ goto fail;
+
+ drm_plane_helper_add(plane, &sde_plane_helper_funcs);
+
+ sde_plane_install_properties(plane, &plane->base);
+
+ psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, sde_cat);
+ if (IS_ERR(psde->pipe_hw)) {
+ ret = PTR_ERR(psde->pipe_hw);
+ psde->pipe_hw = NULL;
+ goto fail;
+ }
+
+ /* cache flush mask for later */
+ sde_ctl = sde_hw_ctl_init(CTL_0, kms->mmio, sde_cat);
+ if (!IS_ERR(sde_ctl)) {
+ if (sde_ctl->ops.get_bitmask_sspp)
+ sde_ctl->ops.get_bitmask_sspp(sde_ctl,
+ &psde->flush_mask, pipe);
+ sde_hw_ctl_destroy(sde_ctl);
+ }
+
+ pr_err("%s: Successfully created plane\n", __func__);
+ return plane;
+
+fail:
+ pr_err("%s: Plane creation failed\n", __func__);
+ if (plane)
+ sde_plane_destroy(plane);
+exit:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/input/misc/qpnp-power-on.c b/drivers/input/misc/qpnp-power-on.c
index e1c16aa5da43..add11d47ea2f 100644
--- a/drivers/input/misc/qpnp-power-on.c
+++ b/drivers/input/misc/qpnp-power-on.c
@@ -2084,6 +2084,9 @@ static int qpnp_pon_probe(struct platform_device *pdev)
return rc;
}
+ if (sys_reset)
+ boot_reason = ffs(pon_sts);
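+	/* ffs() maps the lowest set PON status bit to a 1-based reason
+	 * index, e.g. a status of 0x10 yields boot_reason == 5, while the
+	 * 0-based index computed below subtracts one.
+	 */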
+
index = ffs(pon_sts) - 1;
cold_boot = !qpnp_pon_is_warm_reset();
if (index >= ARRAY_SIZE(qpnp_pon_reason) || index < 0) {
@@ -2297,8 +2300,6 @@ static int qpnp_pon_probe(struct platform_device *pdev)
list_add(&pon->list, &spon_dev_list);
spin_unlock_irqrestore(&spon_list_slock, flags);
pon->is_spon = true;
- } else {
- boot_reason = ffs(pon_sts);
}
/* config whether store the hard reset reason */
diff --git a/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c b/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
index d0843fb3a32c..79231fb314ad 100644
--- a/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
+++ b/drivers/media/platform/msm/camera_v2/common/msm_camera_tz_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -295,6 +295,7 @@ int32_t msm_camera_tz_ta_set_mode(uint32_t mode,
cmd_len = sizeof(struct msm_camera_tz_set_mode_req_t);
rsp_len = sizeof(struct msm_camera_tz_set_mode_rsp_t);
+ msm_camera_tz_lock();
rc = get_cmd_rsp_buffers(ta_qseecom_handle,
(void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
if (!rc) {
@@ -309,10 +310,12 @@ int32_t msm_camera_tz_ta_set_mode(uint32_t mode,
pr_err("%s:%d - Failed: rc=%d\n",
__func__, __LINE__,
rc);
+ msm_camera_tz_unlock();
return rc;
}
rc = rsp->rc;
}
+ msm_camera_tz_unlock();
CDBG("Done: rc=%d, Mode=0x%08X - %lluus\n",
rc, mode,
ktime_us_delta(ktime_get(), startTime));
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index 72acb822bb11..db1ca629cbd6 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -2,6 +2,7 @@ config ATH10K
tristate "Atheros 802.11ac wireless cards support"
depends on MAC80211 && HAS_DMA
select ATH_COMMON
+ select CRC32
---help---
This module adds support for wireless adapters based on
Atheros IEEE 802.11ac family of chipsets.
@@ -14,6 +15,12 @@ config ATH10K_PCI
---help---
This module adds support for PCIE bus
+config ATH10K_AHB
+ bool "Atheros ath10k AHB support"
+ depends on ATH10K_PCI && OF && RESET_CONTROLLER
+ ---help---
+	This module adds support for the AHB bus
+
config ATH10K_DEBUG
bool "Atheros ath10k debugging"
depends on ATH10K
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index c04fb00e7930..930fadd940d8 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -25,5 +25,7 @@ obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
ce.o
+ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o
+
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
new file mode 100644
index 000000000000..766c63bf05c4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -0,0 +1,875 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include "core.h"
+#include "debug.h"
+#include "pci.h"
+#include "ahb.h"
+
+static const struct of_device_id ath10k_ahb_of_match[] = {
+ { .compatible = "qcom,ipq4019-wifi",
+ .data = (void *)ATH10K_HW_QCA4019
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
+
+static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
+{
+ return &((struct ath10k_pci *)ar->drv_priv)->ahb[0];
+}
+
+static void ath10k_ahb_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ iowrite32(value, ar_ahb->mem + offset);
+}
+
+static u32 ath10k_ahb_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ return ioread32(ar_ahb->mem + offset);
+}
+
+static u32 ath10k_ahb_gcc_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ return ioread32(ar_ahb->gcc_mem + offset);
+}
+
+static void ath10k_ahb_tcsr_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ iowrite32(value, ar_ahb->tcsr_mem + offset);
+}
+
+static u32 ath10k_ahb_tcsr_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ return ioread32(ar_ahb->tcsr_mem + offset);
+}
+
+static u32 ath10k_ahb_soc_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_ahb_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+static int ath10k_ahb_get_num_banks(struct ath10k *ar)
+{
+ if (ar->hw_rev == ATH10K_HW_QCA4019)
+ return 1;
+
+ ath10k_warn(ar, "unknown number of banks, assuming 1\n");
+ return 1;
+}
+
+static int ath10k_ahb_clock_init(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+
+ dev = &ar_ahb->pdev->dev;
+
+ ar_ahb->cmd_clk = devm_clk_get(dev, "wifi_wcss_cmd");
+ if (IS_ERR_OR_NULL(ar_ahb->cmd_clk)) {
+ ath10k_err(ar, "failed to get cmd clk: %ld\n",
+ PTR_ERR(ar_ahb->cmd_clk));
+ return ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV;
+ }
+
+ ar_ahb->ref_clk = devm_clk_get(dev, "wifi_wcss_ref");
+ if (IS_ERR_OR_NULL(ar_ahb->ref_clk)) {
+ ath10k_err(ar, "failed to get ref clk: %ld\n",
+ PTR_ERR(ar_ahb->ref_clk));
+ return ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV;
+ }
+
+ ar_ahb->rtc_clk = devm_clk_get(dev, "wifi_wcss_rtc");
+ if (IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
+ ath10k_err(ar, "failed to get rtc clk: %ld\n",
+ PTR_ERR(ar_ahb->rtc_clk));
+ return ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV;
+ }
+
+ return 0;
+}
+
+static void ath10k_ahb_clock_deinit(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ ar_ahb->cmd_clk = NULL;
+ ar_ahb->ref_clk = NULL;
+ ar_ahb->rtc_clk = NULL;
+}
+
+static int ath10k_ahb_clock_enable(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+ int ret;
+
+ dev = &ar_ahb->pdev->dev;
+
+ if (IS_ERR_OR_NULL(ar_ahb->cmd_clk) ||
+ IS_ERR_OR_NULL(ar_ahb->ref_clk) ||
+ IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
+ ath10k_err(ar, "clock(s) is/are not initialized\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = clk_prepare_enable(ar_ahb->cmd_clk);
+ if (ret) {
+ ath10k_err(ar, "failed to enable cmd clk: %d\n", ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(ar_ahb->ref_clk);
+ if (ret) {
+ ath10k_err(ar, "failed to enable ref clk: %d\n", ret);
+ goto err_cmd_clk_disable;
+ }
+
+ ret = clk_prepare_enable(ar_ahb->rtc_clk);
+ if (ret) {
+ ath10k_err(ar, "failed to enable rtc clk: %d\n", ret);
+ goto err_ref_clk_disable;
+ }
+
+ return 0;
+
+err_ref_clk_disable:
+ clk_disable_unprepare(ar_ahb->ref_clk);
+
+err_cmd_clk_disable:
+ clk_disable_unprepare(ar_ahb->cmd_clk);
+
+out:
+ return ret;
+}
+
+static void ath10k_ahb_clock_disable(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->cmd_clk))
+ clk_disable_unprepare(ar_ahb->cmd_clk);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->ref_clk))
+ clk_disable_unprepare(ar_ahb->ref_clk);
+
+ if (!IS_ERR_OR_NULL(ar_ahb->rtc_clk))
+ clk_disable_unprepare(ar_ahb->rtc_clk);
+}
+
+static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+
+ dev = &ar_ahb->pdev->dev;
+
+ ar_ahb->core_cold_rst = devm_reset_control_get(dev, "wifi_core_cold");
+ if (IS_ERR(ar_ahb->core_cold_rst)) {
+ ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->core_cold_rst));
+ return PTR_ERR(ar_ahb->core_cold_rst);
+ }
+
+ ar_ahb->radio_cold_rst = devm_reset_control_get(dev, "wifi_radio_cold");
+ if (IS_ERR(ar_ahb->radio_cold_rst)) {
+ ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->radio_cold_rst));
+ return PTR_ERR(ar_ahb->radio_cold_rst);
+ }
+
+ ar_ahb->radio_warm_rst = devm_reset_control_get(dev, "wifi_radio_warm");
+ if (IS_ERR(ar_ahb->radio_warm_rst)) {
+ ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->radio_warm_rst));
+ return PTR_ERR(ar_ahb->radio_warm_rst);
+ }
+
+ ar_ahb->radio_srif_rst = devm_reset_control_get(dev, "wifi_radio_srif");
+ if (IS_ERR(ar_ahb->radio_srif_rst)) {
+ ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->radio_srif_rst));
+ return PTR_ERR(ar_ahb->radio_srif_rst);
+ }
+
+ ar_ahb->cpu_init_rst = devm_reset_control_get(dev, "wifi_cpu_init");
+ if (IS_ERR(ar_ahb->cpu_init_rst)) {
+ ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->cpu_init_rst));
+ return PTR_ERR(ar_ahb->cpu_init_rst);
+ }
+
+ return 0;
+}
+
+static void ath10k_ahb_rst_ctrl_deinit(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ ar_ahb->core_cold_rst = NULL;
+ ar_ahb->radio_cold_rst = NULL;
+ ar_ahb->radio_warm_rst = NULL;
+ ar_ahb->radio_srif_rst = NULL;
+ ar_ahb->cpu_init_rst = NULL;
+}
+
+static int ath10k_ahb_release_reset(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ int ret;
+
+ if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
+ ath10k_err(ar, "rst ctrl(s) is/are not initialized\n");
+ return -EINVAL;
+ }
+
+ ret = reset_control_deassert(ar_ahb->radio_cold_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert radio cold rst: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(ar_ahb->radio_warm_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert radio warm rst: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(ar_ahb->radio_srif_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert radio srif rst: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(ar_ahb->cpu_init_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert cpu init rst: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_ahb_halt_axi_bus(struct ath10k *ar, u32 haltreq_reg,
+ u32 haltack_reg)
+{
+ unsigned long timeout;
+ u32 val;
+
+ /* Issue halt axi bus request */
+ val = ath10k_ahb_tcsr_read32(ar, haltreq_reg);
+ val |= AHB_AXI_BUS_HALT_REQ;
+ ath10k_ahb_tcsr_write32(ar, haltreq_reg, val);
+
+ /* Wait for axi bus halted ack */
+ timeout = jiffies + msecs_to_jiffies(ATH10K_AHB_AXI_BUS_HALT_TIMEOUT);
+ do {
+ val = ath10k_ahb_tcsr_read32(ar, haltack_reg);
+ if (val & AHB_AXI_BUS_HALT_ACK)
+ break;
+
+ mdelay(1);
+ } while (time_before(jiffies, timeout));
+
+ if (!(val & AHB_AXI_BUS_HALT_ACK)) {
+ ath10k_err(ar, "failed to halt axi bus: %d\n", val);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_AHB, "axi bus halted\n");
+}
+
+static void ath10k_ahb_halt_chip(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ u32 core_id, glb_cfg_reg, haltreq_reg, haltack_reg;
+ u32 val;
+ int ret;
+
+ if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
+ ath10k_err(ar, "rst ctrl(s) is/are not initialized\n");
+ return;
+ }
+
+ core_id = ath10k_ahb_read32(ar, ATH10K_AHB_WLAN_CORE_ID_REG);
+
+ switch (core_id) {
+ case 0:
+ glb_cfg_reg = ATH10K_AHB_TCSR_WIFI0_GLB_CFG;
+ haltreq_reg = ATH10K_AHB_TCSR_WCSS0_HALTREQ;
+ haltack_reg = ATH10K_AHB_TCSR_WCSS0_HALTACK;
+ break;
+ case 1:
+ glb_cfg_reg = ATH10K_AHB_TCSR_WIFI1_GLB_CFG;
+ haltreq_reg = ATH10K_AHB_TCSR_WCSS1_HALTREQ;
+ haltack_reg = ATH10K_AHB_TCSR_WCSS1_HALTACK;
+ break;
+ default:
+ ath10k_err(ar, "invalid core id %d found, skipping reset sequence\n",
+ core_id);
+ return;
+ }
+
+ ath10k_ahb_halt_axi_bus(ar, haltreq_reg, haltack_reg);
+
+ val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg);
+ val |= TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK;
+ ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val);
+
+ ret = reset_control_assert(ar_ahb->core_cold_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert core cold rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->radio_cold_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert radio cold rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->radio_warm_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert radio warm rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->radio_srif_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert radio srif rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->cpu_init_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert cpu init rst: %d\n", ret);
+ msleep(10);
+
+ /* Clear halt req and core clock disable req before
+ * deasserting wifi core reset.
+ */
+ val = ath10k_ahb_tcsr_read32(ar, haltreq_reg);
+ val &= ~AHB_AXI_BUS_HALT_REQ;
+ ath10k_ahb_tcsr_write32(ar, haltreq_reg, val);
+
+ val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg);
+ val &= ~TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK;
+ ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val);
+
+ ret = reset_control_deassert(ar_ahb->core_cold_rst);
+ if (ret)
+ ath10k_err(ar, "failed to deassert core cold rst: %d\n", ret);
+
+ ath10k_dbg(ar, ATH10K_DBG_AHB, "core %d reset done\n", core_id);
+}
+
+static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+
+ if (!ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
+
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+ napi_schedule(&ar->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ int ret;
+
+ ret = request_irq(ar_ahb->irq,
+ ath10k_ahb_interrupt_handler,
+ IRQF_SHARED, "ath10k_ahb", ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
+ ar_ahb->irq, ret);
+ return ret;
+ }
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
+
+ return 0;
+}
+
+static void ath10k_ahb_release_irq_legacy(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ free_irq(ar_ahb->irq, ar);
+}
+
+static void ath10k_ahb_irq_disable(struct ath10k *ar)
+{
+ ath10k_ce_disable_interrupts(ar);
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+}
+
+static int ath10k_ahb_resource_init(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct platform_device *pdev;
+ struct device *dev;
+ struct resource *res;
+ int ret;
+
+ pdev = ar_ahb->pdev;
+ dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ath10k_err(ar, "failed to get memory resource\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ar_ahb->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ar_ahb->mem)) {
+ ath10k_err(ar, "mem ioremap error\n");
+ ret = PTR_ERR(ar_ahb->mem);
+ goto out;
+ }
+
+ ar_ahb->mem_len = resource_size(res);
+
+ ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE,
+ ATH10K_GCC_REG_SIZE);
+ if (!ar_ahb->gcc_mem) {
+ ath10k_err(ar, "gcc mem ioremap error\n");
+ ret = -ENOMEM;
+ goto err_mem_unmap;
+ }
+
+ ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE,
+ ATH10K_TCSR_REG_SIZE);
+ if (!ar_ahb->tcsr_mem) {
+ ath10k_err(ar, "tcsr mem ioremap error\n");
+ ret = -ENOMEM;
+ goto err_gcc_mem_unmap;
+ }
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret);
+ goto err_tcsr_mem_unmap;
+ }
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err(ar, "failed to set 32-bit consistent dma: %d\n",
+ ret);
+ goto err_tcsr_mem_unmap;
+ }
+
+ ret = ath10k_ahb_clock_init(ar);
+ if (ret)
+ goto err_tcsr_mem_unmap;
+
+ ret = ath10k_ahb_rst_ctrl_init(ar);
+ if (ret)
+ goto err_clock_deinit;
+
+ ar_ahb->irq = platform_get_irq_byname(pdev, "legacy");
+ if (ar_ahb->irq < 0) {
+ ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq);
+ ret = ar_ahb->irq;
+ goto err_clock_deinit;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n",
+ ar_ahb->mem, ar_ahb->mem_len,
+ ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
+ return 0;
+
+err_clock_deinit:
+ ath10k_ahb_clock_deinit(ar);
+
+err_tcsr_mem_unmap:
+ iounmap(ar_ahb->tcsr_mem);
+
+err_gcc_mem_unmap:
+ ar_ahb->tcsr_mem = NULL;
+ iounmap(ar_ahb->gcc_mem);
+
+err_mem_unmap:
+ ar_ahb->gcc_mem = NULL;
+ devm_iounmap(&pdev->dev, ar_ahb->mem);
+
+out:
+ ar_ahb->mem = NULL;
+ return ret;
+}
+
+static void ath10k_ahb_resource_deinit(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+
+ dev = &ar_ahb->pdev->dev;
+
+ if (ar_ahb->mem)
+ devm_iounmap(dev, ar_ahb->mem);
+
+ if (ar_ahb->gcc_mem)
+ iounmap(ar_ahb->gcc_mem);
+
+ if (ar_ahb->tcsr_mem)
+ iounmap(ar_ahb->tcsr_mem);
+
+ ar_ahb->mem = NULL;
+ ar_ahb->gcc_mem = NULL;
+ ar_ahb->tcsr_mem = NULL;
+
+ ath10k_ahb_clock_deinit(ar);
+ ath10k_ahb_rst_ctrl_deinit(ar);
+}
+
+static int ath10k_ahb_prepare_device(struct ath10k *ar)
+{
+ u32 val;
+ int ret;
+
+ ret = ath10k_ahb_clock_enable(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to enable clocks\n");
+ return ret;
+ }
+
+	/* The target's clock is supplied externally (i.e. by a clock
+	 * module controlled by the host), so the target cannot discover
+	 * its own cpu frequency. Read the target cpu frequency info from
+	 * the gcc register and write it into the target's scratch
+	 * register, where the target expects this information.
+	 */
+ val = ath10k_ahb_gcc_read32(ar, ATH10K_AHB_GCC_FEPLL_PLL_DIV);
+ ath10k_ahb_write32(ar, ATH10K_AHB_WIFI_SCRATCH_5_REG, val);
+
+ ret = ath10k_ahb_release_reset(ar);
+ if (ret)
+ goto err_clk_disable;
+
+ ath10k_ahb_irq_disable(ar);
+
+ ath10k_ahb_write32(ar, FW_INDICATOR_ADDRESS, FW_IND_HOST_READY);
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret)
+ goto err_halt_chip;
+
+ return 0;
+
+err_halt_chip:
+ ath10k_ahb_halt_chip(ar);
+
+err_clk_disable:
+ ath10k_ahb_clock_disable(ar);
+
+ return ret;
+}
+
+static int ath10k_ahb_chip_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_ahb_halt_chip(ar);
+ ath10k_ahb_clock_disable(ar);
+
+ ret = ath10k_ahb_prepare_device(ar);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ath10k_ahb_wake_target_cpu(struct ath10k *ar)
+{
+ u32 addr, val;
+
+ addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
+ val = ath10k_ahb_read32(ar, addr);
+ val |= ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK;
+ ath10k_ahb_write32(ar, addr, val);
+
+ return 0;
+}
+
+static int ath10k_ahb_hif_start(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n");
+
+ ath10k_ce_enable_interrupts(ar);
+ ath10k_pci_enable_legacy_irq(ar);
+
+ ath10k_pci_rx_post(ar);
+
+ return 0;
+}
+
+static void ath10k_ahb_hif_stop(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif stop\n");
+
+ ath10k_ahb_irq_disable(ar);
+ synchronize_irq(ar_ahb->irq);
+
+ ath10k_pci_flush(ar);
+
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
+}
+
+static int ath10k_ahb_hif_power_up(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif power up\n");
+
+ ret = ath10k_ahb_chip_reset(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to reset chip: %d\n", ret);
+ goto out;
+ }
+
+ ret = ath10k_pci_init_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+ goto out;
+ }
+
+ ret = ath10k_pci_init_config(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to setup init config: %d\n", ret);
+ goto err_ce_deinit;
+ }
+
+ ret = ath10k_ahb_wake_target_cpu(ar);
+ if (ret) {
+ ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
+ goto err_ce_deinit;
+ }
+ napi_enable(&ar->napi);
+
+ return 0;
+
+err_ce_deinit:
+ ath10k_pci_ce_deinit(ar);
+out:
+ return ret;
+}
+
+static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
+ .tx_sg = ath10k_pci_hif_tx_sg,
+ .diag_read = ath10k_pci_hif_diag_read,
+ .diag_write = ath10k_pci_diag_write_mem,
+ .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
+ .start = ath10k_ahb_hif_start,
+ .stop = ath10k_ahb_hif_stop,
+ .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_pci_hif_get_default_pipe,
+ .send_complete_check = ath10k_pci_hif_send_complete_check,
+ .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
+ .power_up = ath10k_ahb_hif_power_up,
+ .power_down = ath10k_pci_hif_power_down,
+ .read32 = ath10k_ahb_read32,
+ .write32 = ath10k_ahb_write32,
+};
+
+static const struct ath10k_bus_ops ath10k_ahb_bus_ops = {
+ .read32 = ath10k_ahb_read32,
+ .write32 = ath10k_ahb_write32,
+ .get_num_banks = ath10k_ahb_get_num_banks,
+};
+
+static int ath10k_ahb_probe(struct platform_device *pdev)
+{
+ struct ath10k *ar;
+ struct ath10k_ahb *ar_ahb;
+ struct ath10k_pci *ar_pci;
+ const struct of_device_id *of_id;
+ enum ath10k_hw_rev hw_rev;
+ size_t size;
+ int ret;
+ u32 chip_id;
+
+ of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ return -EINVAL;
+ }
+
+ hw_rev = (enum ath10k_hw_rev)of_id->data;
+
+ size = sizeof(*ar_pci) + sizeof(*ar_ahb);
+ ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB,
+ hw_rev, &ath10k_ahb_hif_ops);
+ if (!ar) {
+ dev_err(&pdev->dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "ahb probe\n");
+
+ ar_pci = ath10k_pci_priv(ar);
+ ar_ahb = ath10k_ahb_priv(ar);
+
+ ar_ahb->pdev = pdev;
+ platform_set_drvdata(pdev, ar);
+
+ ret = ath10k_ahb_resource_init(ar);
+ if (ret)
+ goto err_core_destroy;
+
+ ar->dev_id = 0;
+ ar_pci->mem = ar_ahb->mem;
+ ar_pci->mem_len = ar_ahb->mem_len;
+ ar_pci->ar = ar;
+ ar_pci->bus_ops = &ath10k_ahb_bus_ops;
+
+ ret = ath10k_pci_setup_resource(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to setup resource: %d\n", ret);
+ goto err_resource_deinit;
+ }
+
+ ath10k_pci_init_napi(ar);
+
+ ret = ath10k_ahb_request_irq_legacy(ar);
+ if (ret)
+ goto err_free_pipes;
+
+ ret = ath10k_ahb_prepare_device(ar);
+ if (ret)
+ goto err_free_irq;
+
+ ath10k_pci_ce_deinit(ar);
+
+ chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (chip_id == 0xffffffff) {
+ ath10k_err(ar, "failed to get chip id\n");
+ ret = -ENODEV;
+ goto err_halt_device;
+ }
+
+ ret = ath10k_core_register(ar, chip_id);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_halt_device;
+ }
+
+ return 0;
+
+err_halt_device:
+ ath10k_ahb_halt_chip(ar);
+ ath10k_ahb_clock_disable(ar);
+
+err_free_irq:
+ ath10k_ahb_release_irq_legacy(ar);
+
+err_free_pipes:
+ ath10k_pci_free_pipes(ar);
+
+err_resource_deinit:
+ ath10k_ahb_resource_deinit(ar);
+
+err_core_destroy:
+ ath10k_core_destroy(ar);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static int ath10k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath10k *ar = platform_get_drvdata(pdev);
+ struct ath10k_ahb *ar_ahb;
+
+ if (!ar)
+ return -EINVAL;
+
+ ar_ahb = ath10k_ahb_priv(ar);
+
+ if (!ar_ahb)
+ return -EINVAL;
+
+ ath10k_dbg(ar, ATH10K_DBG_AHB, "ahb remove\n");
+
+ ath10k_core_unregister(ar);
+ ath10k_ahb_irq_disable(ar);
+ ath10k_ahb_release_irq_legacy(ar);
+ ath10k_pci_release_resource(ar);
+ ath10k_ahb_halt_chip(ar);
+ ath10k_ahb_clock_disable(ar);
+ ath10k_ahb_resource_deinit(ar);
+ ath10k_core_destroy(ar);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ath10k_ahb_driver = {
+ .driver = {
+ .name = "ath10k_ahb",
+ .of_match_table = ath10k_ahb_of_match,
+ },
+ .probe = ath10k_ahb_probe,
+ .remove = ath10k_ahb_remove,
+};
+
+int ath10k_ahb_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&ath10k_ahb_driver);
+ if (ret)
+ printk(KERN_ERR "failed to register ath10k ahb driver: %d\n",
+ ret);
+ return ret;
+}
+
+void ath10k_ahb_exit(void)
+{
+ platform_driver_unregister(&ath10k_ahb_driver);
+}
diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h
new file mode 100644
index 000000000000..d43e375215c8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ahb.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _AHB_H_
+#define _AHB_H_
+
+#include <linux/platform_device.h>
+
+struct ath10k_ahb {
+ struct platform_device *pdev;
+ void __iomem *mem;
+ unsigned long mem_len;
+ void __iomem *gcc_mem;
+ void __iomem *tcsr_mem;
+
+ int irq;
+
+ struct clk *cmd_clk;
+ struct clk *ref_clk;
+ struct clk *rtc_clk;
+
+ struct reset_control *core_cold_rst;
+ struct reset_control *radio_cold_rst;
+ struct reset_control *radio_warm_rst;
+ struct reset_control *radio_srif_rst;
+ struct reset_control *cpu_init_rst;
+};
+
+#ifdef CONFIG_ATH10K_AHB
+
+#define ATH10K_GCC_REG_BASE 0x1800000
+#define ATH10K_GCC_REG_SIZE 0x60000
+
+#define ATH10K_TCSR_REG_BASE 0x1900000
+#define ATH10K_TCSR_REG_SIZE 0x80000
+
+#define ATH10K_AHB_GCC_FEPLL_PLL_DIV 0x2f020
+#define ATH10K_AHB_WIFI_SCRATCH_5_REG 0x4f014
+
+#define ATH10K_AHB_WLAN_CORE_ID_REG 0x82030
+
+#define ATH10K_AHB_TCSR_WIFI0_GLB_CFG 0x49000
+#define ATH10K_AHB_TCSR_WIFI1_GLB_CFG 0x49004
+#define TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK BIT(25)
+
+#define ATH10K_AHB_TCSR_WCSS0_HALTREQ 0x52000
+#define ATH10K_AHB_TCSR_WCSS1_HALTREQ 0x52010
+#define ATH10K_AHB_TCSR_WCSS0_HALTACK 0x52004
+#define ATH10K_AHB_TCSR_WCSS1_HALTACK 0x52014
+
+#define ATH10K_AHB_AXI_BUS_HALT_TIMEOUT 10 /* msec */
+#define AHB_AXI_BUS_HALT_REQ 1
+#define AHB_AXI_BUS_HALT_ACK 1
+
+#define ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK 1
+
+int ath10k_ahb_init(void);
+void ath10k_ahb_exit(void);
+
+#else /* CONFIG_ATH10K_AHB */
+
+static inline int ath10k_ahb_init(void)
+{
+ return 0;
+}
+
+static inline void ath10k_ahb_exit(void)
+{
+}
+
+#endif /* CONFIG_ATH10K_AHB */
+
+#endif /* _AHB_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 3d29b0875b3e..2872d347ea78 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -221,7 +221,7 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
u32 txlen;
int ret;
- ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
@@ -287,7 +287,7 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
int ret;
ath10k_dbg(ar, ATH10K_DBG_BMI,
- "bmi fast download address 0x%x buffer 0x%p length %d\n",
+ "bmi fast download address 0x%x buffer 0x%pK length %d\n",
address, buffer, length);
ret = ath10k_bmi_lz_stream_start(ar, address);
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index edf3629288bc..e7205546fa6b 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -39,7 +39,7 @@
* chooses what to send (buffer address, length). The destination
* side keeps a supply of "anonymous receive buffers" available and
* it handles incoming data as it arrives (when the destination
- * recieves an interrupt).
+ * receives an interrupt).
*
* The sender may send a simple buffer (address/length) or it may
* send a small list of buffers. When a small list is sent, hardware
@@ -411,7 +411,8 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
lockdep_assert_held(&ar_pci->ce_lock);
- if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+ if ((pipe->id != 5) &&
+ CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
return -ENOSPC;
desc->addr = __cpu_to_le32(paddr);
@@ -425,6 +426,19 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
return 0;
}
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+}
+
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
struct ath10k *ar = pipe->ar;
@@ -444,14 +458,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
*/
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp)
+ unsigned int *nbytesp)
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
- struct ath10k *ar = ce_state->ar;
unsigned int sw_index = dest_ring->sw_index;
struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -476,21 +486,17 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
desc->nbytes = 0;
/* Return data from completed destination descriptor */
- *bufferp = __le32_to_cpu(sdesc.addr);
*nbytesp = nbytes;
- *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
-
- if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
- *flagsp = CE_RECV_FLAG_SWAPPED;
- else
- *flagsp = 0;
if (per_transfer_contextp)
*per_transfer_contextp =
dest_ring->per_transfer_context[sw_index];
- /* sanity */
- dest_ring->per_transfer_context[sw_index] = NULL;
+	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+	 * So update the transfer context for all CEs except CE5.
+	 */
+ if (ce_state->id != 5)
+ dest_ring->per_transfer_context[sw_index] = NULL;
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
@@ -501,10 +507,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp)
+ unsigned int *nbytesp)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -513,8 +516,7 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
spin_lock_bh(&ar_pci->ce_lock);
ret = ath10k_ce_completed_recv_next_nolock(ce_state,
per_transfer_contextp,
- bufferp, nbytesp,
- transfer_idp, flagsp);
+ nbytesp);
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
@@ -838,7 +840,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot init ce src ring id %d entries %d base_addr %p\n",
+ "boot init ce src ring id %d entries %d base_addr %pK\n",
ce_id, nentries, src_ring->base_addr_owner_space);
return 0;
@@ -872,7 +874,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot ce dest ring id %d entries %d base_addr %p\n",
+ "boot ce dest ring id %d entries %d base_addr %pK\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
return 0;
@@ -1048,11 +1050,11 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
*
* For the lack of a better place do the check here.
*/
- BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2*TARGET_TLV_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
ce_state->ar = ar;
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 47b734ce7ecf..dfc098606bee 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -22,7 +22,7 @@
/* Maximum number of Copy Engine's supported */
#define CE_COUNT_MAX 12
-#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
@@ -166,6 +166,7 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
/* recv flags */
/* Data is byte-swapped */
@@ -177,10 +178,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
*/
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp);
+ unsigned int *nbytesp);
/*
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
@@ -212,10 +210,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp);
+ unsigned int *nbytesp);
/*
* Support clean shutdown by allowing the caller to cancel
@@ -413,9 +408,11 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
- (((int)(toidx)-(int)(fromidx)) & (nentries_mask))
+ (((int)(toidx) - (int)(fromidx)) & (nentries_mask))
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
+ (((idx) + (num)) & (nentries_mask))
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
ar->regs->ce_wrap_intr_sum_host_msi_lsb
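/* Editor's note: worked example for the new CE_RING_IDX_ADD (assume a
 * 512-entry ring, so nentries_mask == 511):
 *
 *   CE_RING_IDX_ADD(511, 510, 4)  ==  (510 + 4) & 511  ==  2
 *   CE_RING_DELTA(511, 510, 2)    ==  (2 - 510) & 511  ==  4
 *
 * i.e. advancing 4 entries from index 510 wraps to index 2, and the
 * delta between those two indices is again 4 - the power-of-two mask
 * makes the wraparound a single AND. */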
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 531de256d58d..21ae8d663e67 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/of.h>
+#include <asm/byteorder.h>
#include "core.h"
#include "mac.h"
@@ -55,18 +56,39 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
- .has_shifted_cc_wraparound = true,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
+ .cal_data_len = 2116,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
- .fw = QCA988X_HW_2_0_FW_FILE,
- .otp = QCA988X_HW_2_0_OTP_FILE,
.board = QCA988X_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ },
+ {
+ .id = QCA9887_HW_1_0_VERSION,
+ .dev_id = QCA9887_1_0_DEVICE_ID,
+ .name = "qca9887 hw1.0",
+ .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 2116,
+ .fw = {
+ .dir = QCA9887_HW_1_0_FW_DIR,
+ .board = QCA9887_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA9887_BOARD_DATA_SZ,
+ .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -77,14 +99,15 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
- .fw = QCA6174_HW_2_1_FW_FILE,
- .otp = QCA6174_HW_2_1_OTP_FILE,
.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -95,14 +118,15 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
- .fw = QCA6174_HW_2_1_FW_FILE,
- .otp = QCA6174_HW_2_1_OTP_FILE,
.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -113,14 +137,15 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
- .fw = QCA6174_HW_3_0_FW_FILE,
- .otp = QCA6174_HW_3_0_OTP_FILE,
.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -131,15 +156,16 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
.fw = {
/* uses same binaries as hw3.0 */
.dir = QCA6174_HW_3_0_FW_DIR,
- .fw = QCA6174_HW_3_0_FW_FILE,
- .otp = QCA6174_HW_3_0_OTP_FILE,
.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -149,16 +175,71 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.uart_pin = 7,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
+ .tx_chain_mask = 0xf,
+ .rx_chain_mask = 0xf,
+ .max_spatial_stream = 4,
+ .cal_data_len = 12064,
.fw = {
.dir = QCA99X0_HW_2_0_FW_DIR,
- .fw = QCA99X0_HW_2_0_FW_FILE,
- .otp = QCA99X0_HW_2_0_OTP_FILE,
.board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
+ },
+ {
+ .id = QCA9984_HW_1_0_DEV_VERSION,
+ .dev_id = QCA9984_1_0_DEVICE_ID,
+ .name = "qca9984/qca9994 hw1.0",
+ .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .otp_exe_param = 0x00000700,
+ .continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
+ .channel_counters_freq_hz = 150000,
+ .max_probe_resp_desc_thres = 24,
+ .tx_chain_mask = 0xf,
+ .rx_chain_mask = 0xf,
+ .max_spatial_stream = 4,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA9984_HW_1_0_FW_DIR,
+ .board = QCA9984_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA99X0_BOARD_DATA_SZ,
+ .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
+ },
+ {
+ .id = QCA9888_HW_2_0_DEV_VERSION,
+ .dev_id = QCA9888_2_0_DEVICE_ID,
+ .name = "qca9888 hw2.0",
+ .patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .otp_exe_param = 0x00000700,
+ .continuous_frag_desc = true,
+ .channel_counters_freq_hz = 150000,
+ .max_probe_resp_desc_thres = 24,
+ .tx_chain_mask = 3,
+ .rx_chain_mask = 3,
+ .max_spatial_stream = 2,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA9888_HW_2_0_FW_DIR,
+ .board = QCA9888_HW_2_0_BOARD_DATA_FILE,
+ .board_size = QCA99X0_BOARD_DATA_SZ,
+ .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -169,14 +250,15 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
- .fw = QCA9377_HW_1_0_FW_FILE,
- .otp = QCA9377_HW_1_0_OTP_FILE,
.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -187,14 +269,41 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
- .fw = QCA9377_HW_1_0_FW_FILE,
- .otp = QCA9377_HW_1_0_OTP_FILE,
.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ },
+ {
+ .id = QCA4019_HW_1_0_DEV_VERSION,
+ .dev_id = 0,
+ .name = "qca4019 hw1.0",
+ .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
+ .otp_exe_param = 0x0010000,
+ .continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
+ .channel_counters_freq_hz = 125000,
+ .max_probe_resp_desc_thres = 24,
+ .tx_chain_mask = 0x3,
+ .rx_chain_mask = 0x3,
+ .max_spatial_stream = 2,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA4019_HW_1_0_FW_DIR,
+ .board = QCA4019_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA4019_BOARD_DATA_SZ,
+ .board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
},
};
@@ -211,6 +320,10 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
[ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode",
[ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca",
+ [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
+ [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
+ [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
+ [ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -237,7 +350,7 @@ void ath10k_core_get_fw_features_str(struct ath10k *ar,
int i;
for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
- if (test_bit(i, ar->fw_features)) {
+ if (test_bit(i, ar->normal_mode_fw.fw_file.fw_features)) {
if (len > 0)
len += scnprintf(buf + len, buf_len - len, ",");
@@ -429,18 +542,18 @@ exit:
return ret;
}
-static int ath10k_download_cal_file(struct ath10k *ar)
+static int ath10k_download_cal_file(struct ath10k *ar,
+ const struct firmware *file)
{
int ret;
- if (!ar->cal_file)
+ if (!file)
return -ENOENT;
- if (IS_ERR(ar->cal_file))
- return PTR_ERR(ar->cal_file);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
- ret = ath10k_download_board_data(ar, ar->cal_file->data,
- ar->cal_file->size);
+ ret = ath10k_download_board_data(ar, file->data, file->size);
if (ret) {
ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
return ret;
@@ -451,7 +564,7 @@ static int ath10k_download_cal_file(struct ath10k *ar)
return 0;
}
-static int ath10k_download_cal_dt(struct ath10k *ar)
+static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name)
{
struct device_node *node;
int data_len;
@@ -465,13 +578,12 @@ static int ath10k_download_cal_dt(struct ath10k *ar)
*/
return -ENOENT;
- if (!of_get_property(node, "qcom,ath10k-calibration-data",
- &data_len)) {
+ if (!of_get_property(node, dt_name, &data_len)) {
/* The calibration data node is optional */
return -ENOENT;
}
- if (data_len != QCA988X_CAL_DATA_LEN) {
+ if (data_len != ar->hw_params.cal_data_len) {
ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
data_len);
ret = -EMSGSIZE;
@@ -484,8 +596,7 @@ static int ath10k_download_cal_dt(struct ath10k *ar)
goto out;
}
- ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data",
- data, data_len);
+ ret = of_property_read_u8_array(node, dt_name, data, data_len);
if (ret) {
ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
ret);
@@ -508,6 +619,35 @@ out:
return ret;
}
+static int ath10k_download_cal_eeprom(struct ath10k *ar)
+{
+ size_t data_len;
+ void *data = NULL;
+ int ret;
+
+ ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = ath10k_download_board_data(ar, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+
+ return ret;
+}
+
static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
{
u32 result, address;
@@ -516,7 +656,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
address = ar->hw_params.patch_load_addr;
- if (!ar->otp_data || !ar->otp_len) {
+ if (!ar->normal_mode_fw.fw_file.otp_data ||
+ !ar->normal_mode_fw.fw_file.otp_len) {
ath10k_warn(ar,
"failed to retrieve board id because of invalid otp\n");
return -ENODATA;
@@ -524,9 +665,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot upload otp to 0x%x len %zd for board id\n",
- address, ar->otp_len);
+ address, ar->normal_mode_fw.fw_file.otp_len);
- ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+ ret = ath10k_bmi_fast_download(ar, address,
+ ar->normal_mode_fw.fw_file.otp_data,
+ ar->normal_mode_fw.fw_file.otp_len);
if (ret) {
ath10k_err(ar, "could not write otp for board id check: %d\n",
ret);
@@ -564,7 +707,9 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
int ret;
- ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
+ ret = ath10k_download_board_data(ar,
+ ar->running_fw->board_data,
+ ar->running_fw->board_len);
if (ret) {
ath10k_err(ar, "failed to download board data: %d\n", ret);
return ret;
@@ -572,16 +717,20 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
/* OTP is optional */
- if (!ar->otp_data || !ar->otp_len) {
- ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
- ar->otp_data, ar->otp_len);
+ if (!ar->running_fw->fw_file.otp_data ||
+ !ar->running_fw->fw_file.otp_len) {
+ ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %pK otp_len %zd)!\n",
+ ar->running_fw->fw_file.otp_data,
+ ar->running_fw->fw_file.otp_len);
return 0;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
- address, ar->otp_len);
+ address, ar->running_fw->fw_file.otp_len);
- ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+ ret = ath10k_bmi_fast_download(ar, address,
+ ar->running_fw->fw_file.otp_data,
+ ar->running_fw->fw_file.otp_len);
if (ret) {
ath10k_err(ar, "could not write otp (%d)\n", ret);
return ret;
@@ -596,7 +745,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
- ar->fw_features)) &&
+ ar->running_fw->fw_file.fw_features)) &&
result != 0) {
ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
@@ -605,46 +754,32 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
return 0;
}
-static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
+static int ath10k_download_fw(struct ath10k *ar)
{
u32 address, data_len;
- const char *mode_name;
const void *data;
int ret;
address = ar->hw_params.patch_load_addr;
- switch (mode) {
- case ATH10K_FIRMWARE_MODE_NORMAL:
- data = ar->firmware_data;
- data_len = ar->firmware_len;
- mode_name = "normal";
- ret = ath10k_swap_code_seg_configure(ar,
- ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
- if (ret) {
- ath10k_err(ar, "failed to configure fw code swap: %d\n",
- ret);
- return ret;
- }
- break;
- case ATH10K_FIRMWARE_MODE_UTF:
- data = ar->testmode.utf_firmware_data;
- data_len = ar->testmode.utf_firmware_len;
- mode_name = "utf";
- break;
- default:
- ath10k_err(ar, "unknown firmware mode: %d\n", mode);
- return -EINVAL;
+ data = ar->running_fw->fw_file.firmware_data;
+ data_len = ar->running_fw->fw_file.firmware_len;
+
+ ret = ath10k_swap_code_seg_configure(ar, &ar->running_fw->fw_file);
+ if (ret) {
+ ath10k_err(ar, "failed to configure fw code swap: %d\n",
+ ret);
+ return ret;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot uploading firmware image %p len %d mode %s\n",
- data, data_len, mode_name);
+ "boot uploading firmware image %pK len %d\n",
+ data, data_len);
ret = ath10k_bmi_fast_download(ar, address, data, data_len);
if (ret) {
- ath10k_err(ar, "failed to download %s firmware: %d\n",
- mode_name, ret);
+ ath10k_err(ar, "failed to download firmware: %d\n",
+ ret);
return ret;
}
@@ -653,42 +788,50 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
static void ath10k_core_free_board_files(struct ath10k *ar)
{
- if (!IS_ERR(ar->board))
- release_firmware(ar->board);
+ if (!IS_ERR(ar->normal_mode_fw.board))
+ release_firmware(ar->normal_mode_fw.board);
- ar->board = NULL;
- ar->board_data = NULL;
- ar->board_len = 0;
+ ar->normal_mode_fw.board = NULL;
+ ar->normal_mode_fw.board_data = NULL;
+ ar->normal_mode_fw.board_len = 0;
}
static void ath10k_core_free_firmware_files(struct ath10k *ar)
{
- if (!IS_ERR(ar->otp))
- release_firmware(ar->otp);
-
- if (!IS_ERR(ar->firmware))
- release_firmware(ar->firmware);
+ if (!IS_ERR(ar->normal_mode_fw.fw_file.firmware))
+ release_firmware(ar->normal_mode_fw.fw_file.firmware);
if (!IS_ERR(ar->cal_file))
release_firmware(ar->cal_file);
- ath10k_swap_code_seg_release(ar);
+ if (!IS_ERR(ar->pre_cal_file))
+ release_firmware(ar->pre_cal_file);
+
+ ath10k_swap_code_seg_release(ar, &ar->normal_mode_fw.fw_file);
- ar->otp = NULL;
- ar->otp_data = NULL;
- ar->otp_len = 0;
+ ar->normal_mode_fw.fw_file.otp_data = NULL;
+ ar->normal_mode_fw.fw_file.otp_len = 0;
- ar->firmware = NULL;
- ar->firmware_data = NULL;
- ar->firmware_len = 0;
+ ar->normal_mode_fw.fw_file.firmware = NULL;
+ ar->normal_mode_fw.fw_file.firmware_data = NULL;
+ ar->normal_mode_fw.fw_file.firmware_len = 0;
ar->cal_file = NULL;
+ ar->pre_cal_file = NULL;
}
static int ath10k_fetch_cal_file(struct ath10k *ar)
{
char filename[100];
+ /* pre-cal-<bus>-<id>.bin */
+ scnprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin",
+ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+ ar->pre_cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+ if (!IS_ERR(ar->pre_cal_file))
+ goto success;
+
/* cal-<bus>-<id>.bin */
scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
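/* Editor's note: example of the two filenames probed above for a PCI
 * device (assuming ATH10K_FW_DIR resolves to "ath10k" and using the
 * illustrative device name "0000:01:00.0"):
 *
 *   /lib/firmware/ath10k/pre-cal-pci-0000:01:00.0.bin
 *   /lib/firmware/ath10k/cal-pci-0000:01:00.0.bin
 *
 * The pre-cal file is tried first; the full calibration file is only
 * fetched when no pre-cal file exists. */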
@@ -697,7 +840,7 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
if (IS_ERR(ar->cal_file))
/* calibration file is optional, don't print any warnings */
return PTR_ERR(ar->cal_file);
-
+success:
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
ATH10K_FW_DIR, filename);
@@ -711,14 +854,14 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar)
return -EINVAL;
}
- ar->board = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
- if (IS_ERR(ar->board))
- return PTR_ERR(ar->board);
+ ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->normal_mode_fw.board))
+ return PTR_ERR(ar->normal_mode_fw.board);
- ar->board_data = ar->board->data;
- ar->board_len = ar->board->size;
+ ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
+ ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
return 0;
}
@@ -778,8 +921,8 @@ static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
"boot found board data for '%s'",
boardname);
- ar->board_data = board_ie_data;
- ar->board_len = board_ie_len;
+ ar->normal_mode_fw.board_data = board_ie_data;
+ ar->normal_mode_fw.board_len = board_ie_len;
ret = 0;
goto out;
@@ -812,12 +955,14 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
const u8 *data;
int ret, ie_id;
- ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
- if (IS_ERR(ar->board))
- return PTR_ERR(ar->board);
+ ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ filename);
+ if (IS_ERR(ar->normal_mode_fw.board))
+ return PTR_ERR(ar->normal_mode_fw.board);
- data = ar->board->data;
- len = ar->board->size;
+ data = ar->normal_mode_fw.board->data;
+ len = ar->normal_mode_fw.board->size;
/* magic has extra null byte padded */
magic_len = strlen(ATH10K_BOARD_MAGIC) + 1;
@@ -884,10 +1029,10 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
}
out:
- if (!ar->board_data || !ar->board_len) {
+ if (!ar->normal_mode_fw.board_data || !ar->normal_mode_fw.board_len) {
ath10k_err(ar,
"failed to fetch board data for %s from %s/%s\n",
- ar->hw_params.fw.dir, boardname, filename);
+ boardname, ar->hw_params.fw.dir, filename);
ret = -ENODATA;
goto err;
}
@@ -952,51 +1097,8 @@ success:
return 0;
}
-static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
-{
- int ret = 0;
-
- if (ar->hw_params.fw.fw == NULL) {
- ath10k_err(ar, "firmware file not defined\n");
- return -EINVAL;
- }
-
- ar->firmware = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.fw);
- if (IS_ERR(ar->firmware)) {
- ret = PTR_ERR(ar->firmware);
- ath10k_err(ar, "could not fetch firmware (%d)\n", ret);
- goto err;
- }
-
- ar->firmware_data = ar->firmware->data;
- ar->firmware_len = ar->firmware->size;
-
- /* OTP may be undefined. If so, don't fetch it at all */
- if (ar->hw_params.fw.otp == NULL)
- return 0;
-
- ar->otp = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.otp);
- if (IS_ERR(ar->otp)) {
- ret = PTR_ERR(ar->otp);
- ath10k_err(ar, "could not fetch otp (%d)\n", ret);
- goto err;
- }
-
- ar->otp_data = ar->otp->data;
- ar->otp_len = ar->otp->size;
-
- return 0;
-
-err:
- ath10k_core_free_firmware_files(ar);
- return ret;
-}
-
-static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+ struct ath10k_fw_file *fw_file)
{
size_t magic_len, len, ie_len;
int ie_id, i, index, bit, ret;
@@ -1005,15 +1107,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
__le32 *timestamp, *version;
/* first fetch the firmware file (firmware-*.bin) */
- ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
- if (IS_ERR(ar->firmware)) {
+ fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+ name);
+ if (IS_ERR(fw_file->firmware)) {
ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
- ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
- return PTR_ERR(ar->firmware);
+ ar->hw_params.fw.dir, name,
+ PTR_ERR(fw_file->firmware));
+ return PTR_ERR(fw_file->firmware);
}
- data = ar->firmware->data;
- len = ar->firmware->size;
+ data = fw_file->firmware->data;
+ len = fw_file->firmware->size;
/* magic also includes the null byte, check that as well */
magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
@@ -1056,15 +1160,15 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
switch (ie_id) {
case ATH10K_FW_IE_FW_VERSION:
- if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
+ if (ie_len > sizeof(fw_file->fw_version) - 1)
break;
- memcpy(ar->hw->wiphy->fw_version, data, ie_len);
- ar->hw->wiphy->fw_version[ie_len] = '\0';
+ memcpy(fw_file->fw_version, data, ie_len);
+ fw_file->fw_version[ie_len] = '\0';
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw version %s\n",
- ar->hw->wiphy->fw_version);
+ fw_file->fw_version);
break;
case ATH10K_FW_IE_TIMESTAMP:
if (ie_len != sizeof(u32))
@@ -1091,21 +1195,21 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"Enabling feature bit: %i\n",
i);
- __set_bit(i, ar->fw_features);
+ __set_bit(i, fw_file->fw_features);
}
}
ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
- ar->fw_features,
- sizeof(ar->fw_features));
+ fw_file->fw_features,
+ sizeof(fw_file->fw_features));
break;
case ATH10K_FW_IE_FW_IMAGE:
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw image ie (%zd B)\n",
ie_len);
- ar->firmware_data = data;
- ar->firmware_len = ie_len;
+ fw_file->firmware_data = data;
+ fw_file->firmware_len = ie_len;
break;
case ATH10K_FW_IE_OTP_IMAGE:
@@ -1113,8 +1217,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
"found otp image ie (%zd B)\n",
ie_len);
- ar->otp_data = data;
- ar->otp_len = ie_len;
+ fw_file->otp_data = data;
+ fw_file->otp_len = ie_len;
break;
case ATH10K_FW_IE_WMI_OP_VERSION:
@@ -1123,10 +1227,10 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
version = (__le32 *)data;
- ar->wmi.op_version = le32_to_cpup(version);
+ fw_file->wmi_op_version = le32_to_cpup(version);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
- ar->wmi.op_version);
+ fw_file->wmi_op_version);
break;
case ATH10K_FW_IE_HTT_OP_VERSION:
if (ie_len != sizeof(u32))
@@ -1134,17 +1238,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
version = (__le32 *)data;
- ar->htt.op_version = le32_to_cpup(version);
+ fw_file->htt_op_version = le32_to_cpup(version);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
- ar->htt.op_version);
+ fw_file->htt_op_version);
break;
case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw code swap image ie (%zd B)\n",
ie_len);
- ar->swap.firmware_codeswap_data = data;
- ar->swap.firmware_codeswap_len = ie_len;
+ fw_file->codeswap_data = data;
+ fw_file->codeswap_len = ie_len;
break;
default:
ath10k_warn(ar, "Unknown FW IE: %u\n",
@@ -1159,7 +1263,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
data += ie_len;
}
- if (!ar->firmware_data || !ar->firmware_len) {
+ if (!fw_file->firmware_data ||
+ !fw_file->firmware_len) {
ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
ar->hw_params.fw.dir, name);
ret = -ENOMEDIUM;
@@ -1183,40 +1288,95 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
ar->fw_api = 5;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE,
+ &ar->normal_mode_fw.fw_file);
if (ret == 0)
goto success;
ar->fw_api = 4;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE);
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE,
+ &ar->normal_mode_fw.fw_file);
if (ret == 0)
goto success;
ar->fw_api = 3;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE);
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE,
+ &ar->normal_mode_fw.fw_file);
if (ret == 0)
goto success;
ar->fw_api = 2;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
- if (ret == 0)
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE,
+ &ar->normal_mode_fw.fw_file);
+ if (ret)
+ return ret;
+
+success:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+ return 0;
+}
+
+static int ath10k_core_pre_cal_download(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_download_cal_file(ar, ar->pre_cal_file);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE;
goto success;
+ }
- ar->fw_api = 1;
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a pre calibration file, try DT next: %d\n",
+ ret);
- ret = ath10k_core_fetch_firmware_api_1(ar);
- if (ret)
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "unable to load pre cal data from DT: %d\n", ret);
return ret;
+ }
+ ar->cal_mode = ATH10K_PRE_CAL_MODE_DT;
success:
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+ ath10k_cal_mode_str(ar->cal_mode));
+
+ return 0;
+}
+
+static int ath10k_core_pre_cal_config(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_core_pre_cal_download(ar);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "failed to load pre cal data: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_core_get_board_id_from_otp(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to get board id: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_download_and_run_otp(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to run otp: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "pre cal configuration done successfully\n");
return 0;
}
@@ -1225,7 +1385,15 @@ static int ath10k_download_cal_data(struct ath10k *ar)
{
int ret;
- ret = ath10k_download_cal_file(ar);
+ ret = ath10k_core_pre_cal_config(ar);
+ if (ret == 0)
+ return 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "pre cal download procedure failed, try cal file: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_file(ar, ar->cal_file);
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_FILE;
goto done;
@@ -1235,14 +1403,24 @@ static int ath10k_download_cal_data(struct ath10k *ar)
"boot did not find a calibration file, try DT next: %d\n",
ret);
- ret = ath10k_download_cal_dt(ar);
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_DT;
goto done;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot did not find DT entry, try OTP next: %d\n",
+ "boot did not find DT entry, try target EEPROM next: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_eeprom(ar);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_EEPROM;
+ goto done;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find target EEPROM entry, try OTP next: %d\n",
ret);
ret = ath10k_download_and_run_otp(ar);
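/* Editor's note: resulting calibration source order after this patch,
 * as read from ath10k_download_cal_data() above:
 *
 *   1. pre-cal file / pre-cal DT   -> ATH10K_PRE_CAL_MODE_{FILE,DT}
 *   2. cal-<bus>-<id>.bin          -> ATH10K_CAL_MODE_FILE
 *   3. DT calibration property     -> ATH10K_CAL_MODE_DT
 *   4. target EEPROM               -> ATH10K_CAL_MODE_EEPROM
 *   5. OTP (final fallback)        -> ATH10K_CAL_MODE_OTP
 */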
@@ -1339,13 +1517,14 @@ static void ath10k_core_restart(struct work_struct *work)
ieee80211_stop_queues(ar->hw);
ath10k_drain_tx(ar);
- complete_all(&ar->scan.started);
- complete_all(&ar->scan.completed);
- complete_all(&ar->scan.on_channel);
- complete_all(&ar->offchan_tx_completed);
- complete_all(&ar->install_key_done);
- complete_all(&ar->vdev_setup_done);
- complete_all(&ar->thermal.wmi_sync);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
+ complete(&ar->offchan_tx_completed);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->thermal.wmi_sync);
+ complete(&ar->bss_survey_done);
wake_up(&ar->htt.empty_tx_wq);
wake_up(&ar->wmi.tx_credits_wq);
wake_up(&ar->peer_mapping_wq);
@@ -1383,15 +1562,17 @@ static void ath10k_core_restart(struct work_struct *work)
static int ath10k_core_init_firmware_features(struct ath10k *ar)
{
- if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
- !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) &&
+ !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
return -EINVAL;
}
- if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
+ if (fw_file->wmi_op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
- ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version);
+ ATH10K_FW_WMI_OP_VERSION_MAX, fw_file->wmi_op_version);
return -EINVAL;
}
@@ -1403,7 +1584,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
break;
case ATH10K_CRYPT_MODE_SW:
if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
- ar->fw_features)) {
+ fw_file->fw_features)) {
ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
return -EINVAL;
}
@@ -1422,7 +1603,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
if (rawmode) {
if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
- ar->fw_features)) {
+ fw_file->fw_features)) {
ath10k_err(ar, "rawmode = 1 requires support from firmware");
return -EINVAL;
}
@@ -1447,19 +1628,19 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
/* Backwards compatibility for firmwares without
* ATH10K_FW_IE_WMI_OP_VERSION.
*/
- if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ if (fw_file->wmi_op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
- ar->fw_features))
- ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
+ fw_file->fw_features))
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
else
- ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
} else {
- ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
}
}
- switch (ar->wmi.op_version) {
+ switch (fw_file->wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
ar->max_num_peers = TARGET_NUM_PEERS;
ar->max_num_stations = TARGET_NUM_STATIONS;
@@ -1472,8 +1653,13 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
- ar->max_num_peers = TARGET_10X_NUM_PEERS;
- ar->max_num_stations = TARGET_10X_NUM_STATIONS;
+ if (ath10k_peer_stats_enabled(ar)) {
+ ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
+ ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS;
+ } else {
+ ar->max_num_peers = TARGET_10X_NUM_PEERS;
+ ar->max_num_stations = TARGET_10X_NUM_STATIONS;
+ }
ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
ar->fw_stats_req_mask = WMI_STAT_PEER;
@@ -1496,9 +1682,15 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
- ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
- ar->fw_stats_req_mask = WMI_STAT_PEER;
- ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM;
+ ar->fw_stats_req_mask = WMI_10_4_STAT_PEER |
+ WMI_10_4_STAT_PEER_EXTD;
+ ar->max_spatial_stream = ar->hw_params.max_spatial_stream;
+
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ fw_file->fw_features))
+ ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC;
+ else
+ ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1509,23 +1701,23 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
/* Backwards compatibility for firmwares without
* ATH10K_FW_IE_HTT_OP_VERSION.
*/
- if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
- switch (ar->wmi.op_version) {
+ if (fw_file->htt_op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+ switch (fw_file->wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
- ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
- ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
- ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
break;
case ATH10K_FW_WMI_OP_VERSION_10_4:
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
- WARN_ON(1);
+ ath10k_err(ar, "htt op version not found from fw meta data");
return -EINVAL;
}
}
@@ -1533,14 +1725,67 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
return 0;
}
-int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
+static int ath10k_core_reset_rx_filter(struct ath10k *ar)
+{
+ int ret;
+ int vdev_id;
+ int vdev_type;
+ int vdev_subtype;
+ const u8 *vdev_addr;
+
+ vdev_id = 0;
+ vdev_type = WMI_VDEV_TYPE_STA;
+ vdev_subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
+ vdev_addr = ar->mac_addr;
+
+ ret = ath10k_wmi_vdev_create(ar, vdev_id, vdev_type, vdev_subtype,
+ vdev_addr);
+ if (ret) {
+ ath10k_err(ar, "failed to create dummy vdev: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_delete(ar, vdev_id);
+ if (ret) {
+ ath10k_err(ar, "failed to delete dummy vdev: %d\n", ret);
+ return ret;
+ }
+
+ /* WMI and HTT may use separate HIF pipes and are not guaranteed to be
+ * serialized properly implicitly.
+ *
+ * Moreover, (most) WMI commands have no explicit acknowledgements. It
+ * is possible to infer one implicitly by poking the firmware with an
+ * echo command - getting a reply means all preceding commands have
+ * been (mostly) processed.
+ *
+ * In case of vdev create/delete this is sufficient.
+ *
+ * Without this it's possible to end up with a race where the HTT Rx
+ * ring is started before the vdev create/delete hack completes,
+ * leaving a short window in which frames can be received (and ACKed).
+ */
+ ret = ath10k_wmi_barrier(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to ping firmware: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
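/* Editor's note: the implicit-ack pattern the comment above relies on,
 * sketched (assumes ath10k_wmi_barrier() performs an echo round-trip,
 * as its use here implies):
 *
 *   ath10k_wmi_vdev_create(ar, ...);  // no explicit ack from fw
 *   ath10k_wmi_vdev_delete(ar, 0);    // no explicit ack from fw
 *   ath10k_wmi_barrier(ar);           // echo reply => both processed
 */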
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ const struct ath10k_fw_components *fw)
{
int status;
+ u32 val;
lockdep_assert_held(&ar->conf_mutex);
clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+ ar->running_fw = fw;
+
ath10k_bmi_start(ar);
if (ath10k_init_configure_target(ar)) {
@@ -1559,7 +1804,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
* to set the clock source once the target is initialized.
*/
if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
- ar->fw_features)) {
+ ar->running_fw->fw_file.fw_features)) {
status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
if (status) {
ath10k_err(ar, "could not write to skip_clock_init: %d\n",
@@ -1568,7 +1813,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
}
}
- status = ath10k_download_fw(ar, mode);
+ status = ath10k_download_fw(ar);
if (status)
goto err;
@@ -1656,6 +1901,33 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
ar->hw->wiphy->fw_version);
+ if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) {
+ val = 0;
+ if (ath10k_peer_stats_enabled(ar))
+ val = WMI_10_4_PEER_STATS;
+
+ if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
+ val |= WMI_10_4_BSS_CHANNEL_INFO_64;
+
+ /* 10.4 firmware supports BT-Coex without reloading firmware via a
+ * pdev param. To support the Bluetooth coexistence pdev param,
+ * WMI_COEX_GPIO_SUPPORT in the extended resource config should
+ * always be enabled.
+ */
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
+ test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features))
+ val |= WMI_10_4_COEX_GPIO_SUPPORT;
+
+ status = ath10k_mac_ext_resource_config(ar, val);
+ if (status) {
+ ath10k_err(ar,
+ "failed to send ext resource cfg command : %d\n",
+ status);
+ goto err_hif_stop;
+ }
+ }
+
status = ath10k_wmi_cmd_init(ar);
if (status) {
ath10k_err(ar, "could not send WMI init command (%d)\n",
@@ -1669,6 +1941,25 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
goto err_hif_stop;
}
+ /* Some firmware revisions do not properly set up hardware rx filter
+ * registers.
+ *
+ * A known example from QCA9880 and 10.2.4 is that MAC_PCU_ADDR1_MASK
+ * is filled with 0s instead of 1s allowing HW to respond with ACKs to
+ * any frames that match MAC_PCU_RX_FILTER, which is also
+ * misconfigured to accept anything.
+ *
+ * ADDR1 is programmed via an internal firmware structure field and
+ * can't be (easily/sanely) reached from the driver explicitly. It is
+ * possible to implicitly make it correct by creating a dummy vdev and
+ * then deleting it.
+ */
+ status = ath10k_core_reset_rx_filter(ar);
+ if (status) {
+ ath10k_err(ar, "failed to reset rx filter: %d\n", status);
+ goto err_hif_stop;
+ }
+
/* If firmware indicates Full Rx Reorder support it must be used in a
* slightly different manner. Let HTT code know.
*/
@@ -1681,7 +1972,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
goto err_hif_stop;
}
- ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
+ if (ar->max_num_vdevs >= 64)
+ ar->free_vdev_map = 0xFFFFFFFFFFFFFFFFLL;
+ else
+ ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
INIT_LIST_HEAD(&ar->arvifs);
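/* Editor's note: the branch above avoids undefined behaviour - in C,
 * shifting a 64-bit value by 64 or more bits is UB, so
 * (1LL << max_num_vdevs) - 1 is only safe while max_num_vdevs < 64.
 * Equivalent sketch:
 *
 *   u64 map = (n >= 64) ? ~0ULL : (1ULL << n) - 1;  // low n bits set
 */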
@@ -1790,11 +2084,27 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_power_down;
}
+ BUILD_BUG_ON(sizeof(ar->hw->wiphy->fw_version) !=
+ sizeof(ar->normal_mode_fw.fw_file.fw_version));
+ memcpy(ar->hw->wiphy->fw_version, ar->normal_mode_fw.fw_file.fw_version,
+ sizeof(ar->hw->wiphy->fw_version));
+
+ ath10k_debug_print_hwfw_info(ar);
+
+ ret = ath10k_core_pre_cal_download(ar);
+ if (ret) {
+ /* pre calibration data download is not necessary
+ * for all chipsets. Ignore failures and continue.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "could not load pre cal data: %d\n", ret);
+ }
+
ret = ath10k_core_get_board_id_from_otp(ar);
if (ret && ret != -EOPNOTSUPP) {
- ath10k_err(ar, "failed to get board id from otp for qca99x0: %d\n",
+ ath10k_err(ar, "failed to get board id from otp: %d\n",
ret);
- return ret;
+ goto err_free_firmware_files;
}
ret = ath10k_core_fetch_board_file(ar);
@@ -1803,6 +2113,8 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_free_firmware_files;
}
+ ath10k_debug_print_board_info(ar);
+
ret = ath10k_core_init_firmware_features(ar);
if (ret) {
ath10k_err(ar, "fatal problem with firmware features: %d\n",
@@ -1810,7 +2122,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_free_firmware_files;
}
- ret = ath10k_swap_code_seg_init(ar);
+ ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file);
if (ret) {
ath10k_err(ar, "failed to initialize code swap segment: %d\n",
ret);
@@ -1819,13 +2131,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
mutex_lock(&ar->conf_mutex);
- ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+ &ar->normal_mode_fw);
if (ret) {
ath10k_err(ar, "could not init core (%d)\n", ret);
goto err_unlock;
}
- ath10k_print_driver_info(ar);
+ ath10k_debug_print_boot_info(ar);
ath10k_core_stop(ar);
mutex_unlock(&ar->conf_mutex);
@@ -1850,6 +2163,9 @@ static void ath10k_core_register_work(struct work_struct *work)
struct ath10k *ar = container_of(work, struct ath10k, register_work);
int status;
+ /* peer stats are enabled by default */
+ set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
status = ath10k_core_probe_fw(ar);
if (status) {
ath10k_err(ar, "could not probe fw (%d)\n", status);
@@ -1957,6 +2273,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
switch (hw_rev) {
case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
ar->regs = &qca988x_regs;
ar->hw_values = &qca988x_values;
break;
@@ -1966,9 +2283,18 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ar->hw_values = &qca6174_values;
break;
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
ar->regs = &qca99x0_regs;
ar->hw_values = &qca99x0_values;
break;
+ case ATH10K_HW_QCA9888:
+ ar->regs = &qca99x0_regs;
+ ar->hw_values = &qca9888_values;
+ break;
+ case ATH10K_HW_QCA4019:
+ ar->regs = &qca4019_regs;
+ ar->hw_values = &qca4019_values;
+ break;
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",
hw_rev);
@@ -1985,6 +2311,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);
init_completion(&ar->thermal.wmi_sync);
+ init_completion(&ar->bss_survey_done);
INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
@@ -1998,7 +2325,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
mutex_init(&ar->conf_mutex);
spin_lock_init(&ar->data_lock);
+ spin_lock_init(&ar->txqs_lock);
+ INIT_LIST_HEAD(&ar->txqs);
INIT_LIST_HEAD(&ar->peers);
init_waitqueue_head(&ar->peer_mapping_wq);
init_waitqueue_head(&ar->htt.empty_tx_wq);
@@ -2014,6 +2343,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
INIT_WORK(&ar->register_work, ath10k_core_register_work);
INIT_WORK(&ar->restart_work, ath10k_core_restart);
+ init_dummy_netdev(&ar->napi_dev);
+
ret = ath10k_debug_create(ar);
if (ret)
goto err_free_aux_wq;
@@ -2047,5 +2378,5 @@ void ath10k_core_destroy(struct ath10k *ar)
EXPORT_SYMBOL(ath10k_core_destroy);
MODULE_AUTHOR("Qualcomm Atheros");
-MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
+MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 858d75f49a9f..521f1c55c19e 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -44,8 +44,8 @@
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
-#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
-#define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
+#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
#define ATH10K_NUM_CHANS 39
/* Antenna noise floor */
@@ -65,10 +65,15 @@
#define ATH10K_KEEPALIVE_MAX_IDLE 3895
#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+/* NAPI poll budget */
+#define ATH10K_NAPI_BUDGET 64
+#define ATH10K_NAPI_QUOTA_LIMIT 60
+
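/* Editor's note (assumption - the poll handler itself lands elsewhere
 * in the series): ATH10K_NAPI_BUDGET caps the work done per NAPI poll
 * in the usual netdev fashion - process up to 'budget' frames and only
 * complete NAPI and re-enable interrupts when less than the budget was
 * consumed. ATH10K_NAPI_QUOTA_LIMIT reads as a lower soft cap used to
 * split that budget between work sources; treat that as a hedged
 * reading rather than a statement of the implementation. */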
struct ath10k;
enum ath10k_bus {
ATH10K_BUS_PCI,
+ ATH10K_BUS_AHB,
};
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@@ -76,31 +81,28 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus)
switch (bus) {
case ATH10K_BUS_PCI:
return "pci";
+ case ATH10K_BUS_AHB:
+ return "ahb";
}
return "unknown";
}
+enum ath10k_skb_flags {
+ ATH10K_SKB_F_NO_HWCRYPT = BIT(0),
+ ATH10K_SKB_F_DTIM_ZERO = BIT(1),
+ ATH10K_SKB_F_DELIVER_CAB = BIT(2),
+ ATH10K_SKB_F_MGMT = BIT(3),
+ ATH10K_SKB_F_QOS = BIT(4),
+};
+
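/* Editor's note: hypothetical helper (not in the patch) showing how the
 * new bit flags replace the old per-purpose booleans in ath10k_skb_cb: */
static inline void example_mark_tx_frame(struct ath10k_skb_cb *cb,
					 bool is_mgmt, bool sw_crypto)
{
	if (is_mgmt)
		cb->flags |= ATH10K_SKB_F_MGMT;
	if (sw_crypto)
		cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
}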
struct ath10k_skb_cb {
dma_addr_t paddr;
+ u8 flags;
u8 eid;
- u8 vdev_id;
- enum ath10k_hw_txrx_mode txmode;
- bool is_protected;
-
- struct {
- u8 tid;
- u16 freq;
- bool is_offchan;
- bool nohwcrypt;
- struct ath10k_htt_txbuf *txbuf;
- u32 txbuf_paddr;
- } __packed htt;
-
- struct {
- bool dtim_zero;
- bool deliver_cab;
- } bcn;
+ u16 msdu_id;
+ struct ieee80211_vif *vif;
+ struct ieee80211_txq *txq;
} __packed;
struct ath10k_skb_rxcb {
@@ -141,16 +143,17 @@ struct ath10k_mem_chunk {
};
struct ath10k_wmi {
- enum ath10k_fw_wmi_op_version op_version;
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
+ struct completion barrier;
wait_queue_head_t tx_credits_wq;
DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
struct wmi_cmd_map *cmd;
struct wmi_vdev_param_map *vdev_param;
struct wmi_pdev_param_map *pdev_param;
const struct wmi_ops *ops;
+ const struct wmi_peer_flags_map *peer_flags;
u32 num_mem_chunks;
u32 rx_decap_mode;
@@ -164,6 +167,14 @@ struct ath10k_fw_stats_peer {
u32 peer_rssi;
u32 peer_tx_rate;
u32 peer_rx_rate; /* 10x only */
+ u32 rx_duration;
+};
+
+struct ath10k_fw_extd_stats_peer {
+ struct list_head list;
+
+ u8 peer_macaddr[ETH_ALEN];
+ u32 rx_duration;
};
struct ath10k_fw_stats_vdev {
@@ -190,10 +201,10 @@ struct ath10k_fw_stats_pdev {
/* PDEV stats */
s32 ch_noise_floor;
- u32 tx_frame_count;
- u32 rx_frame_count;
- u32 rx_clear_count;
- u32 cycle_count;
+ u32 tx_frame_count; /* Cycles spent transmitting frames */
+ u32 rx_frame_count; /* Cycles spent receiving frames */
+ u32 rx_clear_count; /* Total channel busy time, evidently */
+ u32 cycle_count; /* Total on-channel time */
u32 phy_err_count;
u32 chan_tx_power;
u32 ack_rx_bad;
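/* Editor's note: worked example (assumed usage, not from the patch) of
 * combining these counters - channel utilization over an interval is
 * the ratio of the busy-cycle delta to the total-cycle delta:
 *
 *   busy% = 100 * (rx_clear_count - prev_clear)
 *               / (cycle_count - prev_cycle);
 *
 * Survey reporting does essentially this division, modulo the
 * shifted-wraparound quirk handled by cc_wraparound_type above. */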
@@ -257,9 +268,11 @@ struct ath10k_fw_stats_pdev {
};
struct ath10k_fw_stats {
+ bool extended;
struct list_head pdevs;
struct list_head vdevs;
struct list_head peers;
+ struct list_head peers_extd;
};
#define ATH10K_TPC_TABLE_TYPE_FLAG 1
@@ -298,6 +311,9 @@ struct ath10k_dfs_stats {
struct ath10k_peer {
struct list_head list;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+
int vdev_id;
u8 addr[ETH_ALEN];
DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
@@ -306,6 +322,12 @@ struct ath10k_peer {
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
};
+struct ath10k_txq {
+ struct list_head list;
+ unsigned long num_fw_queued;
+ unsigned long num_push_allowed;
+};
+
struct ath10k_sta {
struct ath10k_vif *arvif;
@@ -314,16 +336,18 @@ struct ath10k_sta {
u32 bw;
u32 nss;
u32 smps;
+ u16 peer_id;
struct work_struct update_wk;
#ifdef CONFIG_MAC80211_DEBUGFS
/* protected by conf_mutex */
bool aggr_mode;
+ u64 rx_duration;
#endif
};
-#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
+#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
enum ath10k_beacon_state {
ATH10K_BEACON_SCHEDULED = 0,
@@ -335,6 +359,7 @@ struct ath10k_vif {
struct list_head list;
u32 vdev_id;
+ u16 peer_id;
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
@@ -420,11 +445,12 @@ struct ath10k_debug {
struct completion tpc_complete;
/* protected by conf_mutex */
- u32 fw_dbglog_mask;
+ u64 fw_dbglog_mask;
u32 fw_dbglog_level;
u32 pktlog_filter;
u32 reg_addr;
u32 nf_cal_period;
+ void *cal_data;
struct ath10k_fw_crash_data *fw_crash_data;
};
@@ -512,6 +538,32 @@ enum ath10k_fw_features {
/* Firmware Supports Adaptive CCA*/
ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA = 11,
+ /* Firmware supports management frame protection */
+ ATH10K_FW_FEATURE_MFP_SUPPORT = 12,
+
+ /* Firmware supports a pull-push model where the host shares its
+ * software queue state with the firmware, which then generates fetch
+ * requests telling the host which queues to dequeue tx from.
+ *
+ * Primary function of this is improved MU-MIMO performance with
+ * multiple clients.
+ */
+ ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13,
+
+ /* Firmware supports BT-Coex without reloading firmware via pdev param.
+ * To support the Bluetooth coexistence pdev param, WMI_COEX_GPIO_SUPPORT
+ * of the extended resource config should always be enabled. This firmware IE
+ * is used to configure WMI_COEX_GPIO_SUPPORT.
+ */
+ ATH10K_FW_FEATURE_BTCOEX_PARAM = 14,
+
+ /* Older firmware with HTT delivers incorrect tx status for null func
+ * frames to the driver, but this is fixed in the 10.2 and 10.4
+ * firmware versions. The workaround also reports incorrect null func
+ * status for 10.4, so this flag is used to skip it there.
+ */
+ ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -534,12 +586,21 @@ enum ath10k_dev_flags {
/* Disable HW crypto engine */
ATH10K_FLAG_HW_CRYPTO_DISABLED,
+
+ /* Bluetooth coexistence enabled */
+ ATH10K_FLAG_BTCOEX,
+
+ /* Per Station statistics service */
+ ATH10K_FLAG_PEER_STATS,
};
enum ath10k_cal_mode {
ATH10K_CAL_MODE_FILE,
ATH10K_CAL_MODE_OTP,
ATH10K_CAL_MODE_DT,
+ ATH10K_PRE_CAL_MODE_FILE,
+ ATH10K_PRE_CAL_MODE_DT,
+ ATH10K_CAL_MODE_EEPROM,
};
enum ath10k_crypt_mode {
@@ -558,6 +619,12 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
return "otp";
case ATH10K_CAL_MODE_DT:
return "dt";
+ case ATH10K_PRE_CAL_MODE_FILE:
+ return "pre-cal-file";
+ case ATH10K_PRE_CAL_MODE_DT:
+ return "pre-cal-dt";
+ case ATH10K_CAL_MODE_EEPROM:
+ return "eeprom";
}
return "unknown";
@@ -591,9 +658,47 @@ enum ath10k_tx_pause_reason {
ATH10K_TX_PAUSE_MAX,
};
+struct ath10k_fw_file {
+ const struct firmware *firmware;
+
+ char fw_version[ETHTOOL_FWVERS_LEN];
+
+ DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
+ enum ath10k_fw_wmi_op_version wmi_op_version;
+ enum ath10k_fw_htt_op_version htt_op_version;
+
+ const void *firmware_data;
+ size_t firmware_len;
+
+ const void *otp_data;
+ size_t otp_len;
+
+ const void *codeswap_data;
+ size_t codeswap_len;
+
+ /* The original idea of struct ath10k_fw_file was that it only
+ * contains struct firmware and pointers to various parts (actual
+ * firmware binary, otp, metadata etc) of the file. This seg_info
+ * is actually created separately, but since it is used much like
+ * the other firmware components it is more convenient to keep it
+ * here.
+ */
+ struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
+};
+
+struct ath10k_fw_components {
+ const struct firmware *board;
+ const void *board_data;
+ size_t board_len;
+
+ struct ath10k_fw_file fw_file;
+};
+
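/* Editor's note: sketch of how the two component sets are meant to be
 * passed around (mirrors the new ath10k_core_start() signature; the
 * testmode call is an assumption based on utf_mode_fw further below):
 *
 *   ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
 *                     &ar->normal_mode_fw);
 *   ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF,
 *                     &ar->testmode.utf_mode_fw);
 */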
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
struct device *dev;
u8 mac_addr[ETH_ALEN];
@@ -616,8 +721,6 @@ struct ath10k {
/* protected by conf_mutex */
bool ani_enabled;
- DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
-
bool p2p;
struct {
@@ -634,65 +737,20 @@ struct ath10k {
struct ath10k_htc htc;
struct ath10k_htt htt;
- struct ath10k_hw_params {
- u32 id;
- u16 dev_id;
- const char *name;
- u32 patch_load_addr;
- int uart_pin;
- u32 otp_exe_param;
-
- /* This is true if given HW chip has a quirky Cycle Counter
- * wraparound which resets to 0x7fffffff instead of 0. All
- * other CC related counters (e.g. Rx Clear Count) are divided
- * by 2 so they never wraparound themselves.
- */
- bool has_shifted_cc_wraparound;
-
- /* Some of chip expects fragment descriptor to be continuous
- * memory for any TX operation. Set continuous_frag_desc flag
- * for the hardware which have such requirement.
- */
- bool continuous_frag_desc;
-
- u32 channel_counters_freq_hz;
-
- /* Mgmt tx descriptors threshold for limiting probe response
- * frames.
- */
- u32 max_probe_resp_desc_thres;
-
- struct ath10k_hw_params_fw {
- const char *dir;
- const char *fw;
- const char *otp;
- const char *board;
- size_t board_size;
- size_t board_ext_size;
- } fw;
- } hw_params;
+ struct ath10k_hw_params hw_params;
- const struct firmware *board;
- const void *board_data;
- size_t board_len;
+ /* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */
+ struct ath10k_fw_components normal_mode_fw;
- const struct firmware *otp;
- const void *otp_data;
- size_t otp_len;
-
- const struct firmware *firmware;
- const void *firmware_data;
- size_t firmware_len;
+ /* READ-ONLY images of the running firmware, which can be either
+ * normal or UTF. Do not modify or release them!
+ */
+ const struct ath10k_fw_components *running_fw;
+ const struct firmware *pre_cal_file;
const struct firmware *cal_file;
struct {
- const void *firmware_codeswap_data;
- size_t firmware_codeswap_len;
- struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
- } swap;
-
- struct {
u32 vendor;
u32 device;
u32 subsystem_vendor;
@@ -720,7 +778,7 @@ struct ath10k {
} scan;
struct {
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
} mac;
/* should never be NULL; needed for regular htt rx */
@@ -732,6 +790,9 @@ struct ath10k {
/* current operating channel definition */
struct cfg80211_chan_def chandef;
+ /* currently configured operating channel in firmware */
+ struct ieee80211_channel *tgt_oper_chan;
+
unsigned long long free_vdev_map;
struct ath10k_vif *monitor_arvif;
bool monitor;
@@ -762,9 +823,13 @@ struct ath10k {
/* protects shared structure data */
spinlock_t data_lock;
+ /* protects: ar->txqs, artxq->list */
+ spinlock_t txqs_lock;
+ struct list_head txqs;
struct list_head arvifs;
struct list_head peers;
+ struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
wait_queue_head_t peer_mapping_wq;
/* protected by conf_mutex */
@@ -807,6 +872,7 @@ struct ath10k {
* avoid reporting garbage data.
*/
bool ch_info_can_report_survey;
+ struct completion bss_survey_done;
struct dfs_pattern_detector *dfs_detector;
@@ -814,8 +880,6 @@ struct ath10k {
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
-#endif
-
struct {
/* relay(fs) channel for spectral scan */
struct rchan *rfs_chan_spec_scan;
@@ -824,16 +888,12 @@ struct ath10k {
enum ath10k_spectral_mode mode;
struct ath10k_spec_scan config;
} spectral;
+#endif
struct {
/* protected by conf_mutex */
- const struct firmware *utf;
- char utf_version[32];
- const void *utf_firmware_data;
- size_t utf_firmware_len;
- DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
- enum ath10k_fw_wmi_op_version orig_wmi_op_version;
- enum ath10k_fw_wmi_op_version op_version;
+ struct ath10k_fw_components utf_mode_fw;
+
/* protected by data_lock */
bool utf_monitor;
} testmode;
@@ -848,10 +908,23 @@ struct ath10k {
struct ath10k_thermal thermal;
struct ath10k_wow wow;
+ /* NAPI */
+ struct net_device napi_dev;
+ struct napi_struct napi;
+
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
+static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
+{
+ if (test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) &&
+ test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ return true;
+
+ return false;
+}
+
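/* Editor's note: illustrative caller from earlier in this same patch -
 * the 10.x WMI branch sizes the peer tables off this helper:
 *
 *   if (ath10k_peer_stats_enabled(ar)) {
 *           ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
 *           ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS;
 *   } else {
 *           ar->max_num_peers = TARGET_10X_NUM_PEERS;
 *           ar->max_num_stations = TARGET_10X_NUM_STATIONS;
 *   }
 */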
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus,
enum ath10k_hw_rev hw_rev,
@@ -860,8 +933,11 @@ void ath10k_core_destroy(struct ath10k *ar);
void ath10k_core_get_fw_features_str(struct ath10k *ar,
char *buf,
size_t max_len);
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+ struct ath10k_fw_file *fw_file);
-int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ const struct ath10k_fw_components *fw_components);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar, u32 chip_id);
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 1a88a24ffeac..82a4c67f3672 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -19,6 +19,8 @@
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/utsname.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
#include "core.h"
#include "debug.h"
@@ -28,6 +30,8 @@
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+#define ATH10K_DEBUG_CAL_DATA_LEN 12064
+
#define ATH10K_FW_CRASH_DUMP_VERSION 1
/**
@@ -122,43 +126,73 @@ void ath10k_info(struct ath10k *ar, const char *fmt, ...)
}
EXPORT_SYMBOL(ath10k_info);
-void ath10k_print_driver_info(struct ath10k *ar)
+void ath10k_debug_print_hwfw_info(struct ath10k *ar)
{
+ const struct firmware *firmware;
char fw_features[128] = {};
- char boardinfo[100];
+ u32 crc = 0;
ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
- if (ar->id.bmi_ids_valid)
- scnprintf(boardinfo, sizeof(boardinfo), "bmi %d:%d",
- ar->id.bmi_chip_id, ar->id.bmi_board_id);
- else
- scnprintf(boardinfo, sizeof(boardinfo), "sub %04x:%04x",
- ar->id.subsystem_vendor, ar->id.subsystem_device);
-
- ath10k_info(ar, "%s (0x%08x, 0x%08x %s) fw %s fwapi %d bdapi %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
+ ath10k_info(ar, "%s target 0x%08x chip_id 0x%08x sub %04x:%04x",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
- boardinfo,
+ ar->id.subsystem_vendor, ar->id.subsystem_device);
+
+ ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n",
+ IS_ENABLED(CONFIG_ATH10K_DEBUG),
+ IS_ENABLED(CONFIG_ATH10K_DEBUGFS),
+ IS_ENABLED(CONFIG_ATH10K_TRACING),
+ IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED),
+ IS_ENABLED(CONFIG_NL80211_TESTMODE));
+
+ firmware = ar->normal_mode_fw.fw_file.firmware;
+ if (firmware)
+ crc = crc32_le(0, firmware->data, firmware->size);
+
+ ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n",
ar->hw->wiphy->fw_version,
ar->fw_api,
+ fw_features,
+ crc);
+}
+
+void ath10k_debug_print_board_info(struct ath10k *ar)
+{
+ char boardinfo[100];
+
+ if (ar->id.bmi_ids_valid)
+ scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
+ ar->id.bmi_chip_id, ar->id.bmi_board_id);
+ else
+ scnprintf(boardinfo, sizeof(boardinfo), "N/A");
+
+ ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
ar->bd_api,
+ boardinfo,
+ crc32_le(0, ar->normal_mode_fw.board->data,
+ ar->normal_mode_fw.board->size));
+}
+
+void ath10k_debug_print_boot_info(struct ath10k *ar)
+{
+ ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n",
ar->htt.target_version_major,
ar->htt.target_version_minor,
- ar->wmi.op_version,
- ar->htt.op_version,
+ ar->normal_mode_fw.fw_file.wmi_op_version,
+ ar->normal_mode_fw.fw_file.htt_op_version,
ath10k_cal_mode_str(ar->cal_mode),
ar->max_num_stations,
test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
- !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags),
- fw_features);
- ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
- config_enabled(CONFIG_ATH10K_DEBUG),
- config_enabled(CONFIG_ATH10K_DEBUGFS),
- config_enabled(CONFIG_ATH10K_TRACING),
- config_enabled(CONFIG_ATH10K_DFS_CERTIFIED),
- config_enabled(CONFIG_NL80211_TESTMODE));
+ !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags));
+}
+
+void ath10k_print_driver_info(struct ath10k *ar)
+{
+ ath10k_debug_print_hwfw_info(ar);
+ ath10k_debug_print_board_info(ar);
+ ath10k_debug_print_boot_info(ar);
}
EXPORT_SYMBOL(ath10k_print_driver_info);
@@ -251,7 +285,7 @@ static const struct file_operations fops_wmi_services = {
.llseek = default_llseek,
};
-static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
+static void ath10k_fw_stats_pdevs_free(struct list_head *head)
{
struct ath10k_fw_stats_pdev *i, *tmp;
@@ -261,7 +295,7 @@ static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
}
}
-static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head)
+static void ath10k_fw_stats_vdevs_free(struct list_head *head)
{
struct ath10k_fw_stats_vdev *i, *tmp;
@@ -271,7 +305,7 @@ static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head)
}
}
-static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
+static void ath10k_fw_stats_peers_free(struct list_head *head)
{
struct ath10k_fw_stats_peer *i, *tmp;
@@ -281,13 +315,25 @@ static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
}
}
+static void ath10k_fw_extd_stats_peers_free(struct list_head *head)
+{
+ struct ath10k_fw_extd_stats_peer *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
{
spin_lock_bh(&ar->data_lock);
ar->debug.fw_stats_done = false;
- ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
- ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
- ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+ ar->debug.fw_stats.extended = false;
+ ath10k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+ ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+ ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd);
spin_unlock_bh(&ar->data_lock);
}
@@ -302,6 +348,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
INIT_LIST_HEAD(&stats.pdevs);
INIT_LIST_HEAD(&stats.vdevs);
INIT_LIST_HEAD(&stats.peers);
+ INIT_LIST_HEAD(&stats.peers_extd);
spin_lock_bh(&ar->data_lock);
ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
@@ -321,9 +368,13 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
* b) consume stat update events until another one with pdev stats is
* delivered which is treated as end-of-data and is itself discarded
*/
+ if (ath10k_peer_stats_enabled(ar))
+ ath10k_sta_update_rx_duration(ar, &stats);
if (ar->debug.fw_stats_done) {
- ath10k_warn(ar, "received unsolicited stats update event\n");
+ if (!ath10k_peer_stats_enabled(ar))
+ ath10k_warn(ar, "received unsolicited stats update event\n");
+
goto free;
}
@@ -347,17 +398,21 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
/* Although this is unlikely impose a sane limit to
* prevent firmware from DoS-ing the host.
*/
+ ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
ath10k_warn(ar, "dropping fw peer stats\n");
goto free;
}
if (num_vdevs >= BITS_PER_LONG) {
+ ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
ath10k_warn(ar, "dropping fw vdev stats\n");
goto free;
}
list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
+ list_splice_tail_init(&stats.peers_extd,
+ &ar->debug.fw_stats.peers_extd);
}
complete(&ar->debug.fw_stats_complete);
@@ -366,9 +421,10 @@ free:
/* In some cases lists have been spliced and cleared. Free up
* resources if that is not the case.
*/
- ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
- ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
- ath10k_debug_fw_stats_peers_free(&stats.peers);
+ ath10k_fw_stats_pdevs_free(&stats.pdevs);
+ ath10k_fw_stats_vdevs_free(&stats.vdevs);
+ ath10k_fw_stats_peers_free(&stats.peers);
+ ath10k_fw_extd_stats_peers_free(&stats.peers_extd);
spin_unlock_bh(&ar->data_lock);
}
@@ -571,25 +627,23 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
char buf[32];
int ret;
- mutex_lock(&ar->conf_mutex);
-
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = 0;
+ /* drop the possible '\n' from the end */
+ if (buf[count - 1] == '\n')
+ buf[count - 1] = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
if (ar->state != ATH10K_STATE_ON &&
ar->state != ATH10K_STATE_RESTARTED) {
ret = -ENETDOWN;
goto exit;
}
- /* drop the possible '\n' from the end */
- if (buf[count - 1] == '\n') {
- buf[count - 1] = 0;
- count--;
- }
-
if (!strcmp(buf, "soft")) {
ath10k_info(ar, "simulating soft firmware crash\n");
ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
@@ -1114,7 +1168,7 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
{
struct ath10k *ar = file->private_data;
char buf[64];
- u8 amsdu = 3, ampdu = 64;
+ u8 amsdu, ampdu;
unsigned int len;
mutex_lock(&ar->conf_mutex);
@@ -1176,9 +1230,9 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file,
{
struct ath10k *ar = file->private_data;
unsigned int len;
- char buf[64];
+ char buf[96];
- len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
+ len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n",
ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
@@ -1190,15 +1244,16 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
{
struct ath10k *ar = file->private_data;
int ret;
- char buf[64];
- unsigned int log_level, mask;
+ char buf[96];
+ unsigned int log_level;
+ u64 mask;
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = 0;
- ret = sscanf(buf, "%x %u", &mask, &log_level);
+ ret = sscanf(buf, "%llx %u", &mask, &log_level);
if (!ret)
return -EINVAL;
@@ -1398,74 +1453,68 @@ static const struct file_operations fops_fw_dbglog = {
.llseek = default_llseek,
};
-static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
{
- struct ath10k *ar = inode->i_private;
- void *buf;
u32 hi_addr;
__le32 addr;
int ret;
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH10K_STATE_ON &&
- ar->state != ATH10K_STATE_UTF) {
- ret = -ENETDOWN;
- goto err;
- }
+ lockdep_assert_held(&ar->conf_mutex);
- buf = vmalloc(QCA988X_CAL_DATA_LEN);
- if (!buf) {
- ret = -ENOMEM;
- goto err;
- }
+ if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
+ return -EINVAL;
hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
if (ret) {
- ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
- goto err_vfree;
+ ath10k_warn(ar, "failed to read hi_board_data address: %d\n",
+ ret);
+ return ret;
}
- ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
- QCA988X_CAL_DATA_LEN);
+ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data,
+ ar->hw_params.cal_data_len);
if (ret) {
ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
- goto err_vfree;
+ return ret;
}
- file->private_data = buf;
+ return 0;
+}
- mutex_unlock(&ar->conf_mutex);
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
- return 0;
+ mutex_lock(&ar->conf_mutex);
-err_vfree:
- vfree(buf);
+ if (ar->state == ATH10K_STATE_ON ||
+ ar->state == ATH10K_STATE_UTF) {
+ ath10k_debug_cal_data_fetch(ar);
+ }
-err:
+ file->private_data = ar;
mutex_unlock(&ar->conf_mutex);
- return ret;
+ return 0;
}
static ssize_t ath10k_debug_cal_data_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- void *buf = file->private_data;
+ struct ath10k *ar = file->private_data;
- return simple_read_from_buffer(user_buf, count, ppos,
- buf, QCA988X_CAL_DATA_LEN);
-}
+ mutex_lock(&ar->conf_mutex);
-static int ath10k_debug_cal_data_release(struct inode *inode,
- struct file *file)
-{
- vfree(file->private_data);
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ ar->debug.cal_data,
+ ar->hw_params.cal_data_len);
- return 0;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
}
static ssize_t ath10k_write_ani_enable(struct file *file,
@@ -1526,7 +1575,6 @@ static const struct file_operations fops_ani_enable = {
static const struct file_operations fops_cal_data = {
.open = ath10k_debug_cal_data_open,
.read = ath10k_debug_cal_data_read,
- .release = ath10k_debug_cal_data_release,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
@@ -1878,6 +1926,8 @@ void ath10k_debug_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
+ ath10k_debug_cal_data_fetch(ar);
+
/* Must not use _sync to avoid deadlock, we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
* warning from del_timer(). */
@@ -2079,15 +2129,225 @@ static const struct file_operations fops_quiet_period = {
.open = simple_open
};
+static ssize_t ath10k_write_btcoex(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ size_t buf_size;
+ int ret;
+ bool val;
+ u32 pdev_param;
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, ubuf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ if (strtobool(buf, &val) != 0)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) {
+ ret = count;
+ goto exit;
+ }
+
+ pdev_param = ar->wmi.pdev_param->enable_btcoex;
+ if (test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features)) {
+ ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable btcoex: %d\n", ret);
+ ret = count;
+ goto exit;
+ }
+ } else {
+ ath10k_info(ar, "restarting firmware due to btcoex change");
+ queue_work(ar->workqueue, &ar->restart_work);
+ }
+
+ if (val)
+ set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+ else
+ clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_read_btcoex(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags));
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_btcoex = {
+ .read = ath10k_read_btcoex,
+ .write = ath10k_write_btcoex,
+ .open = simple_open
+};
+
+static ssize_t ath10k_write_peer_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ size_t buf_size;
+ int ret;
+ bool val;
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, ubuf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ if (strtobool(buf, &val) != 0)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) {
+ ret = count;
+ goto exit;
+ }
+
+ if (val)
+ set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+ else
+ clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
+ ath10k_info(ar, "restarting firmware due to Peer stats change");
+
+ queue_work(ar->workqueue, &ar->restart_work);
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags));
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_stats = {
+ .read = ath10k_read_peer_stats,
+ .write = ath10k_write_peer_stats,
+ .open = simple_open
+};
+
+static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned int len = 0, buf_len = 4096;
+ ssize_t ret_cnt;
+ char *buf;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "firmware-N.bin\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.fw_file.firmware->data,
+ ar->normal_mode_fw.fw_file.firmware->size));
+ len += scnprintf(buf + len, buf_len - len,
+ "athwlan\t\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.fw_file.firmware_data,
+ ar->normal_mode_fw.fw_file.firmware_len));
+ len += scnprintf(buf + len, buf_len - len,
+ "otp\t\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.fw_file.otp_data,
+ ar->normal_mode_fw.fw_file.otp_len));
+ len += scnprintf(buf + len, buf_len - len,
+ "codeswap\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.fw_file.codeswap_data,
+ ar->normal_mode_fw.fw_file.codeswap_len));
+ len += scnprintf(buf + len, buf_len - len,
+ "board-N.bin\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.board->data,
+ ar->normal_mode_fw.board->size));
+ len += scnprintf(buf + len, buf_len - len,
+ "board\t\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.board_data,
+ ar->normal_mode_fw.board_len));
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_fw_checksums = {
+ .read = ath10k_debug_fw_checksums_read,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
if (!ar->debug.fw_crash_data)
return -ENOMEM;
+ ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
+ if (!ar->debug.cal_data)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.peers_extd);
return 0;
}
@@ -2097,6 +2357,9 @@ void ath10k_debug_destroy(struct ath10k *ar)
vfree(ar->debug.fw_crash_data);
ar->debug.fw_crash_data = NULL;
+ vfree(ar->debug.cal_data);
+ ar->debug.cal_data = NULL;
+
ath10k_debug_fw_stats_reset(ar);
kfree(ar->debug.tpc_stats);
@@ -2128,8 +2391,8 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
&fops_wmi_services);
- debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
- ar, &fops_simulate_fw_crash);
+ debugfs_create_file("simulate_fw_crash", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash);
debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_fw_crash_dump);
@@ -2146,15 +2409,15 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_chip_id);
- debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
- ar, &fops_htt_stats_mask);
+ debugfs_create_file("htt_stats_mask", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_htt_stats_mask);
debugfs_create_file("htt_max_amsdu_ampdu", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_htt_max_amsdu_ampdu);
- debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
- ar, &fops_fw_dbglog);
+ debugfs_create_file("fw_dbglog", S_IRUSR | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_fw_dbglog);
debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_cal_data);
@@ -2165,7 +2428,7 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
debugfs_create_file("dfs_simulate_radar", S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_simulate_radar);
@@ -2188,6 +2451,18 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("tpc_stats", S_IRUSR,
ar->debug.debugfs_phy, ar, &fops_tpc_stats);
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+ debugfs_create_file("btcoex", S_IRUGO | S_IWUSR,
+ ar->debug.debugfs_phy, ar, &fops_btcoex);
+
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ debugfs_create_file("peer_stats", S_IRUGO | S_IWUSR,
+ ar->debug.debugfs_phy, ar,
+ &fops_peer_stats);
+
+ debugfs_create_file("fw_checksums", S_IRUSR,
+ ar->debug.debugfs_phy, ar, &fops_fw_checksums);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 7de780c4ec8d..c458fa96a6d4 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -37,6 +37,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_TESTMODE = 0x00001000,
ATH10K_DBG_WMI_PRINT = 0x00002000,
ATH10K_DBG_PCI_PS = 0x00004000,
+ ATH10K_DBG_AHB = 0x00008000,
ATH10K_DBG_ANY = 0xffffffff,
};
@@ -56,13 +57,17 @@ enum ath10k_dbg_aggr_mode {
};
/* FIXME: How to calculate the buffer size sanely? */
-#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
+#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
extern unsigned int ath10k_debug_mask;
__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
__printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
__printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
+
+void ath10k_debug_print_hwfw_info(struct ath10k *ar);
+void ath10k_debug_print_board_info(struct ath10k *ar);
+void ath10k_debug_print_boot_info(struct ath10k *ar);
void ath10k_print_driver_info(struct ath10k *ar);
#ifdef CONFIG_ATH10K_DEBUGFS
@@ -149,6 +154,17 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
#ifdef CONFIG_MAC80211_DEBUGFS
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats);
+void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo);
+#else
+static inline
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+}
#endif /* CONFIG_MAC80211_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 95b5c49374e0..9955fea0802a 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -18,6 +18,67 @@
#include "wmi-ops.h"
#include "debug.h"
+static void ath10k_sta_update_extd_stats_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+ struct ath10k_fw_extd_stats_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+
+ rcu_read_lock();
+ list_for_each_entry(peer, &stats->peers_extd, list) {
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr,
+ NULL);
+ if (!sta)
+ continue;
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ arsta->rx_duration += (u64)peer->rx_duration;
+ }
+ rcu_read_unlock();
+}
+
+static void ath10k_sta_update_stats_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+ struct ath10k_fw_stats_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+
+ rcu_read_lock();
+ list_for_each_entry(peer, &stats->peers, list) {
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr,
+ NULL);
+ if (!sta)
+ continue;
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ arsta->rx_duration += (u64)peer->rx_duration;
+ }
+ rcu_read_unlock();
+}
+
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+ if (stats->extended)
+ ath10k_sta_update_extd_stats_rx_duration(ar, stats);
+ else
+ ath10k_sta_update_stats_rx_duration(ar, stats);
+}
+
+void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+
+ if (!ath10k_peer_stats_enabled(ar))
+ return;
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION;
+}
+
static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 89e7076c919f..b2566b06e1e1 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -87,6 +87,10 @@ struct ath10k_hif_ops {
int (*suspend)(struct ath10k *ar);
int (*resume)(struct ath10k *ar);
+
+ /* fetch calibration data from target eeprom */
+ int (*fetch_cal_eeprom)(struct ath10k *ar, void **data,
+ size_t *data_len);
};
static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -202,4 +206,14 @@ static inline void ath10k_hif_write32(struct ath10k *ar,
ar->hif.ops->write32(ar, address, data);
}
+static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar,
+ void **data,
+ size_t *data_len)
+{
+ if (!ar->hif.ops->fetch_cal_eeprom)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len);
+}
+
#endif /* _HIF_H_ */
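
The op is deliberately optional: the wrapper above falls back to -EOPNOTSUPP when a bus backend leaves the pointer unset. A minimal sketch of a backend wiring it up; every "example_" name is hypothetical and not part of this patch:

static int example_bus_fetch_cal_eeprom(struct ath10k *ar, void **data,
                                        size_t *data_len)
{
        /* a real backend would copy the calibration blob from the
         * target eeprom into a freshly allocated buffer here
         */
        return -EOPNOTSUPP;
}

static const struct ath10k_hif_ops example_bus_hif_ops = {
        /* mandatory ops elided */
        .fetch_cal_eeprom = example_bus_fetch_cal_eeprom,
};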
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 5b3c6bcf9598..175aae38c375 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -44,7 +44,7 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
skb_cb = ATH10K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
- ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
return skb;
}
@@ -62,7 +62,7 @@ static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
{
struct ath10k *ar = ep->htc->ar;
- ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
ep->eid, skb);
ath10k_htc_restore_tx_skb(ep->htc, skb);
@@ -404,7 +404,7 @@ void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
goto out;
}
- ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ar, skb);
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index e70aa38e6e05..0c55cd92a951 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -22,7 +22,6 @@
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/skbuff.h>
-#include <linux/semaphore.h>
#include <linux/timer.h>
struct ath10k;
@@ -297,10 +296,10 @@ struct ath10k_htc_svc_conn_resp {
#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
#define ATH10K_HTC_MAX_LEN 4096
#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
-#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
sizeof(struct ath10k_htc_hdr))
-#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
struct ath10k_htc_ep {
struct ath10k_htc *htc;
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 3e6ba63dfdff..130cd9502021 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -131,12 +131,12 @@ static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
[HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
[HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] =
HTT_T2H_MSG_TYPE_TX_FETCH_IND,
- [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] =
- HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM] =
+ HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
[HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] =
HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
- [HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] =
- HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND] =
+ HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
};
int ath10k_htt_connect(struct ath10k_htt *htt)
@@ -149,7 +149,7 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
- conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
@@ -183,7 +183,7 @@ int ath10k_htt_init(struct ath10k *ar)
8 + /* llc snap */
2; /* ip4 dscp or ip6 priority */
- switch (ar->htt.op_version) {
+ switch (ar->running_fw->fw_file.htt_op_version) {
case ATH10K_FW_HTT_OP_VERSION_10_4:
ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
@@ -208,7 +208,7 @@ int ath10k_htt_init(struct ath10k *ar)
return 0;
}
-#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 2bad50e520b5..0d2ed09f202b 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/hashtable.h>
+#include <linux/kfifo.h>
#include <net/mac80211.h>
#include "htc.h"
@@ -52,6 +53,7 @@ enum htt_h2t_msg_type { /* host-to-target */
/* This command is used for sending management frames in HTT < 3.0.
* HTT >= 3.0 uses TX_FRM for everything. */
HTT_H2T_MSG_TYPE_MGMT_TX = 7,
+ HTT_H2T_MSG_TYPE_TX_FETCH_RESP = 11,
HTT_H2T_NUM_MSGS /* keep this last */
};
@@ -166,8 +168,13 @@ struct htt_data_tx_desc {
__le16 len;
__le16 id;
__le32 frags_paddr;
- __le16 peerid;
- __le16 freq;
+ union {
+ __le32 peerid;
+ struct {
+ __le16 peerid;
+ __le16 freq;
+ } __packed offchan_tx;
+ } __packed;
u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;
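
The new anonymous union gives two views of the same 32 bits: a plain 32-bit peerid for regular tx, and a packed peerid/freq pair for off-channel tx. A sketch of filling the off-channel view (hypothetical helper, not from this patch):

static void example_fill_offchan_tx(struct htt_data_tx_desc *desc,
                                    u16 peer_id, u16 freq_mhz)
{
        desc->offchan_tx.peerid = __cpu_to_le16(peer_id);
        desc->offchan_tx.freq = __cpu_to_le16(freq_mhz);
}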
@@ -408,10 +415,10 @@ enum htt_10_4_t2h_msg_type {
HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14,
HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15,
HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16,
- HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF = 0x17,
+ HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM = 0x17,
HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18,
/* 0x19 to 0x2f are reserved */
- HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND = 0x30,
+ HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND = 0x30,
/* keep this last */
HTT_10_4_T2H_NUM_MSGS
};
@@ -444,8 +451,8 @@ enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_TEST,
HTT_T2H_MSG_TYPE_EN_STATS,
HTT_T2H_MSG_TYPE_TX_FETCH_IND,
- HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
- HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+ HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
+ HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
/* keep this last */
HTT_T2H_NUM_MSGS
};
@@ -478,10 +485,10 @@ struct htt_mgmt_tx_completion {
__le32 status;
} __packed;
-#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x3F)
+#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F)
#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0)
-#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 6)
-#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7)
+#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5)
+#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F
#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0
@@ -588,7 +595,7 @@ enum htt_rx_mpdu_status {
/* only accept EAPOL frames */
HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
- /* Non-data in promiscous mode */
+ /* Non-data in promiscuous mode */
HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
@@ -893,7 +900,7 @@ struct htt_rx_in_ord_ind {
* Purpose: indicate how many 32-bit integers follow the message header
* - NUM_CHARS
* Bits 31:16
- * Purpose: indicate how many 8-bit charaters follow the series of integers
+ * Purpose: indicate how many 8-bit characters follow the series of integers
*/
struct htt_rx_test {
u8 num_ints;
@@ -1035,10 +1042,10 @@ struct htt_dbg_stats_wal_tx_stats {
/* illegal rate phy errors */
__le32 illgl_rate_phy_err;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_cont_xretry;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_tx_timeout;
/* wal pdev resets */
@@ -1301,9 +1308,43 @@ struct htt_frag_desc_bank_id {
* so we use a conservatively safe value for now */
#define HTT_FRAG_DESC_BANK_MAX 4
-#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
-#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0
-#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP (1 << 2)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0
+#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP BIT(2)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID BIT(3)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB 4
+
+enum htt_q_depth_type {
+ HTT_Q_DEPTH_TYPE_BYTES = 0,
+ HTT_Q_DEPTH_TYPE_MSDUS = 1,
+};
+
+#define HTT_TX_Q_STATE_NUM_PEERS (TARGET_10_4_NUM_QCACHE_PEERS_MAX + \
+ TARGET_10_4_NUM_VDEVS)
+#define HTT_TX_Q_STATE_NUM_TIDS 8
+#define HTT_TX_Q_STATE_ENTRY_SIZE 1
+#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0
+
+/**
+ * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
+ *
+ * Defines host q state format and behavior. See htt_q_state.
+ *
+ * @record_size: Defines the size of each host q entry in bytes. In practice,
+ * however, firmware (at least 10.4.3-00191) ignores this host
+ * configuration value and uses a hardcoded value of 1.
+ * @record_multiplier: This is valid only when q depth type is MSDUs. It
+ * defines the exponent for the power of 2 multiplication.
+ */
+struct htt_q_state_conf {
+ __le32 paddr;
+ __le16 num_peers;
+ __le16 num_tids;
+ u8 record_size;
+ u8 record_multiplier;
+ u8 pad[2];
+} __packed;
struct htt_frag_desc_bank_cfg {
u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
@@ -1311,6 +1352,122 @@ struct htt_frag_desc_bank_cfg {
u8 desc_size;
__le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_q_state_conf q_state;
+} __packed;
+
+#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128
+#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f
+#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0
+#define HTT_TX_Q_STATE_ENTRY_EXP_MASK 0xc0
+#define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6
+
+/**
+ * htt_q_state - shared between host and firmware via DMA
+ *
+ * This structure is used for the host to expose its software queue state to
+ * firmware so that its rate control can schedule fetch requests for optimized
+ * performance. This is most notably used for MU-MIMO aggregation when multiple
+ * MU clients are connected.
+ *
+ * @count: Each element defines the host queue depth. When the q depth type is
+ * configured as HTT_Q_DEPTH_TYPE_BYTES each entry is defined as:
+ * FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and
+ * HTT_TX_Q_STATE_ENTRY_EXP_MASK). When the q depth type is configured as
+ * HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 **
+ * record_multiplier (see htt_q_state_conf).
+ * @map: Used by firmware to quickly check which host queues are not empty. It
+ * is a bitmap with one bit per host queue.
+ * @seq: Used by firmware to quickly check if the host queues were updated
+ * since it last checked.
+ *
+ * FIXME: Is the q_state map[] size calculation really correct?
+ */
+struct htt_q_state {
+ u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS];
+ u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32];
+ __le32 seq;
+} __packed;
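
A worked decode of one count[][] byte under HTT_Q_DEPTH_TYPE_BYTES, i.e. FACTOR * 128 * 8^EXP (hypothetical helper, not part of this patch):

static inline u64 example_q_state_depth_bytes(u8 entry)
{
        u64 factor = (entry & HTT_TX_Q_STATE_ENTRY_FACTOR_MASK) >>
                     HTT_TX_Q_STATE_ENTRY_FACTOR_LSB;
        u64 exp = (entry & HTT_TX_Q_STATE_ENTRY_EXP_MASK) >>
                  HTT_TX_Q_STATE_ENTRY_EXP_LSB;

        /* 8^EXP == 1 << (3 * EXP) */
        return factor * HTT_TX_Q_STATE_ENTRY_COEFFICIENT << (3 * exp);
}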
+
+#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK 0x0fff
+#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB 0
+#define HTT_TX_FETCH_RECORD_INFO_TID_MASK 0xf000
+#define HTT_TX_FETCH_RECORD_INFO_TID_LSB 12
+
+struct htt_tx_fetch_record {
+ __le16 info; /* HTT_TX_FETCH_IND_RECORD_INFO_ */
+ __le16 num_msdus;
+ __le32 num_bytes;
+} __packed;
+
+struct htt_tx_fetch_ind {
+ u8 pad0;
+ __le16 fetch_seq_num;
+ __le32 token;
+ __le16 num_resp_ids;
+ __le16 num_records;
+ struct htt_tx_fetch_record records[0];
+ __le32 resp_ids[0]; /* ath10k_htt_get_tx_fetch_ind_resp_ids() */
+} __packed;
+
+static inline void *
+ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind)
+{
+ return (void *)&ind->records[le16_to_cpu(ind->num_records)];
+}
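
A parsing sketch (illustrative only): resp_ids[] begins immediately after num_records records, which is exactly the address the helper computes. MS() is the driver's existing mask/shift macro:

static void example_parse_tx_fetch_ind(struct htt_tx_fetch_ind *ind)
{
        __le32 *resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(ind);
        u16 num_records = le16_to_cpu(ind->num_records);
        u16 num_resp_ids = le16_to_cpu(ind->num_resp_ids);
        u16 peer_id, tid, i;

        for (i = 0; i < num_records; i++) {
                peer_id = MS(le16_to_cpu(ind->records[i].info),
                             HTT_TX_FETCH_RECORD_INFO_PEER_ID);
                tid = MS(le16_to_cpu(ind->records[i].info),
                         HTT_TX_FETCH_RECORD_INFO_TID);
                /* look up the {peer_id, tid} queue and push frames */
        }

        for (i = 0; i < num_resp_ids; i++)
                (void)le32_to_cpu(resp_ids[i]); /* echoed in tx fetch resp */
}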
+
+struct htt_tx_fetch_resp {
+ u8 pad0;
+ __le16 resp_id;
+ __le16 fetch_seq_num;
+ __le16 num_records;
+ __le32 token;
+ struct htt_tx_fetch_record records[0];
+} __packed;
+
+struct htt_tx_fetch_confirm {
+ u8 pad0;
+ __le16 num_resp_ids;
+ __le32 resp_ids[0];
+} __packed;
+
+enum htt_tx_mode_switch_mode {
+ HTT_TX_MODE_SWITCH_PUSH = 0,
+ HTT_TX_MODE_SWITCH_PUSH_PULL = 1,
+};
+
+#define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE BIT(0)
+#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK 0xfffe
+#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB 1
+
+#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK 0x0003
+#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB 0
+#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK 0xfffc
+#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB 2
+
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK 0x0fff
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB 0
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK 0xf000
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB 12
+
+struct htt_tx_mode_switch_record {
+ __le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */
+ __le16 num_max_msdus;
+} __packed;
+
+struct htt_tx_mode_switch_ind {
+ u8 pad0;
+ __le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */
+ __le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */
+ u8 pad1[2];
+ struct htt_tx_mode_switch_record records[0];
+} __packed;
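
A decode sketch for the packed fields (illustrative; num_push_allowed in ath10k_htt is presumably fed from the threshold when push/pull mode is selected):

static void example_parse_tx_mode_switch(struct htt_tx_mode_switch_ind *ind)
{
        u16 info0 = le16_to_cpu(ind->info0);
        u16 info1 = le16_to_cpu(ind->info1);
        bool enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
        u16 num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
        u16 mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
        u16 threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);

        (void)enable; (void)num_records; (void)mode; (void)threshold;
}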
+
+struct htt_channel_change {
+ u8 pad[3];
+ __le32 freq;
+ __le32 center_freq1;
+ __le32 center_freq2;
+ __le32 phymode;
} __packed;
union htt_rx_pn_t {
@@ -1318,10 +1475,10 @@ union htt_rx_pn_t {
u32 pn24;
/* TKIP or CCMP: 48-bit PN */
- u_int64_t pn48;
+ u64 pn48;
/* WAPI: 128-bit PN */
- u_int64_t pn128[2];
+ u64 pn128[2];
};
struct htt_cmd {
@@ -1335,6 +1492,7 @@ struct htt_cmd {
struct htt_oob_sync_req oob_sync_req;
struct htt_aggr_conf aggr_conf;
struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
+ struct htt_tx_fetch_resp tx_fetch_resp;
};
} __packed;
@@ -1359,16 +1517,25 @@ struct htt_resp {
struct htt_rx_pn_ind rx_pn_ind;
struct htt_rx_offload_ind rx_offload_ind;
struct htt_rx_in_ord_ind rx_in_ord_ind;
+ struct htt_tx_fetch_ind tx_fetch_ind;
+ struct htt_tx_fetch_confirm tx_fetch_confirm;
+ struct htt_tx_mode_switch_ind tx_mode_switch_ind;
+ struct htt_channel_change chan_change;
};
} __packed;
/*** host side structures follow ***/
struct htt_tx_done {
- u32 msdu_id;
- bool discard;
- bool no_ack;
- bool success;
+ u16 msdu_id;
+ u16 status;
+};
+
+enum htt_tx_compl_state {
+ HTT_TX_COMPL_STATE_NONE,
+ HTT_TX_COMPL_STATE_ACK,
+ HTT_TX_COMPL_STATE_NOACK,
+ HTT_TX_COMPL_STATE_DISCARD,
};
struct htt_peer_map_event {
@@ -1395,7 +1562,6 @@ struct ath10k_htt {
u8 target_version_major;
u8 target_version_minor;
struct completion target_version_received;
- enum ath10k_fw_htt_op_version op_version;
u8 max_num_amsdu;
u8 max_num_ampdu;
@@ -1489,17 +1655,19 @@ struct ath10k_htt {
struct idr pending_tx;
wait_queue_head_t empty_tx_wq;
+ /* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
+ DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
+
/* set if host-fw communication goes haywire
* used to avoid further failures */
bool rx_confused;
- struct tasklet_struct rx_replenish_task;
+ atomic_t num_mpdus_ready;
/* This is used to group tx/rx completions separately and process them
* in batches to reduce cache stalls */
- struct tasklet_struct txrx_compl_task;
- struct sk_buff_head tx_compl_q;
struct sk_buff_head rx_compl_q;
struct sk_buff_head rx_in_ord_compl_q;
+ struct sk_buff_head tx_fetch_ind_q;
/* rx_status template */
struct ieee80211_rx_status rx_status;
@@ -1513,6 +1681,17 @@ struct ath10k_htt {
dma_addr_t paddr;
struct ath10k_htt_txbuf *vaddr;
} txbuf;
+
+ struct {
+ bool enabled;
+ struct htt_q_state *vaddr;
+ dma_addr_t paddr;
+ u16 num_push_allowed;
+ u16 num_peers;
+ u16 num_tids;
+ enum htt_tx_mode_switch_mode mode;
+ enum htt_q_depth_type type;
+ } tx_q_state;
};
#define RX_HTT_HDR_STATUS_LEN 64
@@ -1555,7 +1734,7 @@ struct htt_rx_desc {
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
* aggregated traffic more nicely. */
-#define ATH10K_HTT_MAX_NUM_REFILL 16
+#define ATH10K_HTT_MAX_NUM_REFILL 100
/*
* DMA_MAP expects the buffer to be an integral number of cache lines.
@@ -1583,7 +1762,8 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar);
void ath10k_htt_rx_free(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
-void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
@@ -1592,11 +1772,31 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu);
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+ __le32 token,
+ __le16 fetch_seq_num,
+ struct htt_tx_fetch_record *records,
+ size_t num_records);
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_sync(struct ath10k *ar);
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+ bool is_presp);
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc);
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
-int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
+int ath10k_htt_tx(struct ath10k_htt *htt,
+ enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu);
+void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
+ struct sk_buff *skb);
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 6060dda4e910..961ef0626680 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -31,8 +31,9 @@
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
+#define HTT_RX_RING_REFILL_RESCHED_MS 5
+
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
-static void ath10k_htt_txrx_compl_task(unsigned long ptr);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
@@ -192,7 +193,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
} else if (num_deficit > 0) {
- tasklet_schedule(&htt->rx_replenish_task);
+ mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+ msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
}
spin_unlock_bh(&htt->rx_ring.lock);
}
@@ -223,12 +225,10 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
del_timer_sync(&htt->rx_ring.refill_retry_timer);
- tasklet_kill(&htt->rx_replenish_task);
- tasklet_kill(&htt->txrx_compl_task);
- skb_queue_purge(&htt->tx_compl_q);
skb_queue_purge(&htt->rx_compl_q);
skb_queue_purge(&htt->rx_in_ord_compl_q);
+ skb_queue_purge(&htt->tx_fetch_ind_q);
ath10k_htt_rx_ring_free(htt);
@@ -281,7 +281,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
- u8 **fw_desc, int *fw_desc_len,
struct sk_buff_head *amsdu)
{
struct ath10k *ar = htt->ar;
@@ -323,48 +322,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
return -EIO;
}
- /*
- * Copy the FW rx descriptor for this MSDU from the rx
- * indication message into the MSDU's netbuf. HL uses the
- * same rx indication message definition as LL, and simply
- * appends new info (fields from the HW rx desc, and the
- * MSDU payload itself). So, the offset into the rx
- * indication message only has to account for the standard
- * offset of the per-MSDU FW rx desc info within the
- * message, and how many bytes of the per-MSDU FW rx desc
- * info have already been consumed. (And the endianness of
- * the host, since for a big-endian host, the rx ind
- * message contents, including the per-MSDU rx desc bytes,
- * were byteswapped during upload.)
- */
- if (*fw_desc_len > 0) {
- rx_desc->fw_desc.info0 = **fw_desc;
- /*
- * The target is expected to only provide the basic
- * per-MSDU rx descriptors. Just to be sure, verify
- * that the target has not attached extension data
- * (e.g. LRO flow ID).
- */
-
- /* or more, if there's extension data */
- (*fw_desc)++;
- (*fw_desc_len)--;
- } else {
- /*
- * When an oversized AMSDU happened, FW will lost
- * some of MSDU status - in this case, the FW
- * descriptors provided will be less than the
- * actual MSDUs inside this MPDU. Mark the FW
- * descriptors so that it will still deliver to
- * upper stack, if no CRC error for this MPDU.
- *
- * FIX THIS - the FW descriptors are actually for
- * MSDUs in the end of this A-MSDU instead of the
- * beginning.
- */
- rx_desc->fw_desc.info0 = 0;
- }
-
msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
@@ -423,13 +380,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
return msdu_chaining;
}
-static void ath10k_htt_rx_replenish_task(unsigned long ptr)
-{
- struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
-
- ath10k_htt_rx_msdu_buff_replenish(htt);
-}
-
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
u32 paddr)
{
@@ -536,7 +486,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
- vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_DMA);
+ vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
if (!vaddr)
goto err_dma_ring;
@@ -545,7 +495,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
vaddr = dma_alloc_coherent(htt->ar->dev,
sizeof(*htt->rx_ring.alloc_idx.vaddr),
- &paddr, GFP_DMA);
+ &paddr, GFP_KERNEL);
if (!vaddr)
goto err_dma_idx;
@@ -563,15 +513,10 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
htt->rx_ring.sw_rd_idx.msdu_payld = 0;
hash_init(htt->rx_ring.skb_table);
- tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
- (unsigned long)htt);
-
- skb_queue_head_init(&htt->tx_compl_q);
skb_queue_head_init(&htt->rx_compl_q);
skb_queue_head_init(&htt->rx_in_ord_compl_q);
-
- tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
- (unsigned long)htt);
+ skb_queue_head_init(&htt->tx_fetch_ind_q);
+ atomic_set(&htt->num_mpdus_ready, 0);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
@@ -674,7 +619,7 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
rate &= ~RX_PPDU_START_RATE_FLAG;
sband = &ar->mac.sbands[status->band];
- status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate);
+ status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
break;
case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF:
@@ -798,7 +743,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
if (WARN_ON_ONCE(!arvif))
return NULL;
- if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ if (ath10k_mac_vif_chan(arvif->vif, &def))
return NULL;
return def.chan;
@@ -860,6 +805,8 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
if (!ch)
ch = ath10k_htt_rx_h_any_channel(ar);
+ if (!ch)
+ ch = ar->tgt_oper_chan;
spin_unlock_bh(&ar->data_lock);
if (!ch)
@@ -979,7 +926,7 @@ static void ath10k_process_rx(struct ath10k *ar,
*status = *rx_status;
ath10k_dbg(ar, ATH10K_DBG_DATA,
- "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
@@ -987,7 +934,8 @@ static void ath10k_process_rx(struct ath10k *ar,
is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
"mcast" : "ucast",
(__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
- status->flag == 0 ? "legacy" : "",
+ (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ?
+ "legacy" : "",
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
@@ -1005,7 +953,7 @@ static void ath10k_process_rx(struct ath10k *ar,
trace_ath10k_rx_hdr(ar, skb->data, skb->len);
trace_ath10k_rx_payload(ar, skb->data, skb->len);
- ieee80211_rx(ar->hw, skb);
+ ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
@@ -1014,7 +962,7 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
int len = ieee80211_hdrlen(hdr->frame_control);
if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
- ar->fw_features))
+ ar->running_fw->fw_file.fw_features))
len = round_up(len, 4);
return len;
@@ -1076,20 +1024,25 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
hdr = (void *)msdu->data;
/* Tail */
- skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ if (status->flag & RX_FLAG_IV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_tail_len(ar, enctype));
/* MMIC */
- if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
skb_trim(msdu, msdu->len - 8);
/* Head */
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
- memmove((void *)msdu->data + crypto_len,
- (void *)msdu->data, hdr_len);
- skb_pull(msdu, crypto_len);
+ memmove((void *)msdu->data + crypto_len,
+ (void *)msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
}
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
@@ -1098,9 +1051,11 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
const u8 first_hdr[64])
{
struct ieee80211_hdr *hdr;
+ struct htt_rx_desc *rxd;
size_t hdr_len;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
+ int l3_pad_bytes;
/* Delivered decapped frame:
* [nwifi 802.11 header] <-- replaced with 802.11 hdr
@@ -1114,7 +1069,13 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
*/
/* pull decapped header and copy SA & DA */
- hdr = (struct ieee80211_hdr *)msdu->data;
+ rxd = (void *)msdu->data - sizeof(*rxd);
+
+ l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ skb_put(msdu, l3_pad_bytes);
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
+
hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
ether_addr_copy(da, ieee80211_get_DA(hdr));
ether_addr_copy(sa, ieee80211_get_SA(hdr));
@@ -1142,6 +1103,7 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
size_t hdr_len, crypto_len;
void *rfc1042;
bool is_first, is_last, is_amsdu;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
rxd = (void *)msdu->data - sizeof(*rxd);
hdr = (void *)rxd->rx_hdr_status;
@@ -1158,8 +1120,8 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
- rfc1042 += round_up(hdr_len, 4) +
- round_up(crypto_len, 4);
+ rfc1042 += round_up(hdr_len, bytes_aligned) +
+ round_up(crypto_len, bytes_aligned);
}
if (is_amsdu)
@@ -1180,6 +1142,8 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
void *rfc1042;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
+ int l3_pad_bytes;
+ struct htt_rx_desc *rxd;
/* Delivered decapped frame:
* [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@@ -1190,6 +1154,11 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
if (WARN_ON_ONCE(!rfc1042))
return;
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ skb_put(msdu, l3_pad_bytes);
+ skb_pull(msdu, l3_pad_bytes);
+
/* pull decapped header and copy SA & DA */
eth = (struct ethhdr *)msdu->data;
ether_addr_copy(da, eth->h_dest);
@@ -1220,6 +1189,8 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
{
struct ieee80211_hdr *hdr;
size_t hdr_len;
+ int l3_pad_bytes;
+ struct htt_rx_desc *rxd;
/* Delivered decapped frame:
* [amsdu header] <-- replaced with 802.11 hdr
@@ -1227,7 +1198,11 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
* [payload]
*/
- skb_pull(msdu, sizeof(struct amsdu_subframe_hdr));
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+
+ skb_put(msdu, l3_pad_bytes);
+ skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
@@ -1330,6 +1305,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
bool has_tkip_err;
bool has_peer_idx_invalid;
bool is_decrypted;
+ bool is_mgmt;
u32 attention;
if (skb_queue_empty(amsdu))
@@ -1338,6 +1314,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
first = skb_peek(amsdu);
rxd = (void *)first->data - sizeof(*rxd);
+ is_mgmt = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
+
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
@@ -1379,6 +1358,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
RX_FLAG_MMIC_ERROR |
RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
+ RX_FLAG_ONLY_MONITOR |
RX_FLAG_MMIC_STRIPPED);
if (has_fcs_err)
@@ -1387,10 +1367,21 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
if (has_tkip_err)
status->flag |= RX_FLAG_MMIC_ERROR;
- if (is_decrypted)
- status->flag |= RX_FLAG_DECRYPTED |
- RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED;
+ /* Firmware reports all necessary management frames via WMI already.
+ * They are not reported to monitor interfaces at all so pass the ones
+ * coming via HTT to monitor interfaces instead. This simplifies
+ * matters a lot.
+ */
+ if (is_mgmt)
+ status->flag |= RX_FLAG_ONLY_MONITOR;
+
+ if (is_decrypted) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (likely(!is_mgmt))
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ }
skb_queue_walk(amsdu, msdu) {
ath10k_htt_rx_h_csum_offload(msdu);
@@ -1403,6 +1394,8 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
*/
if (!is_decrypted)
continue;
+ if (is_mgmt)
+ continue;
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
@@ -1503,14 +1496,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *rx_status)
{
- struct sk_buff *msdu;
- struct htt_rx_desc *rxd;
- bool is_mgmt;
- bool has_fcs_err;
-
- msdu = skb_peek(amsdu);
- rxd = (void *)msdu->data - sizeof(*rxd);
-
/* FIXME: It might be a good idea to do some fuzzy-testing to drop
* invalid/dangerous frames.
*/
@@ -1520,23 +1505,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
return false;
}
- is_mgmt = !!(rxd->attention.flags &
- __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
- has_fcs_err = !!(rxd->attention.flags &
- __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
-
- /* Management frames are handled via WMI events. The pros of such
- * approach is that channel is explicitly provided in WMI events
- * whereas HTT doesn't provide channel information for Rxed frames.
- *
- * However some firmware revisions don't report corrupted frames via
- * WMI so don't drop them.
- */
- if (is_mgmt && !has_fcs_err) {
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
- return false;
- }
-
if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
return false;
@@ -1558,25 +1526,50 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
__skb_queue_purge(amsdu);
}
-static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
- struct htt_rx_indication *rx)
+static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
- struct htt_rx_indication_mpdu_range *mpdu_ranges;
struct sk_buff_head amsdu;
- int num_mpdu_ranges;
- int fw_desc_len;
- u8 *fw_desc;
- int i, ret, mpdu_count = 0;
+ int ret, num_msdus;
- lockdep_assert_held(&htt->rx_ring.lock);
+ __skb_queue_head_init(&amsdu);
- if (htt->rx_confused)
- return;
+ spin_lock_bh(&htt->rx_ring.lock);
+ if (htt->rx_confused) {
+ spin_unlock_bh(&htt->rx_ring.lock);
+ return -EIO;
+ }
+ ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
+ if (ret < 0) {
+ ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
+ __skb_queue_purge(&amsdu);
+ /* FIXME: It's probably a good idea to reboot the
+ * device instead of leaving it inoperable.
+ */
+ htt->rx_confused = true;
+ return ret;
+ }
- fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
- fw_desc = (u8 *)&rx->fw_desc;
+ num_msdus = skb_queue_len(&amsdu);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
+ ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
+ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+
+ return num_msdus;
+}
+
+static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
+ struct htt_rx_indication *rx)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ int num_mpdu_ranges;
+ int i, mpdu_count = 0;
num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
@@ -1590,80 +1583,10 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
for (i = 0; i < num_mpdu_ranges; i++)
mpdu_count += mpdu_ranges[i].mpdu_count;
- while (mpdu_count--) {
- __skb_queue_head_init(&amsdu);
- ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
- &fw_desc_len, &amsdu);
- if (ret < 0) {
- ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
- __skb_queue_purge(&amsdu);
- /* FIXME: It's probably a good idea to reboot the
- * device instead of leaving it inoperable.
- */
- htt->rx_confused = true;
- break;
- }
-
- ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
- ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
- ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
- }
-
- tasklet_schedule(&htt->rx_replenish_task);
+ atomic_add(mpdu_count, &htt->num_mpdus_ready);
}
-static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
- struct htt_rx_fragment_indication *frag)
-{
- struct ath10k *ar = htt->ar;
- struct ieee80211_rx_status *rx_status = &htt->rx_status;
- struct sk_buff_head amsdu;
- int ret;
- u8 *fw_desc;
- int fw_desc_len;
-
- fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
- fw_desc = (u8 *)frag->fw_msdu_rx_desc;
-
- __skb_queue_head_init(&amsdu);
-
- spin_lock_bh(&htt->rx_ring.lock);
- ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
- &amsdu);
- spin_unlock_bh(&htt->rx_ring.lock);
-
- tasklet_schedule(&htt->rx_replenish_task);
-
- ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
-
- if (ret) {
- ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
- ret);
- __skb_queue_purge(&amsdu);
- return;
- }
-
- if (skb_queue_len(&amsdu) != 1) {
- ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
- __skb_queue_purge(&amsdu);
- return;
- }
-
- ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
- ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
-
- if (fw_desc_len > 0) {
- ath10k_dbg(ar, ATH10K_DBG_HTT,
- "expecting more fragmented rx in one indication %d\n",
- fw_desc_len);
- }
-}
-
-static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
+static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@@ -1675,19 +1598,19 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
switch (status) {
case HTT_DATA_TX_STATUS_NO_ACK:
- tx_done.no_ack = true;
+ tx_done.status = HTT_TX_COMPL_STATE_NOACK;
break;
case HTT_DATA_TX_STATUS_OK:
- tx_done.success = true;
+ tx_done.status = HTT_TX_COMPL_STATE_ACK;
break;
case HTT_DATA_TX_STATUS_DISCARD:
case HTT_DATA_TX_STATUS_POSTPONE:
case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
- tx_done.discard = true;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
break;
default:
ath10k_warn(ar, "unhandled tx completion status %d\n", status);
- tx_done.discard = true;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
break;
}
@@ -1697,7 +1620,20 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
msdu_id = resp->data_tx_completion.msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
- ath10k_txrx_tx_unref(htt, &tx_done);
+
+ /* kfifo_put: In practice firmware shouldn't fire off per-CE
+ * interrupt and main interrupt (MSI/-X range case) for the same
+ * HTC service so it should be safe to use kfifo_put w/o lock.
+ *
+ * From kfifo_put() documentation:
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these macros.
+ */
+ if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
+ ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
+ tx_done.msdu_id, tx_done.status);
+ ath10k_txrx_tx_unref(htt, &tx_done);
+ }
}
}
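The comment above describes the classic single-producer/single-consumer kfifo pattern: exactly one writer (the HTT tx-completion path here) and exactly one reader (the NAPI poll loop, see ath10k_htt_txrx_compl_task() below) may use kfifo_put()/kfifo_get() without locking. A self-contained sketch of the same pattern, with illustrative names:

    #include <linux/kfifo.h>
    #include <linux/printk.h>

    /* capacity must be a power of two */
    static DECLARE_KFIFO(done_fifo, u16, 256);

    /* single producer, e.g. an interrupt/completion path: lock-free */
    static void produce(u16 msdu_id)
    {
            if (!kfifo_put(&done_fifo, msdu_id))
                    pr_warn("done_fifo overrun, msdu_id %hu\n", msdu_id);
    }

    /* single consumer, e.g. a NAPI poll loop: also lock-free */
    static void consume(void)
    {
            u16 msdu_id;

            while (kfifo_get(&done_fifo, &msdu_id))
                    ; /* complete msdu_id here */
    }

Any second producer or consumer would reintroduce the need for the locked kfifo variants (kfifo_in_spinlocked() and friends).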
@@ -1832,14 +1768,15 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
RX_FLAG_MMIC_STRIPPED;
}
-static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
- struct sk_buff_head *list)
+static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+ struct sk_buff_head *list)
{
struct ath10k_htt *htt = &ar->htt;
struct ieee80211_rx_status *status = &htt->rx_status;
struct htt_rx_offload_msdu *rx;
struct sk_buff *msdu;
size_t offset;
+ int num_msdu = 0;
while ((msdu = __skb_dequeue(list))) {
/* Offloaded frames don't have Rx descriptor. Instead they have
@@ -1879,10 +1816,12 @@ static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
ath10k_process_rx(ar, status, msdu);
+ num_msdu++;
}
+ return num_msdu;
}
-static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
+static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (void *)skb->data;
@@ -1895,12 +1834,12 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
u8 tid;
bool offload;
bool frag;
- int ret;
+ int ret, num_msdus = 0;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused)
- return;
+ return -EIO;
skb_pull(skb, sizeof(resp->hdr));
skb_pull(skb, sizeof(resp->rx_in_ord_ind));
@@ -1919,7 +1858,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
ath10k_warn(ar, "dropping invalid in order rx indication\n");
- return;
+ return -EINVAL;
}
/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
@@ -1930,14 +1869,14 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
if (ret < 0) {
ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
htt->rx_confused = true;
- return;
+ return -EIO;
}
/* Offloaded frames are very different and need to be handled
* separately.
*/
if (offload)
- ath10k_htt_rx_h_rx_offload(ar, &list);
+ num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
while (!skb_queue_empty(&list)) {
__skb_queue_head_init(&amsdu);
@@ -1950,6 +1889,7 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* better to report something than nothing though. This
* should still give an idea about rx rate to the user.
*/
+ num_msdus += skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
@@ -1962,14 +1902,299 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
htt->rx_confused = true;
__skb_queue_purge(&list);
- return;
+ return -EIO;
+ }
+ }
+ return num_msdus;
+}
+
+static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
+ const __le32 *resp_ids,
+ int num_resp_ids)
+{
+ int i;
+ u32 resp_id;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
+ num_resp_ids);
+
+ for (i = 0; i < num_resp_ids; i++) {
+ resp_id = le32_to_cpu(resp_ids[i]);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
+ resp_id);
+
+ /* TODO: free resp_id */
+ }
+}
+
+static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_txq *txq;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ struct htt_tx_fetch_record *record;
+ size_t len;
+ size_t max_num_bytes;
+ size_t max_num_msdus;
+ size_t num_bytes;
+ size_t num_msdus;
+ const __le32 *resp_ids;
+ u16 num_records;
+ u16 num_resp_ids;
+ u16 peer_id;
+ u8 tid;
+ int ret;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
+ return;
+ }
+
+ num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
+ num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
+
+ len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
+ len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
+ num_records, num_resp_ids,
+ le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
+
+ if (!ar->htt.tx_q_state.enabled) {
+ ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
+ return;
+ }
+
+ if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
+ ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
+ return;
+ }
+
+ rcu_read_lock();
+
+ for (i = 0; i < num_records; i++) {
+ record = &resp->tx_fetch_ind.records[i];
+ peer_id = MS(le16_to_cpu(record->info),
+ HTT_TX_FETCH_RECORD_INFO_PEER_ID);
+ tid = MS(le16_to_cpu(record->info),
+ HTT_TX_FETCH_RECORD_INFO_TID);
+ max_num_msdus = le16_to_cpu(record->num_msdus);
+ max_num_bytes = le32_to_cpu(record->num_bytes);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
+ i, peer_id, tid, max_num_msdus, max_num_bytes);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It is okay to release the lock and use txq because RCU read
+ * lock is held.
+ */
+
+ if (unlikely(!txq)) {
+ ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ num_msdus = 0;
+ num_bytes = 0;
+
+ while (num_msdus < max_num_msdus &&
+ num_bytes < max_num_bytes) {
+ ret = ath10k_mac_tx_push_txq(hw, txq);
+ if (ret < 0)
+ break;
+
+ num_msdus++;
+ num_bytes += ret;
}
+
+ record->num_msdus = cpu_to_le16(num_msdus);
+ record->num_bytes = cpu_to_le32(num_bytes);
+
+ ath10k_htt_tx_txq_recalc(hw, txq);
+ }
+
+ rcu_read_unlock();
+
+ resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
+ ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
+
+ ret = ath10k_htt_tx_fetch_resp(ar,
+ resp->tx_fetch_ind.token,
+ resp->tx_fetch_ind.fetch_seq_num,
+ resp->tx_fetch_ind.records,
+ num_records);
+ if (unlikely(ret)) {
+ ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
+ le32_to_cpu(resp->tx_fetch_ind.token), ret);
+ /* FIXME: request fw restart */
+ }
+
+ ath10k_htt_tx_txq_sync(ar);
+}
+
+static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct htt_resp *resp = (void *)skb->data;
+ size_t len;
+ int num_resp_ids;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
+ return;
}
- tasklet_schedule(&htt->rx_replenish_task);
+ num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
+ len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
+ return;
+ }
+
+ ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
+ resp->tx_fetch_confirm.resp_ids,
+ num_resp_ids);
}
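Both fetch handlers validate these variable-length events in two steps: first that the fixed header fits in the buffer, and then, after reading the element counts out of that header, that the recomputed total length still fits. A generic sketch of the pattern (hypothetical structure and field names):

    /* hypothetical event: fixed header followed by a counted array */
    struct ev_hdr {
            __le16 num_items;
            __le32 items[];
    } __packed;

    static int ev_validate(struct sk_buff *skb)
    {
            const struct ev_hdr *ev = (const void *)skb->data;
            size_t len = sizeof(*ev);

            if (skb->len < len)        /* step 1: fixed part */
                    return -EINVAL;

            len += le16_to_cpu(ev->num_items) * sizeof(ev->items[0]);
            if (skb->len < len)        /* step 2: counted part */
                    return -EINVAL;

            return 0;
    }

Skipping the first check would mean reading num_items from beyond the end of a truncated buffer.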
-void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct htt_resp *resp = (void *)skb->data;
+ const struct htt_tx_mode_switch_record *record;
+ struct ieee80211_txq *txq;
+ struct ath10k_txq *artxq;
+ size_t len;
+ size_t num_records;
+ enum htt_tx_mode_switch_mode mode;
+ bool enable;
+ u16 info0;
+ u16 info1;
+ u16 threshold;
+ u16 peer_id;
+ u8 tid;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
+ return;
+ }
+
+ info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
+ info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
+
+ enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
+ num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
+ mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
+ threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
+ info0, info1, enable, num_records, mode, threshold);
+
+ len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
+ return;
+ }
+
+ switch (mode) {
+ case HTT_TX_MODE_SWITCH_PUSH:
+ case HTT_TX_MODE_SWITCH_PUSH_PULL:
+ break;
+ default:
+ ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
+ mode);
+ return;
+ }
+
+ if (!enable)
+ return;
+
+ ar->htt.tx_q_state.enabled = enable;
+ ar->htt.tx_q_state.mode = mode;
+ ar->htt.tx_q_state.num_push_allowed = threshold;
+
+ rcu_read_lock();
+
+ for (i = 0; i < num_records; i++) {
+ record = &resp->tx_mode_switch_ind.records[i];
+ info0 = le16_to_cpu(record->info0);
+ peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
+ tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It is okay to release the lock and use txq because RCU read
+ * lock is held.
+ */
+
+ if (unlikely(!txq)) {
+ ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ artxq = (void *)txq->drv_priv;
+ artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+
+ rcu_read_unlock();
+
+ ath10k_mac_tx_push_pending(ar);
+}
+
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+ bool release;
+
+ release = ath10k_htt_t2h_msg_handler(ar, skb);
+
+ /* Free the indication buffer */
+ if (release)
+ dev_kfree_skb_any(skb);
+}
+
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (struct htt_resp *)skb->data;
@@ -1985,8 +2210,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
- dev_kfree_skb_any(skb);
- return;
+ return true;
}
type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
@@ -1998,11 +2222,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
- spin_lock_bh(&htt->rx_ring.lock);
- __skb_queue_tail(&htt->rx_compl_q, skb);
- spin_unlock_bh(&htt->rx_ring.lock);
- tasklet_schedule(&htt->txrx_compl_task);
- return;
+ ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
+ break;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
.vdev_id = resp->peer_map.vdev_id,
@@ -2023,28 +2244,31 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
struct htt_tx_done tx_done = {};
int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
- tx_done.msdu_id =
- __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
+ tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
switch (status) {
case HTT_MGMT_TX_STATUS_OK:
- tx_done.success = true;
+ tx_done.status = HTT_TX_COMPL_STATE_ACK;
break;
case HTT_MGMT_TX_STATUS_RETRY:
- tx_done.no_ack = true;
+ tx_done.status = HTT_TX_COMPL_STATE_NOACK;
break;
case HTT_MGMT_TX_STATUS_DROP:
- tx_done.discard = true;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
break;
}
- ath10k_txrx_tx_unref(htt, &tx_done);
+ status = ath10k_txrx_tx_unref(htt, &tx_done);
+ if (!status) {
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_mgmt_dec_pending(htt);
+ spin_unlock_bh(&htt->tx_lock);
+ }
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
- skb_queue_tail(&htt->tx_compl_q, skb);
- tasklet_schedule(&htt->txrx_compl_task);
- return;
+ ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
+ break;
case HTT_T2H_MSG_TYPE_SEC_IND: {
struct ath10k *ar = htt->ar;
struct htt_security_indication *ev = &resp->security_indication;
@@ -2060,7 +2284,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
- ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
+ atomic_inc(&htt->num_mpdus_ready);
break;
}
case HTT_T2H_MSG_TYPE_TEST:
@@ -2083,12 +2307,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
ath10k_htt_rx_delba(ar, resp);
break;
case HTT_T2H_MSG_TYPE_PKTLOG: {
- struct ath10k_pktlog_hdr *hdr =
- (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
-
trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
- sizeof(*hdr) +
- __le16_to_cpu(hdr->size));
+ skb->len -
+ offsetof(struct htt_resp,
+ pktlog_msg.payload));
break;
}
case HTT_T2H_MSG_TYPE_RX_FLUSH: {
@@ -2098,22 +2320,41 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
- spin_lock_bh(&htt->rx_ring.lock);
__skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
- spin_unlock_bh(&htt->rx_ring.lock);
- tasklet_schedule(&htt->txrx_compl_task);
- return;
+ return false;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
break;
- case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
+ case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
+ u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
+ u32 freq = __le32_to_cpu(resp->chan_change.freq);
+
+ ar->tgt_oper_chan =
+ __ieee80211_get_channel(ar->hw->wiphy, freq);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt chan change freq %u phymode %s\n",
+ freq, ath10k_wmi_phymode_str(phymode));
break;
+ }
case HTT_T2H_MSG_TYPE_AGGR_CONF:
break;
+ case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
+ struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
+
+ if (!tx_fetch_ind) {
+ ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
+ break;
+ }
+ skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
+ ath10k_htt_rx_tx_fetch_confirm(ar, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
+ ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
+ break;
case HTT_T2H_MSG_TYPE_EN_STATS:
- case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
- case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
- case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
default:
ath10k_warn(ar, "htt event (%d) not handled\n",
resp->hdr.msg_type);
@@ -2121,34 +2362,116 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
skb->data, skb->len);
break;
};
+ return true;
+}
+EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
- /* Free the indication buffer */
+void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
dev_kfree_skb_any(skb);
}
-EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
+EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
-static void ath10k_htt_txrx_compl_task(unsigned long ptr)
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
- struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
- struct ath10k *ar = htt->ar;
- struct htt_resp *resp;
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_tx_done tx_done = {};
+ struct sk_buff_head tx_ind_q;
struct sk_buff *skb;
+ unsigned long flags;
+ int quota = 0, done, num_rx_msdus;
+ bool resched_napi = false;
+
+ __skb_queue_head_init(&tx_ind_q);
+
+ /* Since in-ord-ind can deliver more than 1 A-MSDU in single event,
+ * process it first to utilize full available quota.
+ */
+ while (quota < budget) {
+ if (skb_queue_empty(&htt->rx_in_ord_compl_q))
+ break;
+
+ skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
+ if (!skb) {
+ resched_napi = true;
+ goto exit;
+ }
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
+ spin_unlock_bh(&htt->rx_ring.lock);
+ if (num_rx_msdus < 0) {
+ resched_napi = true;
+ goto exit;
+ }
- while ((skb = skb_dequeue(&htt->tx_compl_q))) {
- ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
dev_kfree_skb_any(skb);
+ if (num_rx_msdus > 0)
+ quota += num_rx_msdus;
+
+ if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
+ !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
+ resched_napi = true;
+ goto exit;
+ }
}
- spin_lock_bh(&htt->rx_ring.lock);
- while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
- resp = (struct htt_resp *)skb->data;
- ath10k_htt_rx_handler(htt, &resp->rx_ind);
- dev_kfree_skb_any(skb);
+ while (quota < budget) {
+ /* no more data to receive */
+ if (!atomic_read(&htt->num_mpdus_ready))
+ break;
+
+ num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
+ if (num_rx_msdus < 0) {
+ resched_napi = true;
+ goto exit;
+ }
+
+ quota += num_rx_msdus;
+ atomic_dec(&htt->num_mpdus_ready);
+ if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
+ atomic_read(&htt->num_mpdus_ready)) {
+ resched_napi = true;
+ goto exit;
+ }
}
- while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
- ath10k_htt_rx_in_ord_ind(ar, skb);
+ /* From NAPI documentation:
+ * The napi poll() function may also process TX completions, in which
+ * case if it processes the entire TX ring then it should count that
+ * work as the rest of the budget.
+ */
+ if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
+ quota = budget;
+
+ /* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
+ * From kfifo_get() documentation:
+ * Note that with only one concurrent reader and one concurrent writer,
+ * you don't need extra locking to use these macros.
+ */
+ while (kfifo_get(&htt->txdone_fifo, &tx_done))
+ ath10k_txrx_tx_unref(htt, &tx_done);
+
+ ath10k_mac_tx_push_pending(ar);
+
+ spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
+ skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
+ spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
+
+ while ((skb = __skb_dequeue(&tx_ind_q))) {
+ ath10k_htt_rx_tx_fetch_ind(ar, skb);
dev_kfree_skb_any(skb);
}
- spin_unlock_bh(&htt->rx_ring.lock);
+
+exit:
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+ /* In case of rx failure or more data to read, report budget
+ * to reschedule NAPI poll
+ */
+ done = resched_napi ? budget : quota;
+
+ return done;
}
+EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
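The return-value convention implemented here is the standard NAPI contract: returning the full budget asks to be polled again, while returning less than the budget signals completion. A stripped-down sketch of a poll callback built on top of this helper (illustrative only: the real callback lives in the bus code and also re-enables device interrupts, and embedding the napi context in struct ath10k as ar->napi is an assumption of this sketch):

    static int example_napi_poll(struct napi_struct *napi, int budget)
    {
            struct ath10k *ar = container_of(napi, struct ath10k, napi);
            int done;

            done = ath10k_htt_txrx_compl_task(ar, budget);

            /* done == budget: more work pending, NAPI will poll again;
             * done < budget: processing finished, go back to interrupts
             */
            if (done < budget)
                    napi_complete(napi);

            return done;
    }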
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 16823970dbfd..ae5b33fe5ba8 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -22,53 +22,185 @@
#include "txrx.h"
#include "debug.h"
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
+static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
- if (limit_mgmt_desc)
- htt->num_pending_mgmt_tx--;
+ int exp;
+ int factor;
+
+ exp = 0;
+ factor = count >> 7;
+
+ while (factor >= 64 && exp < 4) {
+ factor >>= 3;
+ exp++;
+ }
+
+ if (exp == 4)
+ return 0xff;
+
+ if (count > 0)
+ factor = max(1, factor);
+
+ return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
+ SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
+}
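
The helper above packs a byte count into a single byte as exponent/factor: the count is pre-scaled by 128 (>> 7) and the factor is then divided by 8 until it fits in the field, so the decoded estimate is roughly factor << (7 + 3 * exp). A worked example with an illustrative count:

    /* count = 16384 bytes:
     *   factor = 16384 >> 7 = 128
     *   128 >= 64  ->  factor >>= 3 -> 16, exp = 1
     *   16 < 64    ->  stop
     *   encoded    =  SM(1, HTT_TX_Q_STATE_ENTRY_EXP) |
     *                 SM(16, HTT_TX_Q_STATE_ENTRY_FACTOR)
     *   decoded   ~=  16 << (7 + 3 * 1) = 16384 bytes
     *
     * (exact here; in general the encoding rounds down to the nearest
     * representable step, and exp == 4 saturates to 0xff)
     */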
+
+static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_sta *arsta;
+ struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
+ unsigned long frame_cnt;
+ unsigned long byte_cnt;
+ int idx;
+ u32 bit;
+ u16 peer_id;
+ u8 tid;
+ u8 count;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ if (!ar->htt.tx_q_state.enabled)
+ return;
+
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+ return;
+
+ if (txq->sta) {
+ arsta = (void *)txq->sta->drv_priv;
+ peer_id = arsta->peer_id;
+ } else {
+ peer_id = arvif->peer_id;
+ }
+
+ tid = txq->tid;
+ bit = BIT(peer_id % 32);
+ idx = peer_id / 32;
+
+ ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+ count = ath10k_htt_tx_txq_calc_size(byte_cnt);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
+ peer_id, tid);
+ return;
+ }
+
+ ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
+ ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
+ ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
+ peer_id, tid, count);
+}
+
+static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+ u32 seq;
+ size_t size;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ if (!ar->htt.tx_q_state.enabled)
+ return;
+
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+ return;
+
+ seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
+ seq++;
+ ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
+ seq);
+
+ size = sizeof(*ar->htt.tx_q_state.vaddr);
+ dma_sync_single_for_device(ar->dev,
+ ar->htt.tx_q_state.paddr,
+ size,
+ DMA_TO_DEVICE);
+}
+
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_recalc(hw, txq);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_sync(ar);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_recalc(hw, txq);
+ __ath10k_htt_tx_txq_sync(ar);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
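
Exposing recalc and sync separately lets a caller batch many per-queue updates into a single sequence-number bump and DMA sync; the tx fetch handler earlier in this patch does exactly that, one ath10k_htt_tx_txq_recalc() per fetch record and one ath10k_htt_tx_txq_sync() at the end. A sketch of the batched usage, with txqs[] as a hypothetical array of touched queues:

    for (i = 0; i < n; i++)
            ath10k_htt_tx_txq_recalc(hw, txqs[i]);  /* update shadow state */

    ath10k_htt_tx_txq_sync(ar);                     /* one seq bump + DMA sync */

ath10k_htt_tx_txq_update() is the convenience wrapper for the common single-queue case.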
+
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+ lockdep_assert_held(&htt->tx_lock);
htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}
-static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
- bool limit_mgmt_desc)
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
- spin_lock_bh(&htt->tx_lock);
- __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
- spin_unlock_bh(&htt->tx_lock);
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (htt->num_pending_tx >= htt->max_num_pending_tx)
+ return -EBUSY;
+
+ htt->num_pending_tx++;
+ if (htt->num_pending_tx == htt->max_num_pending_tx)
+ ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+
+ return 0;
}
-static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
- bool limit_mgmt_desc, bool is_probe_resp)
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+ bool is_presp)
{
struct ath10k *ar = htt->ar;
- int ret = 0;
- spin_lock_bh(&htt->tx_lock);
+ lockdep_assert_held(&htt->tx_lock);
- if (htt->num_pending_tx >= htt->max_num_pending_tx) {
- ret = -EBUSY;
- goto exit;
- }
+ if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
+ return 0;
- if (limit_mgmt_desc) {
- if (is_probe_resp && (htt->num_pending_mgmt_tx >
- ar->hw_params.max_probe_resp_desc_thres)) {
- ret = -EBUSY;
- goto exit;
- }
- htt->num_pending_mgmt_tx++;
- }
+ if (is_presp &&
+ ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
+ return -EBUSY;
- htt->num_pending_tx++;
- if (htt->num_pending_tx == htt->max_num_pending_tx)
- ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+ htt->num_pending_mgmt_tx++;
-exit:
- spin_unlock_bh(&htt->tx_lock);
- return ret;
+ return 0;
+}
+
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
+{
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (!htt->ar->hw_params.max_probe_resp_desc_thres)
+ return;
+
+ htt->num_pending_mgmt_tx--;
}
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
@@ -97,6 +229,87 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
idr_remove(&htt->pending_tx, msdu_id);
}
+static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
+{
+ size_t size;
+
+ if (!htt->frag_desc.vaddr)
+ return;
+
+ size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+
+ dma_free_coherent(htt->ar->dev,
+ size,
+ htt->frag_desc.vaddr,
+ htt->frag_desc.paddr);
+}
+
+static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+ htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
+ &htt->frag_desc.paddr,
+ GFP_KERNEL);
+ if (!htt->frag_desc.vaddr) {
+ ath10k_err(ar, "failed to alloc fragment desc memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ return;
+
+ size = sizeof(*htt->tx_q_state.vaddr);
+
+ dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
+ kfree(htt->tx_q_state.vaddr);
+}
+
+static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+ int ret;
+
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ return 0;
+
+ htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
+ htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
+ htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;
+
+ size = sizeof(*htt->tx_q_state.vaddr);
+ htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
+ if (!htt->tx_q_state.vaddr)
+ return -ENOMEM;
+
+ htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
+ size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
+ if (ret) {
+ ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
+ kfree(htt->tx_q_state.vaddr);
+ return -EIO;
+ }
+
+ return 0;
+}
+
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
@@ -111,36 +324,49 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size,
&htt->txbuf.paddr,
- GFP_DMA);
+ GFP_KERNEL);
if (!htt->txbuf.vaddr) {
ath10k_err(ar, "failed to alloc tx buffer\n");
ret = -ENOMEM;
goto free_idr_pending_tx;
}
- if (!ar->hw_params.continuous_frag_desc)
- goto skip_frag_desc_alloc;
-
- size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
- htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
- &htt->frag_desc.paddr,
- GFP_DMA);
- if (!htt->frag_desc.vaddr) {
- ath10k_warn(ar, "failed to alloc fragment desc memory\n");
- ret = -ENOMEM;
+ ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
goto free_txbuf;
}
-skip_frag_desc_alloc:
+ ret = ath10k_htt_tx_alloc_txq(htt);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc txq: %d\n", ret);
+ goto free_frag_desc;
+ }
+
+ size = roundup_pow_of_two(htt->max_num_pending_tx);
+ ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
+ goto free_txq;
+ }
+
return 0;
+free_txq:
+ ath10k_htt_tx_free_txq(htt);
+
+free_frag_desc:
+ ath10k_htt_tx_free_cont_frag_desc(htt);
+
free_txbuf:
size = htt->max_num_pending_tx *
sizeof(struct ath10k_htt_txbuf);
dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr,
htt->txbuf.paddr);
+
free_idr_pending_tx:
idr_destroy(&htt->pending_tx);
+
return ret;
}
@@ -152,8 +378,8 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
- tx_done.discard = 1;
tx_done.msdu_id = msdu_id;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
ath10k_txrx_tx_unref(htt, &tx_done);
@@ -174,12 +400,10 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
htt->txbuf.paddr);
}
- if (htt->frag_desc.vaddr) {
- size = htt->max_num_pending_tx *
- sizeof(struct htt_msdu_ext_desc);
- dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
- htt->frag_desc.paddr);
- }
+ ath10k_htt_tx_free_txq(htt);
+ ath10k_htt_tx_free_cont_frag_desc(htt);
+ WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
+ kfifo_free(&htt->txdone_fifo);
}
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -268,7 +492,9 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
+ struct htt_frag_desc_bank_cfg *cfg;
int ret, size;
+ u8 info;
if (!ar->hw_params.continuous_frag_desc)
return 0;
@@ -286,14 +512,31 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
skb_put(skb, size);
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
- cmd->frag_desc_bank_cfg.info = 0;
- cmd->frag_desc_bank_cfg.num_banks = 1;
- cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
- cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
- __cpu_to_le32(htt->frag_desc.paddr);
- cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
- cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
- __cpu_to_le16(htt->max_num_pending_tx - 1);
+
+ info = 0;
+ info |= SM(htt->tx_q_state.type,
+ HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
+
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
+
+ cfg = &cmd->frag_desc_bank_cfg;
+ cfg->info = info;
+ cfg->num_banks = 1;
+ cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
+ cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
+ cfg->bank_id[0].bank_min_id = 0;
+ cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
+ 1);
+
+ cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
+ cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
+ cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
+ cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
+ cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
@@ -439,6 +682,86 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
return 0;
}
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+ __le32 token,
+ __le16 fetch_seq_num,
+ struct htt_tx_fetch_record *records,
+ size_t num_records)
+{
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ const u16 resp_id = 0;
+ int len = 0;
+ int ret;
+
+ /* Response IDs are echoed back only for host driver convenience
+ * purposes. They aren't used for anything in the driver yet so use 0.
+ */
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->tx_fetch_resp);
+ len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
+ cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
+ cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
+ cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
+ cmd->tx_fetch_resp.token = token;
+
+ memcpy(cmd->tx_fetch_resp.records, records,
+ sizeof(records[0]) * num_records);
+
+ ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
+ goto err_free_skb;
+ }
+
+ return 0;
+
+err_free_skb:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+ struct ath10k_vif *arvif;
+
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ return ar->scan.vdev_id;
+ } else if (cb->vif) {
+ arvif = (void *)cb->vif->drv_priv;
+ return arvif->vdev_id;
+ } else if (ar->monitor_started) {
+ return ar->monitor_vdev_id;
+ } else {
+ return 0;
+ }
+}
+
+static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+ if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
+ return HTT_DATA_TX_EXT_TID_MGMT;
+ else if (cb->flags & ATH10K_SKB_F_QOS)
+ return skb->priority % IEEE80211_QOS_CTL_TID_MASK;
+ else
+ return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+}
+
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
struct ath10k *ar = htt->ar;
@@ -446,25 +769,11 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
- u8 vdev_id = skb_cb->vdev_id;
+ u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
int len = 0;
int msdu_id = -1;
int res;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
- bool limit_mgmt_desc = false;
- bool is_probe_resp = false;
-
- if (ar->hw_params.max_probe_resp_desc_thres) {
- limit_mgmt_desc = true;
-
- if (ieee80211_is_probe_resp(hdr->frame_control))
- is_probe_resp = true;
- }
-
- res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
-
- if (res)
- goto err;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
@@ -473,10 +782,17 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
spin_unlock_bh(&htt->tx_lock);
if (res < 0)
- goto err_tx_dec;
+ goto err;
msdu_id = res;
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
+
txdesc = ath10k_htc_alloc_skb(ar, len);
if (!txdesc) {
res = -ENOMEM;
@@ -503,8 +819,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
memcpy(cmd->mgmt_tx.hdr, msdu->data,
min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
- skb_cb->htt.txbuf = NULL;
-
res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
goto err_unmap_msdu;
@@ -519,65 +833,55 @@ err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
- ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
return res;
}
-int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct ath10k_hif_sg_item sg_items[2];
+ struct ath10k_htt_txbuf *txbuf;
struct htt_data_tx_desc_frag *frags;
- u8 vdev_id = skb_cb->vdev_id;
- u8 tid = skb_cb->htt.tid;
+ bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+ u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+ u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
int prefetch_len;
int res;
u8 flags0 = 0;
u16 msdu_id, flags1 = 0;
+ u16 freq = 0;
u32 frags_paddr = 0;
+ u32 txbuf_paddr;
struct htt_msdu_ext_desc *ext_desc = NULL;
- bool limit_mgmt_desc = false;
- bool is_probe_resp = false;
-
- if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
- ar->hw_params.max_probe_resp_desc_thres) {
- limit_mgmt_desc = true;
-
- if (ieee80211_is_probe_resp(hdr->frame_control))
- is_probe_resp = true;
- }
-
- res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
- if (res)
- goto err;
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
spin_unlock_bh(&htt->tx_lock);
if (res < 0)
- goto err_tx_dec;
+ goto err;
msdu_id = res;
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
- skb_cb->htt.txbuf = &htt->txbuf.vaddr[msdu_id];
- skb_cb->htt.txbuf_paddr = htt->txbuf.paddr +
- (sizeof(struct ath10k_htt_txbuf) * msdu_id);
+ txbuf = &htt->txbuf.vaddr[msdu_id];
+ txbuf_paddr = htt->txbuf.paddr +
+ (sizeof(struct ath10k_htt_txbuf) * msdu_id);
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
- } else if (!skb_cb->htt.nohwcrypt &&
- skb_cb->txmode == ATH10K_HW_TXRX_RAW &&
+ } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+ txmode == ATH10K_HW_TXRX_RAW &&
ieee80211_has_protected(hdr->frame_control)) {
skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
}
@@ -590,7 +894,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
goto err_free_msdu_id;
}
- switch (skb_cb->txmode) {
+ if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ switch (txmode) {
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
@@ -610,16 +917,16 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
frags_paddr = htt->frag_desc.paddr +
(sizeof(struct htt_msdu_ext_desc) * msdu_id);
} else {
- frags = skb_cb->htt.txbuf->frags;
+ frags = txbuf->frags;
frags[0].dword_addr.paddr =
__cpu_to_le32(skb_cb->paddr);
frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
frags[1].dword_addr.paddr = 0;
frags[1].dword_addr.len = 0;
- frags_paddr = skb_cb->htt.txbuf_paddr;
+ frags_paddr = txbuf_paddr;
}
- flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
break;
case ATH10K_HW_TXRX_MGMT:
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
@@ -646,17 +953,13 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
* avoid extra memory allocations, compress data structures and thus
* improve performance. */
- skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
- skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
- sizeof(skb_cb->htt.txbuf->cmd_hdr) +
- sizeof(skb_cb->htt.txbuf->cmd_tx) +
- prefetch_len);
- skb_cb->htt.txbuf->htc_hdr.flags = 0;
-
- if (skb_cb->htt.nohwcrypt)
- flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+ txbuf->htc_hdr.eid = htt->eid;
+ txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx) +
+ prefetch_len);
+ txbuf->htc_hdr.flags = 0;
- if (!skb_cb->is_protected)
+ if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
@@ -675,20 +978,27 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
*/
flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
- skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
- skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
- skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
- skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
- skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
- skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
- skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
- skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);
+ txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ txbuf->cmd_tx.flags0 = flags0;
+ txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+ txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+ txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+ txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+ if (ath10k_mac_tx_frm_has_freq(ar)) {
+ txbuf->cmd_tx.offchan_tx.peerid =
+ __cpu_to_le16(HTT_INVALID_PEERID);
+ txbuf->cmd_tx.offchan_tx.freq =
+ __cpu_to_le16(freq);
+ } else {
+ txbuf->cmd_tx.peerid =
+ __cpu_to_le32(HTT_INVALID_PEERID);
+ }
trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
flags0, flags1, msdu->len, msdu_id, frags_paddr,
- (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
+ (u32)skb_cb->paddr, vdev_id, tid, freq);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
@@ -696,12 +1006,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
sg_items[0].transfer_id = 0;
sg_items[0].transfer_context = NULL;
- sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
- sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
- sizeof(skb_cb->htt.txbuf->frags);
- sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
- sizeof(skb_cb->htt.txbuf->cmd_hdr) +
- sizeof(skb_cb->htt.txbuf->cmd_tx);
+ sg_items[0].vaddr = &txbuf->htc_hdr;
+ sg_items[0].paddr = txbuf_paddr +
+ sizeof(txbuf->frags);
+ sg_items[0].len = sizeof(txbuf->htc_hdr) +
+ sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx);
sg_items[1].transfer_id = 0;
sg_items[1].transfer_context = NULL;
@@ -720,11 +1030,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
- spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
- spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
- ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
return res;
}
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 7b84d08a5154..675e75d66db2 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -19,7 +19,6 @@
#include "hw.h"
const struct ath10k_hw_regs qca988x_regs = {
- .rtc_state_cold_reset_mask = 0x00000400,
.rtc_soc_base_address = 0x00004000,
.rtc_wmac_base_address = 0x00005000,
.soc_core_base_address = 0x00009000,
@@ -46,7 +45,6 @@ const struct ath10k_hw_regs qca988x_regs = {
};
const struct ath10k_hw_regs qca6174_regs = {
- .rtc_state_cold_reset_mask = 0x00002000,
.rtc_soc_base_address = 0x00000800,
.rtc_wmac_base_address = 0x00001000,
.soc_core_base_address = 0x0003a000,
@@ -73,7 +71,6 @@ const struct ath10k_hw_regs qca6174_regs = {
};
const struct ath10k_hw_regs qca99x0_regs = {
- .rtc_state_cold_reset_mask = 0x00000400,
.rtc_soc_base_address = 0x00080000,
.rtc_wmac_base_address = 0x00000000,
.soc_core_base_address = 0x00082000,
@@ -88,7 +85,7 @@ const struct ath10k_hw_regs qca99x0_regs = {
.ce7_base_address = 0x0004bc00,
/* Note: qca99x0 supports upto 12 Copy Engines. Other than address of
* CE0 and CE1 no other copy engine is directly referred in the code.
- * It is not really neccessary to assign address for newly supported
+ * It is not really necessary to assign address for newly supported
* CEs in this address table.
* Copy Engine Address
* CE8 0x0004c000
@@ -109,6 +106,38 @@ const struct ath10k_hw_regs qca99x0_regs = {
.pcie_intr_clr_address = 0x00000010,
};
+const struct ath10k_hw_regs qca4019_regs = {
+ .rtc_soc_base_address = 0x00080000,
+ .soc_core_base_address = 0x00082000,
+ .ce_wrapper_base_address = 0x0004d000,
+ .ce0_base_address = 0x0004a000,
+ .ce1_base_address = 0x0004a400,
+ .ce2_base_address = 0x0004a800,
+ .ce3_base_address = 0x0004ac00,
+ .ce4_base_address = 0x0004b000,
+ .ce5_base_address = 0x0004b400,
+ .ce6_base_address = 0x0004b800,
+ .ce7_base_address = 0x0004bc00,
+ /* qca4019 supports up to 12 copy engines. Since the base addresses
+ * of CE8 to CE11 are not directly referred to in the code, there is
+ * no need to have them as separate members in this table.
+ * Copy Engine Address
+ * CE8 0x0004c000
+ * CE9 0x0004c400
+ * CE10 0x0004c800
+ * CE11 0x0004cc00
+ */
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00000100,
+ .soc_chip_id_address = 0x000000ec,
+ .fw_indicator_address = 0x0004f00c,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
+ .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
+ .pcie_intr_fw_mask = 0x00100000,
+ .pcie_intr_ce_mask_all = 0x000fff00,
+ .pcie_intr_clr_address = 0x00000010,
+};
+
const struct ath10k_hw_values qca988x_values = {
.rtc_state_val_on = 3,
.ce_count = 8,
@@ -136,22 +165,70 @@ const struct ath10k_hw_values qca99x0_values = {
.ce_desc_meta_data_lsb = 4,
};
+const struct ath10k_hw_values qca9888_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 12,
+ .msi_assign_ce_max = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
+const struct ath10k_hw_values qca4019_values = {
+ .ce_count = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
{
u32 cc_fix = 0;
+ u32 rcc_fix = 0;
+ enum ath10k_hw_cc_wraparound_type wraparound_type;
survey->filled |= SURVEY_INFO_TIME |
SURVEY_INFO_TIME_BUSY;
- if (ar->hw_params.has_shifted_cc_wraparound && cc < cc_prev) {
- cc_fix = 0x7fffffff;
- survey->filled &= ~SURVEY_INFO_TIME_BUSY;
+ wraparound_type = ar->hw_params.cc_wraparound_type;
+
+ if (cc < cc_prev || rcc < rcc_prev) {
+ switch (wraparound_type) {
+ case ATH10K_HW_CC_WRAP_SHIFTED_ALL:
+ if (cc < cc_prev) {
+ cc_fix = 0x7fffffff;
+ survey->filled &= ~SURVEY_INFO_TIME_BUSY;
+ }
+ break;
+ case ATH10K_HW_CC_WRAP_SHIFTED_EACH:
+ if (cc < cc_prev)
+ cc_fix = 0x7fffffff;
+
+ if (rcc < rcc_prev)
+ rcc_fix = 0x7fffffff;
+ break;
+ case ATH10K_HW_CC_WRAP_DISABLED:
+ break;
+ }
}
cc -= cc_prev - cc_fix;
- rcc -= rcc_prev;
+ rcc -= rcc_prev - rcc_fix;
survey->time = CCNT_TO_MSEC(ar, cc);
survey->time_busy = CCNT_TO_MSEC(ar, rcc);
}
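
The fix-up above can be sanity-checked with plain 32-bit arithmetic. Hypothetical sample values, assuming the quirk means the value following 0xffffffff is 0x7fffffff:

    u32 cc_prev = 0xfffffff0;  /* previous sample, just below overflow */
    u32 cc      = 0x8000000f;  /* wrapped to 0x7fffffff, then 16 more ticks */
    u32 cc_fix  = (cc < cc_prev) ? 0x7fffffff : 0;
    u32 delta   = cc - (cc_prev - cc_fix);  /* same arithmetic as above */

delta comes out as 30 against 32 true ticks: the hardware jumps back by 0x80000001 while the fix-up only adds back 0x7fffffff, so the correction is approximate by a couple of ticks, which is negligible once converted to milliseconds by CCNT_TO_MSEC().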
+
+const struct ath10k_hw_ops qca988x_ops = {
+};
+
+static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
+{
+ return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
+ RX_MSDU_END_INFO1_L3_HDR_PAD);
+}
+
+const struct ath10k_hw_ops qca99x0_ops = {
+ .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
+};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 713c2bcea178..6038b7486f1d 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -26,7 +26,10 @@
#define QCA6164_2_1_DEVICE_ID (0x0041)
#define QCA6174_2_1_DEVICE_ID (0x003e)
#define QCA99X0_2_0_DEVICE_ID (0x0040)
+#define QCA9888_2_0_DEVICE_ID (0x0056)
+#define QCA9984_1_0_DEVICE_ID (0x0046)
#define QCA9377_1_0_DEVICE_ID (0x0042)
+#define QCA9887_1_0_DEVICE_ID (0x0050)
/* QCA988X 1.0 definitions (unsupported) */
#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
@@ -35,11 +38,16 @@
#define QCA988X_HW_2_0_VERSION 0x4100016c
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0"
-#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
-#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
+/* QCA9887 1.0 definitions */
+#define QCA9887_HW_1_0_VERSION 0x4100016d
+#define QCA9887_HW_1_0_CHIP_ID_REV 0
+#define QCA9887_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9887/hw1.0"
+#define QCA9887_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA9887_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
/* QCA6174 target BMI version signatures */
#define QCA6174_HW_1_0_VERSION 0x05000000
#define QCA6174_HW_1_1_VERSION 0x05000001
@@ -76,14 +84,10 @@ enum qca9377_chip_id_rev {
};
#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
-#define QCA6174_HW_2_1_FW_FILE "firmware.bin"
-#define QCA6174_HW_2_1_OTP_FILE "otp.bin"
#define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin"
#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
#define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0"
-#define QCA6174_HW_3_0_FW_FILE "firmware.bin"
-#define QCA6174_HW_3_0_OTP_FILE "otp.bin"
#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
@@ -94,18 +98,36 @@ enum qca9377_chip_id_rev {
#define QCA99X0_HW_2_0_DEV_VERSION 0x01000000
#define QCA99X0_HW_2_0_CHIP_ID_REV 0x1
#define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0"
-#define QCA99X0_HW_2_0_FW_FILE "firmware.bin"
-#define QCA99X0_HW_2_0_OTP_FILE "otp.bin"
#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
+/* QCA9984 1.0 defines */
+#define QCA9984_HW_1_0_DEV_VERSION 0x1000000
+#define QCA9984_HW_DEV_TYPE 0xa
+#define QCA9984_HW_1_0_CHIP_ID_REV 0x0
+#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0"
+#define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA9888 2.0 defines */
+#define QCA9888_HW_2_0_DEV_VERSION 0x1000000
+#define QCA9888_HW_DEV_TYPE 0xc
+#define QCA9888_HW_2_0_CHIP_ID_REV 0x0
+#define QCA9888_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA9888/hw2.0"
+#define QCA9888_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA9888_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
/* QCA9377 1.0 definitions */
#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
-#define QCA9377_HW_1_0_FW_FILE "firmware.bin"
-#define QCA9377_HW_1_0_OTP_FILE "otp.bin"
#define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234
+/* QCA4019 1.0 definitions */
+#define QCA4019_HW_1_0_DEV_VERSION 0x01000000
+#define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0"
+#define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
#define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin"
@@ -126,8 +148,6 @@ enum qca9377_chip_id_rev {
#define REG_DUMP_COUNT_QCA988X 60
-#define QCA988X_CAL_DATA_LEN 2116
-
struct ath10k_fw_ie {
__le32 id;
__le32 len;
@@ -199,11 +219,14 @@ enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
ATH10K_HW_QCA99X0,
+ ATH10K_HW_QCA9888,
+ ATH10K_HW_QCA9984,
ATH10K_HW_QCA9377,
+ ATH10K_HW_QCA4019,
+ ATH10K_HW_QCA9887,
};
struct ath10k_hw_regs {
- u32 rtc_state_cold_reset_mask;
u32 rtc_soc_base_address;
u32 rtc_wmac_base_address;
u32 soc_core_base_address;
@@ -232,6 +255,7 @@ struct ath10k_hw_regs {
extern const struct ath10k_hw_regs qca988x_regs;
extern const struct ath10k_hw_regs qca6174_regs;
extern const struct ath10k_hw_regs qca99x0_regs;
+extern const struct ath10k_hw_regs qca4019_regs;
struct ath10k_hw_values {
u32 rtc_state_val_on;
@@ -245,16 +269,22 @@ struct ath10k_hw_values {
extern const struct ath10k_hw_values qca988x_values;
extern const struct ath10k_hw_values qca6174_values;
extern const struct ath10k_hw_values qca99x0_values;
+extern const struct ath10k_hw_values qca9888_values;
+extern const struct ath10k_hw_values qca4019_values;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
+#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887)
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
+#define QCA_REV_9888(ar) ((ar)->hw_rev == ATH10K_HW_QCA9888)
+#define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984)
#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
+#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019)
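These single-chip predicates keep hardware-specific quirks greppable. A minimal sketch of how a caller might gate a quirk on them (ath10k_apply_board_quirks() is hypothetical, not part of this patch):

static void ath10k_apply_board_quirks(struct ath10k *ar)
{
	/* Gate a chip-specific workaround on the hw_rev predicate. */
	if (QCA_REV_9887(ar) || QCA_REV_988X(ar))
		ath10k_dbg(ar, ATH10K_DBG_BOOT, "applying 988x-family quirks\n");
}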
-/* Known pecularities:
+/* Known peculiarities:
* - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
* - raw have FCS, nwifi doesn't
* - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
@@ -277,15 +307,6 @@ enum ath10k_mcast2ucast_mode {
ATH10K_MCAST2UCAST_ENABLED = 1,
};
-struct ath10k_pktlog_hdr {
- __le16 flags;
- __le16 missed_cnt;
- __le16 log_type;
- __le16 size;
- __le32 timestamp;
- u8 payload[0];
-} __packed;
-
enum ath10k_hw_rate_ofdm {
ATH10K_HW_RATE_OFDM_48M = 0,
ATH10K_HW_RATE_OFDM_24M,
@@ -307,6 +328,110 @@ enum ath10k_hw_rate_cck {
ATH10K_HW_RATE_CCK_SP_2M,
};
+enum ath10k_hw_rate_rev2_cck {
+ ATH10K_HW_RATE_REV2_CCK_LP_1M = 1,
+ ATH10K_HW_RATE_REV2_CCK_LP_2M,
+ ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
+ ATH10K_HW_RATE_REV2_CCK_LP_11M,
+ ATH10K_HW_RATE_REV2_CCK_SP_2M,
+ ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
+ ATH10K_HW_RATE_REV2_CCK_SP_11M,
+};
+
+enum ath10k_hw_cc_wraparound_type {
+ ATH10K_HW_CC_WRAP_DISABLED = 0,
+
+ /* This type is when the HW chip has a quirky Cycle Counter
+ * wraparound which resets to 0x7fffffff instead of 0. All
+ * other CC related counters (e.g. Rx Clear Count) are divided
+ * by 2 so they never wrap around themselves.
+ */
+ ATH10K_HW_CC_WRAP_SHIFTED_ALL = 1,
+
+ /* Each hw counter wraps around independently. When a
+ * counter overflows, the respective counter is right shifted
+ * by 1, i.e. reset to 0x7fffffff, and the other counters keep
+ * running unaffected. In this type of wraparound, it should
+ * be possible to report accurate Rx busy time unlike the
+ * first type.
+ */
+ ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2,
+};
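The two shifted modes matter when computing channel survey deltas: a counter that resets to 0x7fffffff rather than 0 needs the wrap point added back. A minimal sketch of the delta math (illustrative only, not the driver's survey-time logic verbatim):

static u32 ath10k_cc_delta_sketch(u32 prev, u32 cur)
{
	if (cur >= prev)
		return cur - prev;

	/* Counter wrapped and was reset to 0x7fffffff instead of 0,
	 * so add the wrap point back into the delta.
	 */
	return (cur - prev) + 0x7fffffff;
}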
+
+struct ath10k_hw_params {
+ u32 id;
+ u16 dev_id;
+ const char *name;
+ u32 patch_load_addr;
+ int uart_pin;
+ u32 otp_exe_param;
+
+ /* Type of hw cycle counter wraparound logic, for more info
+ * refer to enum ath10k_hw_cc_wraparound_type.
+ */
+ enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
+
+ /* Some chips expect the fragment descriptor to be in contiguous
+ * memory for any TX operation. Set the continuous_frag_desc flag
+ * for hardware with such a requirement.
+ */
+ bool continuous_frag_desc;
+
+ /* The CCK hardware rate table mapping was revised for newer
+ * chipsets like QCA99X0 and QCA4019. The CCK h/w rate values
+ * are now in proper order with respect to the rate/preamble.
+ */
+ bool cck_rate_map_rev2;
+
+ u32 channel_counters_freq_hz;
+
+ /* Mgmt tx descriptors threshold for limiting probe response
+ * frames.
+ */
+ u32 max_probe_resp_desc_thres;
+
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 max_spatial_stream;
+ u32 cal_data_len;
+
+ struct ath10k_hw_params_fw {
+ const char *dir;
+ const char *board;
+ size_t board_size;
+ size_t board_ext_size;
+ } fw;
+
+ /* qca99x0 family chips deliver broadcast/multicast management
+ * frames encrypted and expect software to do the decryption.
+ */
+ bool sw_decrypt_mcast_mgmt;
+
+ const struct ath10k_hw_ops *hw_ops;
+
+ /* Number of bytes used for alignment in rx_hdr_status of rx desc. */
+ int decap_align_bytes;
+};
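Per-chip instances of this struct are typically collected in a table consulted at probe time. A hypothetical entry built from the fields declared above (the values, QCA9887_HW_1_0_VERSION, and QCA9887_HW_1_0_FW_DIR are illustrative assumptions, not taken from this patch):

static const struct ath10k_hw_params ath10k_hw_params_sketch = {
	.id = QCA9887_HW_1_0_VERSION,          /* assumed macro */
	.name = "qca9887 hw1.0",
	.patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
	.uart_pin = 7,                         /* illustrative value */
	.cc_wraparound_type = ATH10K_HW_CC_WRAP_DISABLED,
	.fw = {
		.dir = QCA9887_HW_1_0_FW_DIR,  /* assumed macro */
		.board = QCA9887_HW_1_0_BOARD_DATA_FILE,
	},
	.hw_ops = &qca988x_ops,
};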
+
+struct htt_rx_desc;
+
+/* Defines needed for Rx descriptor abstraction */
+struct ath10k_hw_ops {
+ int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
+};
+
+extern const struct ath10k_hw_ops qca988x_ops;
+extern const struct ath10k_hw_ops qca99x0_ops;
+
+static inline int
+ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
+ struct htt_rx_desc *rxd)
+{
+ if (hw->hw_ops->rx_desc_get_l3_pad_bytes)
+ return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);
+ return 0;
+}
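A short usage sketch of the abstraction (the ar->hw_params member is assumed from the core driver structure; chips whose ops table lacks the callback simply report zero padding):

static void ath10k_example_rx(struct ath10k *ar, struct htt_rx_desc *rxd)
{
	int l3_pad = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);

	/* l3_pad would be used to strip alignment bytes from the
	 * payload before handing the frame up.
	 */
	(void)l3_pad;
}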
+
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
@@ -348,14 +473,19 @@ enum ath10k_hw_rate_cck {
#define TARGET_10X_MAC_AGGR_DELIM 0
#define TARGET_10X_AST_SKID_LIMIT 128
#define TARGET_10X_NUM_STATIONS 128
+#define TARGET_10X_TX_STATS_NUM_STATIONS 118
#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
(TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_TX_STATS_NUM_PEERS ((TARGET_10X_TX_STATS_NUM_STATIONS) + \
+ (TARGET_10X_NUM_VDEVS))
#define TARGET_10X_NUM_OFFLOAD_PEERS 0
#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_10X_NUM_PEER_KEYS 2
#define TARGET_10X_NUM_TIDS_MAX 256
#define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \
(TARGET_10X_NUM_PEERS) * 2)
+#define TARGET_10X_TX_STATS_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \
+ (TARGET_10X_TX_STATS_NUM_PEERS) * 2)
#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
@@ -400,15 +530,14 @@ enum ath10k_hw_rate_cck {
#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512
#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC 35
#define TARGET_10_4_NUM_OFFLOAD_PEERS 0
#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0
#define TARGET_10_4_NUM_PEER_KEYS 2
#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_10_4_NUM_MSDU_DESC_PFC 2500
#define TARGET_10_4_AST_SKID_LIMIT 32
-#define TARGET_10_4_TX_CHAIN_MASK (BIT(0) | BIT(1) | \
- BIT(2) | BIT(3))
-#define TARGET_10_4_RX_CHAIN_MASK (BIT(0) | BIT(1) | \
- BIT(2) | BIT(3))
/* 100 ms for video, best-effort, and background */
#define TARGET_10_4_RX_TIMEOUT_LO_PRI 100
@@ -434,7 +563,6 @@ enum ath10k_hw_rate_cck {
#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
#define TARGET_10_4_VOW_CONFIG 0
#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV 3
-#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400)
#define TARGET_10_4_11AC_TX_MAX_FRAGS 2
#define TARGET_10_4_MAX_PEER_EXT_STATS 16
#define TARGET_10_4_SMART_ANT_CAP 0
@@ -468,7 +596,6 @@ enum ath10k_hw_rate_cck {
/* as of IP3.7.1 */
#define RTC_STATE_V_ON ar->hw_values->rtc_state_val_on
-#define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask
#define RTC_STATE_V_LSB 0
#define RTC_STATE_V_MASK 0x00000007
#define RTC_STATE_ADDRESS 0x0000
@@ -531,7 +658,10 @@ enum ath10k_hw_rate_cck {
#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
#define WLAN_GPIO_PIN0_ADDRESS 0x00000028
+#define WLAN_GPIO_PIN0_CONFIG_LSB 11
#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
+#define WLAN_GPIO_PIN0_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN0_PAD_PULL_MASK 0x00000060
#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c
#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
#define WLAN_GPIO_PIN10_ADDRESS 0x00000050
@@ -544,6 +674,8 @@ enum ath10k_hw_rate_cck {
#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
#define SI_CONFIG_OFFSET 0x00000000
+#define SI_CONFIG_ERR_INT_LSB 19
+#define SI_CONFIG_ERR_INT_MASK 0x00080000
#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
#define SI_CONFIG_I2C_LSB 16
@@ -557,7 +689,9 @@ enum ath10k_hw_rate_cck {
#define SI_CONFIG_DIVIDER_LSB 0
#define SI_CONFIG_DIVIDER_MASK 0x0000000f
#define SI_CS_OFFSET 0x00000004
+#define SI_CS_DONE_ERR_LSB 10
#define SI_CS_DONE_ERR_MASK 0x00000400
+#define SI_CS_DONE_INT_LSB 9
#define SI_CS_DONE_INT_MASK 0x00000200
#define SI_CS_START_LSB 8
#define SI_CS_START_MASK 0x00000100
@@ -586,6 +720,7 @@ enum ath10k_hw_rate_cck {
#define FW_INDICATOR_ADDRESS ar->regs->fw_indicator_address
#define FW_IND_EVENT_PENDING 1
#define FW_IND_INITIALIZED 2
+#define FW_IND_HOST_READY 0x80000000
/* HOST_REG interrupt from firmware */
#define PCIE_INTR_FIRMWARE_MASK ar->regs->pcie_intr_fw_mask
@@ -607,7 +742,10 @@ enum ath10k_hw_rate_cck {
#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS
#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS
+#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB
#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK
+#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB
+#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK
#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK
#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS
@@ -662,6 +800,18 @@ enum ath10k_hw_rate_cck {
#define WINDOW_READ_ADDR_ADDRESS MISSING
#define WINDOW_WRITE_ADDR_ADDRESS MISSING
+#define QCA9887_1_0_I2C_SDA_GPIO_PIN 5
+#define QCA9887_1_0_I2C_SDA_PIN_CONFIG 3
+#define QCA9887_1_0_SI_CLK_GPIO_PIN 17
+#define QCA9887_1_0_SI_CLK_PIN_CONFIG 3
+#define QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS 0x00000010
+
+#define QCA9887_EEPROM_SELECT_READ 0xa10000a0
+#define QCA9887_EEPROM_ADDR_HI_MASK 0x0000ff00
+#define QCA9887_EEPROM_ADDR_HI_LSB 8
+#define QCA9887_EEPROM_ADDR_LO_MASK 0x00ff0000
+#define QCA9887_EEPROM_ADDR_LO_LSB 16
+
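The HI/LO mask and LSB pairs describe how a 16-bit EEPROM offset is packed into the read-select register word. A minimal sketch using only the macros above:

static u32 qca9887_eeprom_addr_pack(u16 addr)
{
	/* High byte into bits 8-15, low byte into bits 16-23,
	 * OR'd with the read-select command word.
	 */
	return QCA9887_EEPROM_SELECT_READ |
	       (((addr >> 8) << QCA9887_EEPROM_ADDR_HI_LSB) &
		QCA9887_EEPROM_ADDR_HI_MASK) |
	       (((addr & 0xff) << QCA9887_EEPROM_ADDR_LO_LSB) &
		QCA9887_EEPROM_ADDR_LO_MASK);
}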
#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 1e1bef349487..76297d69f1ed 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -62,6 +62,32 @@ static struct ieee80211_rate ath10k_rates[] = {
{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};
+static struct ieee80211_rate ath10k_rates_rev2[] = {
+ { .bitrate = 10,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
@@ -70,6 +96,9 @@ static struct ieee80211_rate ath10k_rates[] = {
#define ath10k_g_rates (ath10k_rates + 0)
#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
+#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
+
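A band-setup sketch showing where the rev2 table would be selected, keyed off the cck_rate_map_rev2 flag in ath10k_hw_params (the plumbing around it is assumed, not shown in this hunk):

static void ath10k_example_setup_2g_rates(struct ath10k *ar,
					  struct ieee80211_supported_band *band)
{
	if (ar->hw_params.cck_rate_map_rev2) {
		band->bitrates = ath10k_g_rates_rev2;
		band->n_bitrates = ath10k_g_rates_rev2_size;
	} else {
		band->bitrates = ath10k_g_rates;
		band->n_bitrates = ath10k_g_rates_size;
	}
}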
static bool ath10k_mac_bitrate_is_cck(int bitrate)
{
switch (bitrate) {
@@ -90,7 +119,7 @@ static u8 ath10k_mac_bitrate_to_rate(int bitrate)
}
u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
- u8 hw_rate)
+ u8 hw_rate, bool cck)
{
const struct ieee80211_rate *rate;
int i;
@@ -98,6 +127,9 @@ u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
for (i = 0; i < sband->n_bitrates; i++) {
rate = &sband->bitrates[i];
+ if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
+ continue;
+
if (rate->hw_value == hw_rate)
return i;
else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
@@ -154,6 +186,26 @@ ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
return 1;
}
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
+{
+ enum wmi_host_platform_type platform_type;
+ int ret;
+
+ if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
+ platform_type = WMI_HOST_PLATFORM_LOW_PERF;
+ else
+ platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
+
+ ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
+
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/**********/
/* Crypto */
/**********/
@@ -247,7 +299,8 @@ static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
- arvif->vif->type != NL80211_IFTYPE_ADHOC))
+ arvif->vif->type != NL80211_IFTYPE_ADHOC &&
+ arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
return -EINVAL;
spin_lock_bh(&ar->data_lock);
@@ -445,10 +498,10 @@ static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(peer, &ar->peers, list) {
- if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN))
+ if (ether_addr_equal(peer->addr, arvif->vif->addr))
continue;
- if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN))
+ if (ether_addr_equal(peer->addr, arvif->bssid))
continue;
if (peer->keys[key->keyidx] == key)
@@ -478,7 +531,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
enum wmi_phy_mode phymode = MODE_UNKNOWN;
switch (chandef->chan->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
@@ -501,7 +554,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
break;
}
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
phymode = MODE_11A;
@@ -614,10 +667,15 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
*def = &conf->def;
}
-static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
+static int ath10k_peer_create(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 vdev_id,
+ const u8 *addr,
enum wmi_peer_type peer_type)
{
struct ath10k_vif *arvif;
+ struct ath10k_peer *peer;
int num_peers = 0;
int ret;
@@ -646,6 +704,22 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
return ret;
}
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, vdev_id, addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
+ addr, vdev_id);
+ ath10k_wmi_peer_delete(ar, vdev_id, addr);
+ return -ENOENT;
+ }
+
+ peer->vif = vif;
+ peer->sta = sta;
+
+ spin_unlock_bh(&ar->data_lock);
+
ar->num_peers++;
return 0;
@@ -727,6 +801,8 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_peer *peer, *tmp;
+ int peer_id;
+ int i;
lockdep_assert_held(&ar->conf_mutex);
@@ -738,6 +814,22 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
+ for_each_set_bit(peer_id, peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS) {
+ ar->peer_map[peer_id] = NULL;
+ }
+
+ /* Double check that peer is properly un-referenced from
+ * the peer_map
+ */
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ if (ar->peer_map[i] == peer) {
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
+ peer->addr, peer, i);
+ ar->peer_map[i] = NULL;
+ }
+ }
+
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
@@ -748,6 +840,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
static void ath10k_peer_cleanup_all(struct ath10k *ar)
{
struct ath10k_peer *peer, *tmp;
+ int i;
lockdep_assert_held(&ar->conf_mutex);
@@ -756,6 +849,10 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar)
list_del(&peer->list);
kfree(peer);
}
+
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
+ ar->peer_map[i] = NULL;
+
spin_unlock_bh(&ar->data_lock);
ar->num_peers = 0;
@@ -1354,10 +1451,7 @@ static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
const u8 *p2p_ie;
int ret;
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
- return 0;
-
- if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
return 0;
mgmt = (void *)bcn->data;
@@ -1724,7 +1818,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
!test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
- ar->fw_features)) {
+ ar->running_fw->fw_file.fw_features)) {
ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
arvif->vdev_id);
enable_ps = false;
@@ -1960,7 +2054,7 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
ether_addr_copy(arg->addr, sta->addr);
arg->vdev_id = arvif->vdev_id;
arg->peer_aid = aid;
- arg->peer_flags |= WMI_PEER_AUTH;
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
arg->peer_num_spatial_streams = 1;
arg->peer_caps = vif->bss_conf.assoc_capability;
@@ -1968,6 +2062,7 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
struct ieee80211_bss_conf *info = &vif->bss_conf;
@@ -2002,12 +2097,18 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
/* FIXME: base on RSN IE/WPA IE is a correct idea? */
if (rsnie || wpaie) {
ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
- arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
+ arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
}
if (wpaie) {
ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
- arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
+ arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
+ }
+
+ if (sta->mfp &&
+ test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
+ ar->running_fw->fw_file.fw_features)) {
+ arg->peer_flags |= ar->wmi.peer_flags->pmf;
}
}
@@ -2021,7 +2122,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
- enum ieee80211_band band;
+ enum nl80211_band band;
u32 ratemask;
u8 rate;
int i;
@@ -2081,7 +2182,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
int i, n;
@@ -2104,7 +2205,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
return;
- arg->peer_flags |= WMI_PEER_HT;
+ arg->peer_flags |= ar->wmi.peer_flags->ht;
arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ht_cap->ampdu_factor)) - 1;
@@ -2115,10 +2216,10 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
arg->peer_rate_caps |= WMI_RC_HT_FLAG;
if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
- arg->peer_flags |= WMI_PEER_LDPC;
+ arg->peer_flags |= ar->wmi.peer_flags->ldbc;
if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
- arg->peer_flags |= WMI_PEER_40MHZ;
+ arg->peer_flags |= ar->wmi.peer_flags->bw40;
arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
}
@@ -2132,7 +2233,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
- arg->peer_flags |= WMI_PEER_STBC;
+ arg->peer_flags |= ar->wmi.peer_flags->stbc;
}
if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
@@ -2140,7 +2241,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
arg->peer_rate_caps |= stbc;
- arg->peer_flags |= WMI_PEER_STBC;
+ arg->peer_flags |= ar->wmi.peer_flags->stbc;
}
if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
@@ -2305,7 +2406,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u16 *vht_mcs_mask;
u8 ampdu_factor;
@@ -2321,10 +2422,10 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
return;
- arg->peer_flags |= WMI_PEER_VHT;
+ arg->peer_flags |= ar->wmi.peer_flags->vht;
- if (def.chan->band == IEEE80211_BAND_2GHZ)
- arg->peer_flags |= WMI_PEER_VHT_2G;
+ if (def.chan->band == NL80211_BAND_2GHZ)
+ arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
arg->peer_vht_caps = vht_cap->cap;
@@ -2341,7 +2442,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
ampdu_factor)) - 1);
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
- arg->peer_flags |= WMI_PEER_80MHZ;
+ arg->peer_flags |= ar->wmi.peer_flags->bw80;
arg->peer_vht_rates.rx_max_rate =
__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
@@ -2366,32 +2467,33 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
if (sta->wme)
- arg->peer_flags |= WMI_PEER_QOS;
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
if (sta->wme && sta->uapsd_queues) {
- arg->peer_flags |= WMI_PEER_APSD;
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
}
break;
case WMI_VDEV_TYPE_STA:
if (vif->bss_conf.qos)
- arg->peer_flags |= WMI_PEER_QOS;
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
break;
case WMI_VDEV_TYPE_IBSS:
if (sta->wme)
- arg->peer_flags |= WMI_PEER_QOS;
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
break;
default:
break;
}
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
- sta->addr, !!(arg->peer_flags & WMI_PEER_QOS));
+ sta->addr, !!(arg->peer_flags &
+ arvif->ar->wmi.peer_flags->qos));
}
static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
- return sta->supp_rates[IEEE80211_BAND_2GHZ] >>
+ return sta->supp_rates[NL80211_BAND_2GHZ] >>
ATH10K_MAC_FIRST_OFDM_RATE_IDX;
}
@@ -2402,7 +2504,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
enum wmi_phy_mode phymode = MODE_UNKNOWN;
@@ -2415,7 +2517,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
if (sta->vht_cap.vht_supported &&
!ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
@@ -2435,7 +2537,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
}
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
/*
* Check VHT first.
*/
@@ -2479,7 +2581,7 @@ static int ath10k_peer_assoc_prepare(struct ath10k *ar,
memset(arg, 0, sizeof(*arg));
ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
- ath10k_peer_assoc_h_crypto(ar, vif, arg);
+ ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
@@ -2691,7 +2793,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret)
- ath10k_warn(ar, "faield to down vdev %i: %d\n",
+ ath10k_warn(ar, "failed to down vdev %i: %d\n",
arvif->vdev_id, ret);
arvif->def_wep_key_idx = -1;
@@ -2813,7 +2915,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
{
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_supported_band **bands;
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_channel *channel;
struct wmi_scan_chan_list_arg arg = {0};
struct wmi_channel_arg *ch;
@@ -2825,7 +2927,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
bands = hw->wiphy->bands;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!bands[band])
continue;
@@ -2844,7 +2946,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
return -ENOMEM;
ch = arg.channels;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!bands[band])
continue;
@@ -2854,7 +2956,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
if (channel->flags & IEEE80211_CHAN_DISABLED)
continue;
- ch->allow_ht = true;
+ ch->allow_ht = true;
/* FIXME: when should we really allow VHT? */
ch->allow_vht = true;
@@ -2882,7 +2984,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
/* FIXME: why use only legacy modes, why not any
* HT/VHT modes? Would that even make any
* difference? */
- if (channel->band == IEEE80211_BAND_2GHZ)
+ if (channel->band == NL80211_BAND_2GHZ)
ch->mode = MODE_11G;
else
ch->mode = MODE_11A;
@@ -2937,7 +3039,7 @@ static void ath10k_regd_update(struct ath10k *ar)
regpair = ar->ath_common.regulatory.regpair;
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
nl_dfs_reg = ar->dfs_detector->region;
wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
} else {
@@ -2966,7 +3068,7 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
request->dfs_region);
result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
@@ -2986,6 +3088,13 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
/* TX handlers */
/***************/
+enum ath10k_mac_tx_path {
+ ATH10K_MAC_TX_HTT,
+ ATH10K_MAC_TX_HTT_MGMT,
+ ATH10K_MAC_TX_WMI_MGMT,
+ ATH10K_MAC_TX_UNKNOWN,
+};
+
void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
{
lockdep_assert_held(&ar->htt.tx_lock);
@@ -3112,35 +3221,11 @@ void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
spin_unlock_bh(&ar->htt.tx_lock);
}
-static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
-{
- if (ieee80211_is_mgmt(hdr->frame_control))
- return HTT_DATA_TX_EXT_TID_MGMT;
-
- if (!ieee80211_is_data_qos(hdr->frame_control))
- return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
-
- if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
- return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
-
- return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
-}
-
-static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif)
-{
- if (vif)
- return ath10k_vif_to_arvif(vif)->vdev_id;
-
- if (ar->monitor_started)
- return ar->monitor_vdev_id;
-
- ath10k_warn(ar, "failed to resolve vdev id\n");
- return 0;
-}
-
static enum ath10k_hw_txrx_mode
-ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct sk_buff *skb)
+ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
{
const struct ieee80211_hdr *hdr = (void *)skb->data;
__le16 fc = hdr->frame_control;
@@ -3169,7 +3254,10 @@ ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
*/
if (ar->htt.target_version_major < 3 &&
(ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
- !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
+ !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->running_fw->fw_file.fw_features) &&
+ !test_bit(ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR,
+ ar->running_fw->fw_file.fw_features))
return ATH10K_HW_TXRX_MGMT;
/* Workaround:
@@ -3190,14 +3278,22 @@ ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif,
}
static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
- struct sk_buff *skb) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct sk_buff *skb)
+{
+ const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ const struct ieee80211_hdr *hdr = (void *)skb->data;
const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
IEEE80211_TX_CTL_INJECTED;
+
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return false;
+
if ((info->flags & mask) == mask)
return false;
+
if (vif)
return !ath10k_vif_to_arvif(vif)->nohwcrypt;
+
return true;
}
@@ -3224,7 +3320,7 @@ static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
*/
hdr = (void *)skb->data;
if (ieee80211_is_qos_nullfunc(hdr->frame_control))
- cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+ cb->flags &= ~ATH10K_SKB_F_QOS;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}
@@ -3264,8 +3360,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
/* This is case only for P2P_GO */
- if (arvif->vdev_type != WMI_VDEV_TYPE_AP ||
- arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
return;
if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
@@ -3280,7 +3375,29 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
}
}
-static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar)
+static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_txq *txq,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+ cb->flags = 0;
+ if (!ath10k_tx_h_use_hwcrypto(vif, skb))
+ cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ cb->flags |= ATH10K_SKB_F_MGMT;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ cb->flags |= ATH10K_SKB_F_QOS;
+
+ cb->vif = vif;
+ cb->txq = txq;
+}
+
+bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
{
/* FIXME: Not really sure since when the behaviour changed. At some
* point new firmware stopped requiring creation of peer entries for
@@ -3288,8 +3405,9 @@ static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar)
* tx credit replenishment and reliability). Assuming it's at least 3.4
* because that's when the `freq` was introduced to TX_FRM HTT command.
*/
- return !(ar->htt.target_version_major >= 3 &&
- ar->htt.target_version_minor >= 4);
+ return (ar->htt.target_version_major >= 3 &&
+ ar->htt.target_version_minor >= 4 &&
+ ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
}
static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
@@ -3314,26 +3432,50 @@ unlock:
return ret;
}
-static void ath10k_mac_tx(struct ath10k *ar, struct sk_buff *skb)
+static enum ath10k_mac_tx_path
+ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
+ struct sk_buff *skb,
+ enum ath10k_hw_txrx_mode txmode)
{
- struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
- struct ath10k_htt *htt = &ar->htt;
- int ret = 0;
-
- switch (cb->txmode) {
+ switch (txmode) {
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
case ATH10K_HW_TXRX_ETHERNET:
- ret = ath10k_htt_tx(htt, skb);
- break;
+ return ATH10K_MAC_TX_HTT;
case ATH10K_HW_TXRX_MGMT:
if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
- ar->fw_features))
- ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+ ar->running_fw->fw_file.fw_features))
+ return ATH10K_MAC_TX_WMI_MGMT;
else if (ar->htt.target_version_major >= 3)
- ret = ath10k_htt_tx(htt, skb);
+ return ATH10K_MAC_TX_HTT;
else
- ret = ath10k_htt_mgmt_tx(htt, skb);
+ return ATH10K_MAC_TX_HTT_MGMT;
+ }
+
+ return ATH10K_MAC_TX_UNKNOWN;
+}
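The refactor splits transmit into classify-then-submit: txmode describes the frame encapsulation, txpath picks the transport. A condensed sketch of the sequence a caller follows (error handling elided; ath10k_mac_tx_submit() is defined just below):

static int ath10k_example_xmit(struct ath10k *ar, struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta, struct sk_buff *skb)
{
	enum ath10k_hw_txrx_mode txmode;
	enum ath10k_mac_tx_path txpath;

	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);

	return ath10k_mac_tx_submit(ar, txmode, txpath, skb);
}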
+
+static int ath10k_mac_tx_submit(struct ath10k *ar,
+ enum ath10k_hw_txrx_mode txmode,
+ enum ath10k_mac_tx_path txpath,
+ struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ int ret = -EINVAL;
+
+ switch (txpath) {
+ case ATH10K_MAC_TX_HTT:
+ ret = ath10k_htt_tx(htt, txmode, skb);
+ break;
+ case ATH10K_MAC_TX_HTT_MGMT:
+ ret = ath10k_htt_mgmt_tx(htt, skb);
+ break;
+ case ATH10K_MAC_TX_WMI_MGMT:
+ ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+ break;
+ case ATH10K_MAC_TX_UNKNOWN:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
break;
}
@@ -3342,6 +3484,64 @@ static void ath10k_mac_tx(struct ath10k *ar, struct sk_buff *skb)
ret);
ieee80211_free_txskb(ar->hw, skb);
}
+
+ return ret;
+}
+
+/* This function consumes the sk_buff regardless of return value as far as
+ * the caller is concerned, so no freeing is necessary afterwards.
+ */
+static int ath10k_mac_tx(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ath10k_hw_txrx_mode txmode,
+ enum ath10k_mac_tx_path txpath,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int ret;
+
+ /* We should disable CCK RATE due to P2P */
+ if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+ switch (txmode) {
+ case ATH10K_HW_TXRX_MGMT:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ ath10k_tx_h_nwifi(hw, skb);
+ ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
+ ath10k_tx_h_seq_no(vif, skb);
+ break;
+ case ATH10K_HW_TXRX_ETHERNET:
+ ath10k_tx_h_8023(skb);
+ break;
+ case ATH10K_HW_TXRX_RAW:
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ WARN_ON_ONCE(1);
+ ieee80211_free_txskb(hw, skb);
+ return -ENOTSUPP;
+ }
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ if (!ath10k_mac_tx_frm_has_freq(ar)) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
+ skb);
+
+ skb_queue_tail(&ar->offchan_tx_queue, skb);
+ ieee80211_queue_work(hw, &ar->offchan_tx_work);
+ return 0;
+ }
+ }
+
+ ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit frame: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
}
void ath10k_offchan_tx_purge(struct ath10k *ar)
@@ -3361,7 +3561,12 @@ void ath10k_offchan_tx_work(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
struct ieee80211_hdr *hdr;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
struct sk_buff *skb;
const u8 *peer_addr;
int vdev_id;
@@ -3383,14 +3588,14 @@ void ath10k_offchan_tx_work(struct work_struct *work)
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
skb);
hdr = (struct ieee80211_hdr *)skb->data;
peer_addr = ieee80211_get_DA(hdr);
- vdev_id = ATH10K_SKB_CB(skb)->vdev_id;
spin_lock_bh(&ar->data_lock);
+ vdev_id = ar->scan.vdev_id;
peer = ath10k_peer_find(ar, vdev_id, peer_addr);
spin_unlock_bh(&ar->data_lock);
@@ -3400,7 +3605,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
peer_addr, vdev_id);
if (!peer) {
- ret = ath10k_peer_create(ar, vdev_id, peer_addr,
+ ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
+ peer_addr,
WMI_PEER_TYPE_DEFAULT);
if (ret)
ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
@@ -3413,12 +3619,33 @@ void ath10k_offchan_tx_work(struct work_struct *work)
ar->offchan_tx_skb = skb;
spin_unlock_bh(&ar->data_lock);
- ath10k_mac_tx(ar, skb);
+ /* It's safe to access vif and sta - conf_mutex guarantees that
+ * sta_state() and remove_interface() are locked out exclusively
+ * with respect to this offchannel worker.
+ */
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (arvif) {
+ vif = arvif->vif;
+ sta = ieee80211_find_sta(vif, peer_addr);
+ } else {
+ vif = NULL;
+ sta = NULL;
+ }
+
+ txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+
+ ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
+ ret);
+ /* not serious */
+ }
time_left =
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
if (time_left == 0)
- ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
+ ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
skb);
if (!peer && tmp_peer_created) {
@@ -3465,6 +3692,200 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
}
+static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
+{
+ struct ath10k_txq *artxq;
+
+ if (!txq)
+ return;
+
+ artxq = (void *)txq->drv_priv;
+ INIT_LIST_HEAD(&artxq->list);
+}
+
+static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
+{
+ struct ath10k_txq *artxq;
+ struct ath10k_skb_cb *cb;
+ struct sk_buff *msdu;
+ int msdu_id;
+
+ if (!txq)
+ return;
+
+ artxq = (void *)txq->drv_priv;
+ spin_lock_bh(&ar->txqs_lock);
+ if (!list_empty(&artxq->list))
+ list_del_init(&artxq->list);
+ spin_unlock_bh(&ar->txqs_lock);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
+ cb = ATH10K_SKB_CB(msdu);
+ if (cb->txq == txq)
+ cb->txq = NULL;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+ u16 peer_id,
+ u8 tid)
+{
+ struct ath10k_peer *peer;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ peer = ar->peer_map[peer_id];
+ if (!peer)
+ return NULL;
+
+ if (peer->sta)
+ return peer->sta->txq[tid];
+ else if (peer->vif)
+ return peer->vif->txq;
+ else
+ return NULL;
+}
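Completion and rx paths use this to map a firmware peer_id/tid pair back to a mac80211 queue. A sketch of a caller honoring the lockdep assertion (ath10k_example_txq_kick() is hypothetical):

static void ath10k_example_txq_kick(struct ath10k *ar, u16 peer_id, u8 tid)
{
	struct ieee80211_txq *txq;

	spin_lock_bh(&ar->data_lock);
	txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
	spin_unlock_bh(&ar->data_lock);

	if (txq)
		ath10k_htt_tx_txq_update(ar->hw, txq);
}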
+
+static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+ /* No need to get locks */
+
+ if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
+ return true;
+
+ if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
+ return true;
+
+ if (artxq->num_fw_queued < artxq->num_push_allowed)
+ return true;
+
+ return false;
+}
+
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_htt *htt = &ar->htt;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ieee80211_vif *vif = txq->vif;
+ struct ieee80211_sta *sta = txq->sta;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ size_t skb_len;
+ bool is_mgmt, is_presp;
+ int ret;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ret = ath10k_htt_tx_inc_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ if (ret)
+ return ret;
+
+ skb = ieee80211_tx_dequeue(hw, txq);
+ if (!skb) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return -ENOENT;
+ }
+
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
+
+ skb_len = skb->len;
+ txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+ is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
+
+ if (is_mgmt) {
+ hdr = (struct ieee80211_hdr *)skb->data;
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+
+ if (ret) {
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ return ret;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+
+ ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ if (unlikely(ret)) {
+ ath10k_warn(ar, "failed to push frame: %d\n", ret);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ if (is_mgmt)
+ ath10k_htt_tx_mgmt_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return ret;
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ artxq->num_fw_queued++;
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return skb_len;
+}
+
+void ath10k_mac_tx_push_pending(struct ath10k *ar)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_txq *txq;
+ struct ath10k_txq *artxq;
+ struct ath10k_txq *last;
+ int ret;
+ int max;
+
+ if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
+ return;
+
+ spin_lock_bh(&ar->txqs_lock);
+ rcu_read_lock();
+
+ last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
+ while (!list_empty(&ar->txqs)) {
+ artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+ txq = container_of((void *)artxq, struct ieee80211_txq,
+ drv_priv);
+
+ /* Prevent an aggressive sta/tid from taking over the tx queue */
+ max = 16;
+ ret = 0;
+ while (ath10k_mac_tx_can_push(hw, txq) && max--) {
+ ret = ath10k_mac_tx_push_txq(hw, txq);
+ if (ret < 0)
+ break;
+ }
+
+ list_del_init(&artxq->list);
+ if (ret != -ENOENT)
+ list_add_tail(&artxq->list, &ar->txqs);
+
+ ath10k_htt_tx_txq_update(hw, txq);
+
+ if (artxq == last || (ret < 0 && ret != -ENOENT))
+ break;
+ }
+
+ rcu_read_unlock();
+ spin_unlock_bh(&ar->txqs_lock);
+}
+
/************/
/* Scanning */
/************/
@@ -3478,19 +3899,24 @@ void __ath10k_scan_finish(struct ath10k *ar)
break;
case ATH10K_SCAN_RUNNING:
case ATH10K_SCAN_ABORTING:
- if (!ar->scan.is_roc)
- ieee80211_scan_completed(ar->hw,
- (ar->scan.state ==
- ATH10K_SCAN_ABORTING));
- else if (ar->scan.roc_notify)
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = (ar->scan.state ==
+ ATH10K_SCAN_ABORTING),
+ };
+
+ ieee80211_scan_completed(ar->hw, &info);
+ } else if (ar->scan.roc_notify) {
ieee80211_remain_on_channel_expired(ar->hw);
+ }
/* fall through */
case ATH10K_SCAN_STARTING:
ar->scan.state = ATH10K_SCAN_IDLE;
ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
ath10k_offchan_tx_purge(ar);
cancel_delayed_work(&ar->scan.timeout);
- complete_all(&ar->scan.completed);
+ complete(&ar->scan.completed);
break;
}
}
@@ -3519,7 +3945,7 @@ static int ath10k_scan_stop(struct ath10k *ar)
goto out;
}
- ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
if (ret == 0) {
ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
ret = -ETIMEDOUT;
@@ -3599,7 +4025,7 @@ static int ath10k_start_scan(struct ath10k *ar,
if (ret)
return ret;
- ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
+ ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
if (ret == 0) {
ret = ath10k_scan_stop(ar);
if (ret)
@@ -3626,67 +4052,100 @@ static int ath10k_start_scan(struct ath10k *ar,
/* mac80211 callbacks */
/**********************/
-static void ath10k_tx(struct ieee80211_hw *hw,
- struct ieee80211_tx_control *control,
- struct sk_buff *skb)
+static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct ath10k *ar = hw->priv;
+ struct ath10k_htt *htt = &ar->htt;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct ieee80211_sta *sta = control->sta;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- __le16 fc = hdr->frame_control;
+ struct ieee80211_txq *txq = NULL;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ bool is_htt;
+ bool is_mgmt;
+ bool is_presp;
+ int ret;
- /* We should disable CCK RATE due to P2P */
- if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
- ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
- ATH10K_SKB_CB(skb)->htt.is_offchan = false;
- ATH10K_SKB_CB(skb)->htt.freq = 0;
- ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
- ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb);
- ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
- ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
- ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
+ txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+ is_htt = (txpath == ATH10K_MAC_TX_HTT ||
+ txpath == ATH10K_MAC_TX_HTT_MGMT);
+ is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
- switch (ATH10K_SKB_CB(skb)->txmode) {
- case ATH10K_HW_TXRX_MGMT:
- case ATH10K_HW_TXRX_NATIVE_WIFI:
- ath10k_tx_h_nwifi(hw, skb);
- ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
- ath10k_tx_h_seq_no(vif, skb);
- break;
- case ATH10K_HW_TXRX_ETHERNET:
- ath10k_tx_h_8023(skb);
- break;
- case ATH10K_HW_TXRX_RAW:
- if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
- WARN_ON_ONCE(1);
- ieee80211_free_txskb(hw, skb);
+ if (is_htt) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+
+ ret = ath10k_htt_tx_inc_pending(htt);
+ if (ret) {
+ ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
+ ret);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ ieee80211_free_txskb(ar->hw, skb);
+ return;
+ }
+
+ ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
+ ret);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ ieee80211_free_txskb(ar->hw, skb);
return;
}
+ spin_unlock_bh(&ar->htt.tx_lock);
}
- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
- spin_lock_bh(&ar->data_lock);
- ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq;
- ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
- spin_unlock_bh(&ar->data_lock);
+ ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
+ if (is_htt) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ if (is_mgmt)
+ ath10k_htt_tx_mgmt_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+ return;
+ }
+}
- if (ath10k_mac_need_offchan_tx_work(ar)) {
- ATH10K_SKB_CB(skb)->htt.freq = 0;
- ATH10K_SKB_CB(skb)->htt.is_offchan = true;
+static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ieee80211_txq *f_txq;
+ struct ath10k_txq *f_artxq;
+ int ret = 0;
+ int max = 16;
- ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
- skb);
+ spin_lock_bh(&ar->txqs_lock);
+ if (list_empty(&artxq->list))
+ list_add_tail(&artxq->list, &ar->txqs);
- skb_queue_tail(&ar->offchan_tx_queue, skb);
- ieee80211_queue_work(hw, &ar->offchan_tx_work);
- return;
- }
+ f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+ f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
+ list_del_init(&f_artxq->list);
+
+ while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
+ ret = ath10k_mac_tx_push_txq(hw, f_txq);
+ if (ret)
+ break;
}
+ if (ret != -ENOENT)
+ list_add_tail(&f_artxq->list, &ar->txqs);
+ spin_unlock_bh(&ar->txqs_lock);
- ath10k_mac_tx(ar, skb);
+ ath10k_htt_tx_txq_update(hw, f_txq);
+ ath10k_htt_tx_txq_update(hw, txq);
}
/* Must not be called with conf_mutex held as workers can use that also. */
@@ -3826,6 +4285,9 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
}
+ if (ar->cfg_tx_chainmask <= 1)
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
@@ -3845,7 +4307,8 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
- ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+ ht_cap.cap |=
+ WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
@@ -3862,7 +4325,7 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
ht_cap.cap |= smps;
}
- if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
+ if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
@@ -3907,14 +4370,11 @@ static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
vht_cap = ath10k_create_vht_cap(ar);
if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
- band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
band->ht_cap = ht_cap;
-
- /* Enable the VHT support at 2.4 GHz */
- band->vht_cap = vht_cap;
}
if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
- band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
band->ht_cap = ht_cap;
band->vht_cap = vht_cap;
}
@@ -3972,12 +4432,12 @@ static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
- u32 burst_enable;
+ u32 param;
int ret = 0;
/*
* This makes sense only when restarting hw. It is harmless to call
- * uncoditionally. This is necessary to make sure no HTT/WMI tx
+ * unconditionally. This is necessary to make sure no HTT/WMI tx
* commands will be submitted while restarting.
*/
ath10k_drain_tx(ar);
@@ -4009,19 +4469,22 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_off;
}
- ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+ &ar->normal_mode_fw);
if (ret) {
ath10k_err(ar, "Could not init core: %d\n", ret);
goto err_power_down;
}
- ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
+ param = ar->wmi.pdev_param->pmf_qos;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
if (ret) {
ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
goto err_core_stop;
}
- ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
+ param = ar->wmi.pdev_param->dynamic_bw;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
if (ret) {
ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
goto err_core_stop;
@@ -4037,8 +4500,8 @@ static int ath10k_start(struct ieee80211_hw *hw)
}
if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
- burst_enable = ar->wmi.pdev_param->burst_enable;
- ret = ath10k_wmi_pdev_set_param(ar, burst_enable, 0);
+ param = ar->wmi.pdev_param->burst_enable;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 0);
if (ret) {
ath10k_warn(ar, "failed to disable burst: %d\n", ret);
goto err_core_stop;
@@ -4056,8 +4519,8 @@ static int ath10k_start(struct ieee80211_hw *hw)
* this problem.
*/
- ret = ath10k_wmi_pdev_set_param(ar,
- ar->wmi.pdev_param->arp_ac_override, 0);
+ param = ar->wmi.pdev_param->arp_ac_override;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 0);
if (ret) {
ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
ret);
@@ -4065,7 +4528,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
}
if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
- ar->fw_features)) {
+ ar->running_fw->fw_file.fw_features)) {
ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
WMI_CCA_DETECT_LEVEL_AUTO,
WMI_CCA_DETECT_MARGIN_AUTO);
@@ -4076,8 +4539,8 @@ static int ath10k_start(struct ieee80211_hw *hw)
}
}
- ret = ath10k_wmi_pdev_set_param(ar,
- ar->wmi.pdev_param->ani_enable, 1);
+ param = ar->wmi.pdev_param->ani_enable;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
if (ret) {
ath10k_warn(ar, "failed to enable ani by default: %d\n",
ret);
@@ -4086,6 +4549,31 @@ static int ath10k_start(struct ieee80211_hw *hw)
ar->ani_enabled = true;
+ if (ath10k_peer_stats_enabled(ar)) {
+ param = ar->wmi.pdev_param->peer_stats_update_period;
+ ret = ath10k_wmi_pdev_set_param(ar, param,
+ PEER_DEFAULT_STATS_UPDATE_PERIOD);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set peer stats period : %d\n",
+ ret);
+ goto err_core_stop;
+ }
+ }
+
+ param = ar->wmi.pdev_param->enable_btcoex;
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
+ test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features)) {
+ ret = ath10k_wmi_pdev_set_param(ar, param, 0);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set btcoex param: %d\n", ret);
+ goto err_core_stop;
+ }
+ clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+ }
+
ar->num_started_vdevs = 0;
ath10k_regd_update(ar);
@@ -4287,6 +4775,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_peer *peer;
enum wmi_sta_powersave_param param;
int ret = 0;
u32 value;
@@ -4299,6 +4788,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
memset(arvif, 0, sizeof(*arvif));
+ ath10k_mac_txq_init(vif->txq);
arvif->ar = ar;
arvif->vif = vif;
@@ -4333,24 +4823,30 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
bit, ar->free_vdev_map);
arvif->vdev_id = bit;
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+ arvif->vdev_subtype =
+ ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
break;
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
if (vif->p2p)
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
break;
case NL80211_IFTYPE_ADHOC:
arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
break;
case NL80211_IFTYPE_MESH_POINT:
- if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_MESH_11S);
+ } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
ret = -EINVAL;
ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
goto err;
@@ -4361,7 +4857,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->vdev_type = WMI_VDEV_TYPE_AP;
if (vif->p2p)
- arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_P2P_GO);
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
@@ -4475,13 +4972,31 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
- ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr,
- WMI_PEER_TYPE_DEFAULT);
+ ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
+ vif->addr, WMI_PEER_TYPE_DEFAULT);
if (ret) {
ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
+ if (!peer) {
+ ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+ vif->addr, arvif->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ ret = -ENOENT;
+ goto err_peer_delete;
+ }
+
+ arvif->peer_id = find_first_bit(peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS);
+
+ spin_unlock_bh(&ar->data_lock);
+ } else {
+ arvif->peer_id = HTT_INVALID_PEERID;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
@@ -4592,7 +5107,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_peer *peer;
int ret;
+ int i;
cancel_work_sync(&arvif->ap_csa_work);
cancel_delayed_work_sync(&arvif->connection_loss_work);
@@ -4646,7 +5163,22 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
}
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ peer = ar->peer_map[i];
+ if (!peer)
+ continue;
+
+ if (peer->vif == vif) {
+ ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->vif = NULL;
+ }
+ }
+ spin_unlock_bh(&ar->data_lock);
+
ath10k_peer_cleanup(ar, arvif->vdev_id);
+ ath10k_mac_txq_unref(ar, vif->txq);
if (vif->type == NL80211_IFTYPE_MONITOR) {
ar->monitor_arvif = NULL;
@@ -4659,6 +5191,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_mac_vif_tx_unlock_all(arvif);
spin_unlock_bh(&ar->htt.tx_lock);
+ ath10k_mac_txq_unref(ar, vif->txq);
+
mutex_unlock(&ar->conf_mutex);
}
@@ -4690,7 +5224,7 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
ret = ath10k_monitor_recalc(ar);
if (ret)
- ath10k_warn(ar, "failed to recalc montior: %d\n", ret);
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
mutex_unlock(&ar->conf_mutex);
}
@@ -5188,7 +5722,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
struct ath10k_sta *arsta;
struct ieee80211_sta *sta;
struct cfg80211_chan_def def;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
u32 changed, bw, nss, smps;
@@ -5363,13 +5897,18 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k_peer *peer;
int ret = 0;
+ int i;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ ath10k_mac_txq_init(sta->txq[i]);
}
/* cancel must be done outside the mutex to avoid deadlock */
@@ -5404,8 +5943,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
if (sta->tdls)
peer_type = WMI_PEER_TYPE_TDLS;
- ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr,
- peer_type);
+ ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
+ sta->addr, peer_type);
if (ret) {
ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
@@ -5413,6 +5952,24 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
goto exit;
}
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+				    sta->addr, arvif->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ arsta->peer_id = find_first_bit(peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS);
+
+ spin_unlock_bh(&ar->data_lock);
+
if (!sta->tdls)
goto exit;
@@ -5465,8 +6022,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
* Existing station deletion.
*/
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac vdev %d peer delete %pM (sta gone)\n",
- arvif->vdev_id, sta->addr);
+ "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
+ arvif->vdev_id, sta->addr, sta);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
@@ -5475,6 +6032,31 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_mac_dec_num_stations(arvif, sta);
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ peer = ar->peer_map[i];
+ if (!peer)
+ continue;
+
+ if (peer->sta == sta) {
+ ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
+ sta->addr, peer, i, arvif->vdev_id);
+ peer->sta = NULL;
+
+ /* Clean up the peer object as well since we
+ * must have failed to do this above.
+ */
+ list_del(&peer->list);
+ ar->peer_map[i] = NULL;
+ kfree(peer);
+ ar->num_peers--;
+ }
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ ath10k_mac_txq_unref(ar, sta->txq[i]);
+
if (!sta->tdls)
goto exit;
@@ -5721,7 +6303,7 @@ exit:
return ret;
}
-#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
+#define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -5785,7 +6367,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
goto exit;
}
- ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
if (ret == 0) {
ath10k_warn(ar, "failed to switch to channel for roc scan\n");
@@ -5937,6 +6519,39 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
+static void
+ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
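+	/* READ_CLEAR asks the firmware to reset its counters after reporting,
+	 * so each survey reflects only the interval since the last request.
+	 */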
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
+ (ar->rx_channel != channel))
+ return;
+
+ if (ar->scan.state != ATH10K_SCAN_IDLE) {
+		ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning\n");
+ return;
+ }
+
+ reinit_completion(&ar->bss_survey_done);
+
+ ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
+ if (ret) {
+ ath10k_warn(ar, "failed to send pdev bss chan info request\n");
+ return;
+ }
+
+ ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
+ if (!ret) {
+ ath10k_warn(ar, "bss channel survey timed out\n");
+ return;
+ }
+}
+
static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
@@ -5947,20 +6562,22 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
mutex_lock(&ar->conf_mutex);
- sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
sband = NULL;
}
if (!sband)
- sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
if (!sband || idx >= sband->n_channels) {
ret = -ENOENT;
goto exit;
}
+ ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
+
spin_lock_bh(&ar->data_lock);
memcpy(survey, ar_survey, sizeof(*survey));
spin_unlock_bh(&ar->data_lock);
@@ -5977,7 +6594,7 @@ exit:
static bool
ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
- enum ieee80211_band band,
+ enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int num_rates = 0;
@@ -5996,7 +6613,7 @@ ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
static bool
ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
- enum ieee80211_band band,
+ enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
int *nss)
{
@@ -6045,7 +6662,7 @@ ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
static int
ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
- enum ieee80211_band band,
+ enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
u8 *rate, u8 *nss)
{
@@ -6146,7 +6763,7 @@ static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
static bool
ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
- enum ieee80211_band band,
+ enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int i;
@@ -6198,7 +6815,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
struct ath10k *ar = arvif->ar;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
u8 rate;
@@ -6349,14 +6966,41 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return 0;
}
+static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 tsf)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
+ int ret;
+
+ /* Workaround:
+ *
+	 * The given tsf argument is an absolute TSF value, but the firmware
+	 * accepts only an offset relative to the current TSF.
+	 *
+	 * The get_tsf callback would normally supply the base for that offset,
+	 * but ath10k_get_tsf is not implemented properly and always returns 0.
+	 * Luckily, all current callers of set_tsf also derive the target TSF
+	 * from get_tsf (as get_tsf() + tsf_delta), so the final offset sent to
+	 * the firmware is still arithmetically correct.
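+	 *
+	 * Illustrative example (numbers not from this change): a caller that
+	 * wants to advance the TSF by 1000us computes
+	 * tsf = get_tsf() + 1000 = 1000, and the offset written below is
+	 * 1000 - get_tsf() = 1000, exactly the delta the firmware expects.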
+ */
+ tsf_offset = tsf - ath10k_get_tsf(hw, vif);
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, tsf_offset);
+ if (ret && ret != -EOPNOTSUPP)
+ ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
+}
+
static int ath10k_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
arvif->vdev_id, sta->addr, tid, action);
@@ -6528,7 +7172,7 @@ ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx add freq %hu width %d ptr %p\n",
+ "mac chanctx add freq %hu width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -6552,7 +7196,7 @@ ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx remove freq %hu width %d ptr %p\n",
+ "mac chanctx remove freq %hu width %d ptr %pK\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -6617,7 +7261,7 @@ ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx change freq %hu width %d ptr %p changed %x\n",
+ "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
@@ -6675,7 +7319,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx assign ptr %p vdev_id %i\n",
+ "mac chanctx assign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
if (WARN_ON(arvif->is_started)) {
@@ -6736,7 +7380,7 @@ ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC,
- "mac chanctx unassign ptr %p vdev_id %i\n",
+ "mac chanctx unassign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
WARN_ON(!arvif->is_started);
@@ -6782,7 +7426,8 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops ath10k_ops = {
- .tx = ath10k_tx,
+ .tx = ath10k_mac_op_tx,
+ .wake_tx_queue = ath10k_mac_op_wake_tx_queue,
.start = ath10k_start,
.stop = ath10k_stop,
.config = ath10k_config,
@@ -6809,6 +7454,7 @@ static const struct ieee80211_ops ath10k_ops = {
.set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
+ .set_tsf = ath10k_set_tsf,
.ampdu_action = ath10k_ampdu_action,
.get_et_sset_count = ath10k_debug_get_et_sset_count,
.get_et_stats = ath10k_debug_get_et_stats,
@@ -6828,11 +7474,12 @@ static const struct ieee80211_ops ath10k_ops = {
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = ath10k_sta_add_debugfs,
+ .sta_statistics = ath10k_sta_statistics,
#endif
};
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
@@ -6841,7 +7488,7 @@ static const struct ieee80211_ops ath10k_ops = {
}
#define CHAN5G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
@@ -6897,54 +7544,69 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
struct ath10k *ath10k_mac_create(size_t priv_size)
{
struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
struct ath10k *ar;
- hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops);
- if (!hw)
+ ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
+ if (!ops)
+ return NULL;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
+ if (!hw) {
+ kfree(ops);
return NULL;
+ }
ar = hw->priv;
ar->hw = hw;
+ ar->ops = ops;
return ar;
}
void ath10k_mac_destroy(struct ath10k *ar)
{
+ struct ieee80211_ops *ops = ar->ops;
+
ieee80211_free_hw(ar->hw);
+ kfree(ops);
}
static const struct ieee80211_iface_limit ath10k_if_limits[] = {
{
- .max = 8,
- .types = BIT(NL80211_IFTYPE_STATION)
- | BIT(NL80211_IFTYPE_P2P_CLIENT)
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_STATION)
+ | BIT(NL80211_IFTYPE_P2P_CLIENT)
},
{
- .max = 3,
- .types = BIT(NL80211_IFTYPE_P2P_GO)
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_P2P_GO)
},
{
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
},
{
- .max = 7,
- .types = BIT(NL80211_IFTYPE_AP)
+ .max = 7,
+ .types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
- | BIT(NL80211_IFTYPE_MESH_POINT)
+ | BIT(NL80211_IFTYPE_MESH_POINT)
#endif
},
};
static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
{
- .max = 8,
- .types = BIT(NL80211_IFTYPE_AP)
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
- | BIT(NL80211_IFTYPE_MESH_POINT)
+ | BIT(NL80211_IFTYPE_MESH_POINT)
#endif
},
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION)
+ },
};
static const struct ieee80211_iface_combination ath10k_if_comb[] = {
@@ -7157,13 +7819,19 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
- band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
band->channels = channels;
- band->n_bitrates = ath10k_g_rates_size;
- band->bitrates = ath10k_g_rates;
- ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+ if (ar->hw_params.cck_rate_map_rev2) {
+ band->n_bitrates = ath10k_g_rates_rev2_size;
+ band->bitrates = ath10k_g_rates_rev2;
+ } else {
+ band->n_bitrates = ath10k_g_rates_size;
+ band->bitrates = ath10k_g_rates;
+ }
+
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
}
if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
@@ -7175,12 +7843,12 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
- band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
band->channels = channels;
band->n_bitrates = ath10k_a_rates_size;
band->bitrates = ath10k_a_rates;
- ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
}
ath10k_mac_setup_ht_vht_cap(ar);
@@ -7193,7 +7861,7 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
- if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
+ if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
ar->hw->wiphy->interface_modes |=
BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -7233,6 +7901,7 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+ ar->hw->txq_data_size = sizeof(struct ath10k_txq);
ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
@@ -7257,7 +7926,8 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
- ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_AP_SCAN;
ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
@@ -7281,7 +7951,7 @@ int ath10k_mac_register(struct ath10k *ar)
*/
ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
- switch (ar->wmi.op_version) {
+ switch (ar->running_fw->fw_file.wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
ar->hw->wiphy->iface_combinations = ath10k_if_comb;
ar->hw->wiphy->n_iface_combinations =
@@ -7323,7 +7993,7 @@ int ath10k_mac_register(struct ath10k *ar)
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
ar->hw->netdev_features = NETIF_F_HW_CSUM;
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
/* Init ath dfs pattern detector */
ar->ath_common.debug_mask = ATH_DBG_DFS;
ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
@@ -7333,6 +8003,15 @@ int ath10k_mac_register(struct ath10k *ar)
ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
}
+ /* Current wake_tx_queue implementation imposes a significant
+ * performance penalty in some setups. The tx scheduling code needs
+	 * more work anyway, so disable wake_tx_queue unless the firmware
+	 * supports the pull-push mechanism.
+ */
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ ar->ops->wake_tx_queue = NULL;
+
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
@@ -7362,12 +8041,12 @@ err_unregister:
ieee80211_unregister_hw(ar->hw);
err_dfs_detector_exit:
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
ar->dfs_detector->exit(ar->dfs_detector);
err_free:
- kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
- kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
SET_IEEE80211_DEV(ar->hw, NULL);
return ret;
@@ -7377,11 +8056,11 @@ void ath10k_mac_unregister(struct ath10k *ar)
{
ieee80211_unregister_hw(ar->hw);
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
ar->dfs_detector->exit(ar->dfs_detector);
- kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
- kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
SET_IEEE80211_DEV(ar->hw, NULL);
}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index e3cefe4c7cfd..1bd29ecfcdcc 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -66,7 +66,7 @@ void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
enum wmi_tlv_tx_pause_action action);
u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
- u8 hw_rate);
+ u8 hw_rate, bool cck);
u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
u32 bitrate);
@@ -74,6 +74,14 @@ void ath10k_mac_tx_lock(struct ath10k *ar, int reason);
void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
+bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar);
+void ath10k_mac_tx_push_pending(struct ath10k *ar);
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+ u16 peer_id,
+ u8 tid);
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 930785a724e1..9fbeb7e5ab2d 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -33,12 +33,6 @@
#include "ce.h"
#include "pci.h"
-enum ath10k_pci_irq_mode {
- ATH10K_PCI_IRQ_AUTO = 0,
- ATH10K_PCI_IRQ_LEGACY = 1,
- ATH10K_PCI_IRQ_MSI = 2,
-};
-
enum ath10k_pci_reset_mode {
ATH10K_PCI_RESET_AUTO = 0,
ATH10K_PCI_RESET_WARM_ONLY = 1,
@@ -62,7 +56,10 @@ static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
+ { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
+ { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
+ { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
{0}
};
@@ -87,14 +84,19 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
+ { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
+
+ { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },
+
{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
+
+ { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
};
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
-static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
@@ -107,6 +109,7 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static struct ce_attr host_ce_config_wlan[] = {
@@ -186,6 +189,7 @@ static struct ce_attr host_ce_config_wlan[] = {
.src_nentries = 0,
.src_sz_max = 2048,
.dest_nentries = 128,
+ .recv_cb = ath10k_pci_pktlog_rx_cb,
},
/* CE9 target autonomous qcache memcpy */
@@ -485,6 +489,9 @@ static int ath10k_pci_force_wake(struct ath10k *ar)
unsigned long flags;
int ret = 0;
+ if (ar_pci->pci_ps)
+ return ret;
+
spin_lock_irqsave(&ar_pci->ps_lock, flags);
if (!ar_pci->ps_awake) {
@@ -615,7 +622,7 @@ static void ath10k_pci_sleep_sync(struct ath10k *ar)
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
-void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
+static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
@@ -637,7 +644,7 @@ void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
ath10k_pci_sleep(ar);
}
-u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
u32 val;
@@ -662,6 +669,20 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
return val;
}
+inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ar_pci->bus_ops->write32(ar, offset, value);
+}
+
+inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ return ar_pci->bus_ops->read32(ar, offset);
+}
+
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
@@ -682,7 +703,7 @@ void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}
-static bool ath10k_pci_irq_pending(struct ath10k *ar)
+bool ath10k_pci_irq_pending(struct ath10k *ar)
{
u32 cause;
@@ -695,7 +716,7 @@ static bool ath10k_pci_irq_pending(struct ath10k *ar)
return false;
}
-static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
+void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
/* IMPORTANT: INTR_CLR register has to be set after
* INTR_ENABLE is set to 0, otherwise interrupt can not be
@@ -711,7 +732,7 @@ static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
PCIE_INTR_ENABLE_ADDRESS);
}
-static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
+void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_ENABLE_ADDRESS,
@@ -727,10 +748,7 @@ static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- if (ar_pci->num_msi_intrs > 1)
- return "msi-x";
-
- if (ar_pci->num_msi_intrs == 1)
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
return "msi";
return "legacy";
@@ -791,7 +809,8 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
spin_lock_bh(&ar_pci->ce_lock);
num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
spin_unlock_bh(&ar_pci->ce_lock);
- while (num--) {
+
+ while (num >= 0) {
ret = __ath10k_pci_rx_post_buf(pipe);
if (ret) {
if (ret == -ENOSPC)
@@ -801,10 +820,11 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
ATH10K_PCI_RX_POST_RETRY_MS);
break;
}
+ num--;
}
}
-static void ath10k_pci_rx_post(struct ath10k *ar)
+void ath10k_pci_rx_post(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int i;
@@ -813,7 +833,7 @@ static void ath10k_pci_rx_post(struct ath10k *ar)
ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}
-static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
+void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
struct ath10k *ar = (void *)ptr;
@@ -826,13 +846,17 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS) &
0x7ff) << 21;
break;
+ case ATH10K_HW_QCA9888:
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
+ case ATH10K_HW_QCA4019:
val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
break;
}
@@ -851,10 +875,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
- u32 buf;
- unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
- unsigned int id;
- unsigned int flags;
+ u32 *buf;
+ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
struct ath10k_ce_pipe *ce_diag;
/* Host buffer address in CE space */
u32 ce_data;
@@ -872,9 +894,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
* 1) 4-byte alignment
* 2) Buffer in DMA-able space
*/
- orig_nbytes = nbytes;
+ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
+
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
- orig_nbytes,
+ alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
@@ -882,15 +905,15 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
ret = -ENOMEM;
goto done;
}
- memset(data_buf, 0, orig_nbytes);
+ memset(data_buf, 0, alloc_nbytes);
- remaining_bytes = orig_nbytes;
+ remaining_bytes = nbytes;
ce_data = ce_data_base;
while (remaining_bytes) {
nbytes = min_t(unsigned int, remaining_bytes,
DIAG_TRANSFER_LIMIT);
- ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
if (ret != 0)
goto done;
@@ -921,9 +944,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
}
i = 0;
- while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+ (void **)&buf,
+ &completed_nbytes)
+ != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -937,25 +961,28 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
goto done;
}
- if (buf != ce_data) {
+ if (*buf != ce_data) {
ret = -EIO;
goto done;
}
remaining_bytes -= nbytes;
+
+ if (ret) {
+ ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
+ address, ret);
+ break;
+ }
+ memcpy(data, data_buf, nbytes);
+
address += nbytes;
- ce_data += nbytes;
+ data += nbytes;
}
done:
- if (ret == 0)
- memcpy(data, data_buf, orig_nbytes);
- else
- ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
- address, ret);
if (data_buf)
- dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
ce_data_base);
spin_unlock_bh(&ar_pci->ce_lock);
@@ -1002,15 +1029,13 @@ static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
-static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
- const void *data, int nbytes)
+int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
- u32 buf;
+ u32 *buf;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
- unsigned int id;
- unsigned int flags;
struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
u32 ce_data; /* Host buffer address in CE space */
@@ -1059,7 +1084,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
/* Set up to receive directly into Target(!) address */
- ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
goto done;
@@ -1084,9 +1109,10 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
}
i = 0;
- while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+ (void **)&buf,
+ &completed_nbytes)
+ != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -1100,7 +1126,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
goto done;
}
- if (buf != address) {
+ if (*buf != address) {
ret = -EIO;
goto done;
}
@@ -1162,15 +1188,11 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
struct sk_buff *skb;
struct sk_buff_head list;
void *transfer_context;
- u32 ce_data;
unsigned int nbytes, max_nbytes;
- unsigned int transfer_id;
- unsigned int flags;
__skb_queue_head_init(&list);
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
- &ce_data, &nbytes, &transfer_id,
- &flags) == 0) {
+ &nbytes) == 0) {
skb = transfer_context;
max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
@@ -1199,6 +1221,63 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
ath10k_pci_rx_post_pipe(pipe_info);
}
+static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
+ void (*callback)(struct ath10k *ar,
+ struct sk_buff *skb))
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ void *transfer_context;
+ unsigned int nbytes, max_nbytes, nentries;
+ int orig_len;
+
+	/* No need to acquire ce_lock for CE5, since this is the only place CE5
+ * is processed other than init and deinit. Before releasing CE5
+ * buffers, interrupts are disabled. Thus CE5 access is serialized.
+ */
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
+ &nbytes) == 0) {
+ skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ continue;
+ }
+
+ dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ nentries = skb_queue_len(&list);
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+ skb->data, skb->len);
+
+ orig_len = skb->len;
+ callback(ar, skb);
+ skb_push(skb, orig_len - skb->len);
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+		/* let device gain the buffer again */
+ dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ }
+ ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
+}
+
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
@@ -1215,6 +1294,15 @@ static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}
+/* Called by lower (CE) layer when data is received from the Target.
+ * Only 10.4 firmware uses separate CE to transfer pktlog data.
+ */
+static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_pci_process_rx_cb(ce_state,
+ ath10k_htt_rx_pktlog_completion_handler);
+}
+
/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
@@ -1246,11 +1334,11 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
*/
ath10k_ce_per_engine_service(ce_state->ar, 4);
- ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
+ ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}
-static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
- struct ath10k_hif_sg_item *items, int n_items)
+int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
@@ -1318,13 +1406,13 @@ err:
return err;
}
-static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
- size_t buf_len)
+int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
{
return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}
-static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -1392,8 +1480,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
queue_work(ar->workqueue, &ar->restart_work);
}
-static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
- int force)
+void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force)
{
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
@@ -1418,22 +1506,15 @@ static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
ath10k_ce_per_engine_service(ar, pipe);
}
-static void ath10k_pci_kill_tasklet(struct ath10k *ar)
+static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
-
- tasklet_kill(&ar_pci->intr_tq);
- tasklet_kill(&ar_pci->msi_fw_err);
-
- for (i = 0; i < CE_COUNT; i++)
- tasklet_kill(&ar_pci->pipe_info[i].intr);
del_timer_sync(&ar_pci->rx_post_retry);
}
-static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
- u8 *ul_pipe, u8 *dl_pipe)
+int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
{
const struct service_to_pipe *entry;
bool ul_set = false, dl_set = false;
@@ -1477,8 +1558,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
return 0;
}
-static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
- u8 *ul_pipe, u8 *dl_pipe)
+void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
{
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
@@ -1487,12 +1568,13 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
ul_pipe, dl_pipe);
}
-static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
+void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
u32 val;
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
@@ -1502,6 +1584,9 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
CORE_CTRL_ADDRESS, val);
break;
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
+ case ATH10K_HW_QCA9888:
+ case ATH10K_HW_QCA4019:
/* TODO: Find appropriate register configuration for QCA99X0
* to mask irq/MSI.
*/
@@ -1515,6 +1600,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
@@ -1524,6 +1610,9 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
CORE_CTRL_ADDRESS, val);
break;
case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
+ case ATH10K_HW_QCA9888:
+ case ATH10K_HW_QCA4019:
/* TODO: Find appropriate register configuration for QCA99X0
* to unmask irq/MSI.
*/
@@ -1541,10 +1630,8 @@ static void ath10k_pci_irq_disable(struct ath10k *ar)
static void ath10k_pci_irq_sync(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
- for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
- synchronize_irq(ar_pci->pdev->irq + i);
+ synchronize_irq(ar_pci->pdev->irq);
}
static void ath10k_pci_irq_enable(struct ath10k *ar)
@@ -1604,14 +1691,12 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
struct ath10k *ar;
- struct ath10k_pci *ar_pci;
struct ath10k_ce_pipe *ce_pipe;
struct ath10k_ce_ring *ce_ring;
struct sk_buff *skb;
int i;
ar = pci_pipe->hif_ce_state;
- ar_pci = ath10k_pci_priv(ar);
ce_pipe = pci_pipe->ce_hdl;
ce_ring = ce_pipe->src_ring;
@@ -1654,7 +1739,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
}
}
-static void ath10k_pci_ce_deinit(struct ath10k *ar)
+void ath10k_pci_ce_deinit(struct ath10k *ar)
{
int i;
@@ -1662,9 +1747,9 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
ath10k_ce_deinit_pipe(ar, i);
}
-static void ath10k_pci_flush(struct ath10k *ar)
+void ath10k_pci_flush(struct ath10k *ar)
{
- ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_rx_retry_sync(ar);
ath10k_pci_buffer_cleanup(ar);
}
@@ -1691,15 +1776,17 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
ath10k_pci_flush(ar);
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
spin_lock_irqsave(&ar_pci->ps_lock, flags);
WARN_ON(ar_pci->ps_wake_refcount > 0);
spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
-static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
- void *req, u32 req_len,
- void *resp, u32 *resp_len)
+int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
+ void *req, u32 req_len,
+ void *resp, u32 *resp_len)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
@@ -1742,7 +1829,7 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
DMA_FROM_DEVICE);
ret = dma_mapping_error(ar->dev, resp_paddr);
if (ret) {
- ret = EIO;
+ ret = -EIO;
goto err_req;
}
@@ -1805,13 +1892,10 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct bmi_xfer *xfer;
- u32 ce_data;
unsigned int nbytes;
- unsigned int transfer_id;
- unsigned int flags;
- if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
- &nbytes, &transfer_id, &flags))
+ if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
+ &nbytes))
return;
if (WARN_ON_ONCE(!xfer))
@@ -1868,6 +1952,9 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
switch (ar_pci->pdev->device) {
case QCA988X_2_0_DEVICE_ID:
case QCA99X0_2_0_DEVICE_ID:
+ case QCA9888_2_0_DEVICE_ID:
+ case QCA9984_1_0_DEVICE_ID:
+ case QCA9887_1_0_DEVICE_ID:
return 1;
case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
@@ -1893,7 +1980,14 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
return 1;
}
-static int ath10k_pci_init_config(struct ath10k *ar)
+static int ath10k_bus_get_num_banks(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ return ar_pci->bus_ops->get_num_banks(ar);
+}
+
+int ath10k_pci_init_config(struct ath10k *ar)
{
u32 interconnect_targ_addr;
u32 pcie_state_targ_addr = 0;
@@ -2004,7 +2098,7 @@ static int ath10k_pci_init_config(struct ath10k *ar)
/* first bank is switched to IRAM */
ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
HI_EARLY_ALLOC_MAGIC_MASK);
- ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
+ ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
HI_EARLY_ALLOC_IRAM_BANKS_MASK);
@@ -2057,7 +2151,7 @@ static void ath10k_pci_override_ce_config(struct ath10k *ar)
target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
}
-static int ath10k_pci_alloc_pipes(struct ath10k *ar)
+int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe;
@@ -2088,7 +2182,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_free_pipes(struct ath10k *ar)
+void ath10k_pci_free_pipes(struct ath10k *ar)
{
int i;
@@ -2096,7 +2190,7 @@ static void ath10k_pci_free_pipes(struct ath10k *ar)
ath10k_ce_free_pipe(ar, i);
}
-static int ath10k_pci_init_pipes(struct ath10k *ar)
+int ath10k_pci_init_pipes(struct ath10k *ar)
{
int i, ret;
@@ -2127,6 +2221,14 @@ static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
}
+static bool ath10k_pci_has_device_gone(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
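+	/* Reads from a PCI device that has fallen off the bus (e.g. surprise
+	 * removal) return all-ones, so 0xffffffff means the device is gone.
+	 */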
+ return (val == 0xffffffff);
+}
+
/* this function effectively clears target memory controller assert line */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
@@ -2222,16 +2324,20 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
return 0;
}
+static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
+{
+ ath10k_pci_irq_disable(ar);
+ return ath10k_pci_qca99x0_chip_reset(ar);
+}
+
static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
- if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
- return ath10k_pci_warm_reset(ar);
- } else if (QCA_REV_99X0(ar)) {
- ath10k_pci_irq_disable(ar);
- return ath10k_pci_qca99x0_chip_reset(ar);
- } else {
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (!ar_pci->pci_soft_reset)
return -ENOTSUPP;
- }
+
+ return ar_pci->pci_soft_reset(ar);
}
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
@@ -2366,16 +2472,12 @@ static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
static int ath10k_pci_chip_reset(struct ath10k *ar)
{
- if (QCA_REV_988X(ar))
- return ath10k_pci_qca988x_chip_reset(ar);
- else if (QCA_REV_6174(ar))
- return ath10k_pci_qca6174_chip_reset(ar);
- else if (QCA_REV_9377(ar))
- return ath10k_pci_qca6174_chip_reset(ar);
- else if (QCA_REV_99X0(ar))
- return ath10k_pci_qca99x0_chip_reset(ar);
- else
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (WARN_ON(!ar_pci->pci_hard_reset))
return -ENOTSUPP;
+
+ return ar_pci->pci_hard_reset(ar);
}
static int ath10k_pci_hif_power_up(struct ath10k *ar)
@@ -2429,6 +2531,7 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce;
}
+ napi_enable(&ar->napi);
return 0;
@@ -2439,7 +2542,7 @@ err_sleep:
return ret;
}
-static void ath10k_pci_hif_power_down(struct ath10k *ar)
+void ath10k_pci_hif_power_down(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
@@ -2469,12 +2572,10 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
u32 val;
int ret = 0;
- if (ar_pci->pci_ps == 0) {
- ret = ath10k_pci_force_wake(ar);
- if (ret) {
- ath10k_err(ar, "failed to wake up target: %d\n", ret);
- return ret;
- }
+ ret = ath10k_pci_force_wake(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to wake up target: %d\n", ret);
+ return ret;
}
/* Suspend/Resume resets the PCI configuration space, so we have to
@@ -2490,6 +2591,144 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
}
#endif
+static bool ath10k_pci_validate_cal(void *data, size_t size)
+{
+ __le16 *cal_words = data;
+ u16 checksum = 0;
+ size_t i;
+
+ if (size % 2 != 0)
+ return false;
+
+ for (i = 0; i < size / 2; i++)
+ checksum ^= le16_to_cpu(cal_words[i]);
+
+ return checksum == 0xffff;
+}
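+
+/* Illustration only, not part of this change: for the XOR check above to
+ * pass, a calibration image generator would reserve one 16-bit word and
+ * store the complement of the XOR of all other words, making the XOR over
+ * the whole blob equal 0xffff. A minimal sketch under that assumption:
+ */
+#if 0
+static u16 ath10k_pci_cal_checksum_word(const __le16 *words, size_t n_words)
+{
+	u16 acc = 0;
+	size_t i;
+
+	/* XOR of every word except the (zeroed) checksum slot */
+	for (i = 0; i < n_words; i++)
+		acc ^= le16_to_cpu(words[i]);
+
+	/* value that brings the XOR over all words to 0xffff */
+	return acc ^ 0xffff;
+}
+#endif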
+
+static void ath10k_pci_enable_eeprom(struct ath10k *ar)
+{
+ /* Enable SI clock */
+ ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
+
+ /* Configure GPIOs for I2C operation */
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
+ 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
+ SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
+ GPIO_PIN0_CONFIG) |
+ SM(1, GPIO_PIN0_PAD_PULL));
+
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
+ 4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
+ SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
+ SM(1, GPIO_PIN0_PAD_PULL));
+
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS +
+ QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
+ 1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
+
+	/* In the Swift ASIC the EEPROM clock will be (110 MHz / 512) = 214 kHz */
+ ath10k_pci_write32(ar,
+ SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
+ SM(1, SI_CONFIG_ERR_INT) |
+ SM(1, SI_CONFIG_BIDIR_OD_DATA) |
+ SM(1, SI_CONFIG_I2C) |
+ SM(1, SI_CONFIG_POS_SAMPLE) |
+ SM(1, SI_CONFIG_INACTIVE_DATA) |
+ SM(1, SI_CONFIG_INACTIVE_CLK) |
+ SM(8, SI_CONFIG_DIVIDER));
+}
+
+static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
+{
+ u32 reg;
+ int wait_limit;
+
+ /* set device select byte and for the read operation */
+	/* set the device select byte and the address for the read operation */
+ SM(addr, QCA9887_EEPROM_ADDR_LO) |
+ SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
+
+ /* write transmit data, transfer length, and START bit */
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
+ SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
+ SM(4, SI_CS_TX_CNT));
+
+	/* wait max 1 sec: 100000 iterations of udelay(10) below */
+ wait_limit = 100000;
+
+ /* wait for SI_CS_DONE_INT */
+ do {
+ reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
+ if (MS(reg, SI_CS_DONE_INT))
+ break;
+
+ wait_limit--;
+ udelay(10);
+ } while (wait_limit > 0);
+
+ if (!MS(reg, SI_CS_DONE_INT)) {
+ ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
+ addr);
+ return -ETIMEDOUT;
+ }
+
+ /* clear SI_CS_DONE_INT */
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
+
+ if (MS(reg, SI_CS_DONE_ERR)) {
+ ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
+ return -EIO;
+ }
+
+ /* extract receive data */
+ reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
+ *out = reg;
+
+ return 0;
+}
+
+static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
+ size_t *data_len)
+{
+ u8 *caldata = NULL;
+ size_t calsize, i;
+ int ret;
+
+ if (!QCA_REV_9887(ar))
+ return -EOPNOTSUPP;
+
+ calsize = ar->hw_params.cal_data_len;
+ caldata = kmalloc(calsize, GFP_KERNEL);
+ if (!caldata)
+ return -ENOMEM;
+
+ ath10k_pci_enable_eeprom(ar);
+
+ for (i = 0; i < calsize; i++) {
+ ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
+ if (ret)
+ goto err_free;
+ }
+
+ if (!ath10k_pci_validate_cal(caldata, calsize))
+ goto err_free;
+
+ *data = caldata;
+ *data_len = calsize;
+
+ return 0;
+
+err_free:
+ kfree(caldata);
+
+ return -EINVAL;
+}
+
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.tx_sg = ath10k_pci_hif_tx_sg,
.diag_read = ath10k_pci_hif_diag_read,
@@ -2509,67 +2748,9 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.suspend = ath10k_pci_hif_suspend,
.resume = ath10k_pci_hif_resume,
#endif
+ .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
};
-static void ath10k_pci_ce_tasklet(unsigned long ptr)
-{
- struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
- struct ath10k_pci *ar_pci = pipe->ar_pci;
-
- ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
-}
-
-static void ath10k_msi_err_tasklet(unsigned long data)
-{
- struct ath10k *ar = (struct ath10k *)data;
-
- if (!ath10k_pci_has_fw_crashed(ar)) {
- ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
- return;
- }
-
- ath10k_pci_irq_disable(ar);
- ath10k_pci_fw_crashed_clear(ar);
- ath10k_pci_fw_crashed_dump(ar);
-}
-
-/*
- * Handler for a per-engine interrupt on a PARTICULAR CE.
- * This is used in cases where each CE has a private MSI interrupt.
- */
-static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
-{
- struct ath10k *ar = arg;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
-
- if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
- ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
- ce_id);
- return IRQ_HANDLED;
- }
-
- /*
- * NOTE: We are able to derive ce_id from irq because we
- * use a one-to-one mapping for CE's 0..5.
- * CE's 6 & 7 do not use interrupts at all.
- *
- * This mapping must be kept in sync with the mapping
- * used by firmware.
- */
- tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
-{
- struct ath10k *ar = arg;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- tasklet_schedule(&ar_pci->msi_fw_err);
- return IRQ_HANDLED;
-}
-
/*
* Top-level interrupt handler for all PCI interrupts from a Target.
* When a block of MSI interrupts is allocated, this top-level handler
@@ -2581,77 +2762,62 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
- if (ar_pci->pci_ps == 0) {
- ret = ath10k_pci_force_wake(ar);
- if (ret) {
- ath10k_warn(ar, "failed to wake device up on irq: %d\n",
- ret);
- return IRQ_NONE;
- }
- }
-
- if (ar_pci->num_msi_intrs == 0) {
- if (!ath10k_pci_irq_pending(ar))
- return IRQ_NONE;
+ if (ath10k_pci_has_device_gone(ar))
+ return IRQ_NONE;
- ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ret = ath10k_pci_force_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
+ return IRQ_NONE;
}
- tasklet_schedule(&ar_pci->intr_tq);
+ if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
+ !ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
+
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
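+	/* Interrupts stay masked until the NAPI poll re-enables them after
+	 * draining all pending copy engine work.
+	 */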
+ napi_schedule(&ar->napi);
return IRQ_HANDLED;
}
-static void ath10k_pci_tasklet(unsigned long data)
+static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
- struct ath10k *ar = (struct ath10k *)data;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done = 0;
if (ath10k_pci_has_fw_crashed(ar)) {
- ath10k_pci_irq_disable(ar);
ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar);
- return;
+ napi_complete(ctx);
+ return done;
}
ath10k_ce_per_engine_service_any(ar);
- /* Re-enable legacy irq that was disabled in the irq handler */
- if (ar_pci->num_msi_intrs == 0)
- ath10k_pci_enable_legacy_irq(ar);
-}
-
-static int ath10k_pci_request_irq_msix(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ret, i;
-
- ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
- ath10k_pci_msi_fw_handler,
- IRQF_SHARED, "ath10k_pci", ar);
- if (ret) {
- ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
- ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
- return ret;
- }
-
- for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
- ret = request_irq(ar_pci->pdev->irq + i,
- ath10k_pci_per_engine_handler,
- IRQF_SHARED, "ath10k_pci", ar);
- if (ret) {
- ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
- ar_pci->pdev->irq + i, ret);
-
- for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
- free_irq(ar_pci->pdev->irq + i, ar);
-
- free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
- return ret;
+ done = ath10k_htt_txrx_compl_task(ar, budget);
+
+ if (done < budget) {
+ napi_complete(ctx);
+		/* In case of MSI, it is possible that interrupts are received
+		 * while the NAPI poll is in progress. Pending interrupts that
+		 * arrive after all copy engine pipes have been processed by
+		 * the poll would then never be handled again, which was seen
+		 * to prevent the boot sequence from completing on x86
+		 * platforms. So before re-enabling interrupts it is safer to
+		 * check for, and immediately service, any pending interrupts.
+		 */
+ if (CE_INTERRUPT_SUMMARY(ar)) {
+ napi_reschedule(ctx);
+ goto out;
}
+ ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_unmask(ar);
}
- return 0;
+out:
+ return done;
}
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
@@ -2692,41 +2858,27 @@ static int ath10k_pci_request_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- switch (ar_pci->num_msi_intrs) {
- case 0:
+ switch (ar_pci->oper_irq_mode) {
+ case ATH10K_PCI_IRQ_LEGACY:
return ath10k_pci_request_irq_legacy(ar);
- case 1:
+ case ATH10K_PCI_IRQ_MSI:
return ath10k_pci_request_irq_msi(ar);
default:
- return ath10k_pci_request_irq_msix(ar);
+ return -EINVAL;
}
}
static void ath10k_pci_free_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
- /* There's at least one interrupt irregardless whether its legacy INTR
- * or MSI or MSI-X */
- for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
- free_irq(ar_pci->pdev->irq + i, ar);
+ free_irq(ar_pci->pdev->irq, ar);
}
-static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
+void ath10k_pci_init_napi(struct ath10k *ar)
{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
-
- tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
- tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
- (unsigned long)ar);
-
- for (i = 0; i < CE_COUNT; i++) {
- ar_pci->pipe_info[i].ar_pci = ar_pci;
- tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
- (unsigned long)&ar_pci->pipe_info[i]);
- }
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
+ ATH10K_NAPI_BUDGET);
}
static int ath10k_pci_init_irq(struct ath10k *ar)
@@ -2734,26 +2886,15 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
- ath10k_pci_init_irq_tasklets(ar);
+ ath10k_pci_init_napi(ar);
if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
ath10k_info(ar, "limiting irq mode to: %d\n",
ath10k_pci_irq_mode);
- /* Try MSI-X */
- if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
- ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1;
- ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
- ar_pci->num_msi_intrs);
- if (ret > 0)
- return 0;
-
- /* fall-through */
- }
-
/* Try MSI */
if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
- ar_pci->num_msi_intrs = 1;
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
ret = pci_enable_msi(ar_pci->pdev);
if (ret == 0)
return 0;
@@ -2769,7 +2910,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
* This write might get lost if target has NOT written BAR.
* For now, fix the race by repeating the write in below
* synchronization checking. */
- ar_pci->num_msi_intrs = 0;
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
@@ -2787,8 +2928,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- switch (ar_pci->num_msi_intrs) {
- case 0:
+ switch (ar_pci->oper_irq_mode) {
+ case ATH10K_PCI_IRQ_LEGACY:
ath10k_pci_deinit_irq_legacy(ar);
break;
default:
@@ -2799,7 +2940,7 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
return 0;
}
-static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
+int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned long timeout;
@@ -2826,7 +2967,7 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (val & FW_IND_INITIALIZED)
break;
- if (ar_pci->num_msi_intrs == 0)
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
/* Fix potential race by repeating CORE_BASE writes */
ath10k_pci_enable_legacy_irq(ar);
@@ -2937,7 +3078,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
goto err_master;
}
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
return 0;
err_master:
@@ -2980,6 +3121,44 @@ static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
return false;
}
+int ath10k_pci_setup_resource(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ spin_lock_init(&ar_pci->ce_lock);
+ spin_lock_init(&ar_pci->ps_lock);
+
+ setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
+ (unsigned long)ar);
+
+ if (QCA_REV_6174(ar))
+ ath10k_pci_override_ce_config(ar);
+
+ ret = ath10k_pci_alloc_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath10k_pci_release_resource(struct ath10k *ar)
+{
+ ath10k_pci_rx_retry_sync(ar);
+ netif_napi_del(&ar->napi);
+ ath10k_pci_ce_deinit(ar);
+ ath10k_pci_free_pipes(ar);
+}
+
+static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
+ .read32 = ath10k_bus_pci_read32,
+ .write32 = ath10k_bus_pci_write32,
+ .get_num_banks = ath10k_pci_get_num_banks,
+};
+
static int ath10k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_dev)
{
@@ -2989,24 +3168,52 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
enum ath10k_hw_rev hw_rev;
u32 chip_id;
bool pci_ps;
+ int (*pci_soft_reset)(struct ath10k *ar);
+ int (*pci_hard_reset)(struct ath10k *ar);
switch (pci_dev->device) {
case QCA988X_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA988X;
pci_ps = false;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+ break;
+ case QCA9887_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9887;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca988x_chip_reset;
break;
case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
hw_rev = ATH10K_HW_QCA6174;
pci_ps = true;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca6174_chip_reset;
break;
case QCA99X0_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA99X0;
pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ break;
+ case QCA9984_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9984;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ break;
+ case QCA9888_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9888;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
break;
case QCA9377_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9377;
pci_ps = true;
+ pci_soft_reset = NULL;
+ pci_hard_reset = ath10k_pci_qca6174_chip_reset;
break;
default:
WARN_ON(1);
@@ -3030,55 +3237,47 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->ar = ar;
ar->dev_id = pci_dev->device;
ar_pci->pci_ps = pci_ps;
+ ar_pci->bus_ops = &ath10k_pci_bus_ops;
+ ar_pci->pci_soft_reset = pci_soft_reset;
+ ar_pci->pci_hard_reset = pci_hard_reset;
ar->id.vendor = pdev->vendor;
ar->id.device = pdev->device;
ar->id.subsystem_vendor = pdev->subsystem_vendor;
ar->id.subsystem_device = pdev->subsystem_device;
- spin_lock_init(&ar_pci->ce_lock);
- spin_lock_init(&ar_pci->ps_lock);
-
- setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
- (unsigned long)ar);
setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
(unsigned long)ar);
- ret = ath10k_pci_claim(ar);
+ ret = ath10k_pci_setup_resource(ar);
if (ret) {
- ath10k_err(ar, "failed to claim device: %d\n", ret);
+ ath10k_err(ar, "failed to setup resource: %d\n", ret);
goto err_core_destroy;
}
- if (QCA_REV_6174(ar))
- ath10k_pci_override_ce_config(ar);
+ ret = ath10k_pci_claim(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to claim device: %d\n", ret);
+ goto err_free_pipes;
+ }
- ret = ath10k_pci_alloc_pipes(ar);
+ ret = ath10k_pci_force_wake(ar);
if (ret) {
- ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
- ret);
+ ath10k_warn(ar, "failed to wake up device : %d\n", ret);
goto err_sleep;
}
ath10k_pci_ce_deinit(ar);
ath10k_pci_irq_disable(ar);
- if (ar_pci->pci_ps == 0) {
- ret = ath10k_pci_force_wake(ar);
- if (ret) {
- ath10k_warn(ar, "failed to wake up device : %d\n", ret);
- goto err_free_pipes;
- }
- }
-
ret = ath10k_pci_init_irq(ar);
if (ret) {
ath10k_err(ar, "failed to init irqs: %d\n", ret);
- goto err_free_pipes;
+ goto err_sleep;
}
- ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
- ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
+ ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
+ ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
ath10k_pci_irq_mode, ath10k_pci_reset_mode);
ret = ath10k_pci_request_irq(ar);
@@ -3115,18 +3314,18 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
err_free_irq:
ath10k_pci_free_irq(ar);
- ath10k_pci_kill_tasklet(ar);
+ ath10k_pci_rx_retry_sync(ar);
err_deinit_irq:
ath10k_pci_deinit_irq(ar);
-err_free_pipes:
- ath10k_pci_free_pipes(ar);
-
err_sleep:
ath10k_pci_sleep_sync(ar);
ath10k_pci_release(ar);
+err_free_pipes:
+ ath10k_pci_free_pipes(ar);
+
err_core_destroy:
ath10k_core_destroy(ar);
@@ -3150,10 +3349,8 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_core_unregister(ar);
ath10k_pci_free_irq(ar);
- ath10k_pci_kill_tasklet(ar);
ath10k_pci_deinit_irq(ar);
- ath10k_pci_ce_deinit(ar);
- ath10k_pci_free_pipes(ar);
+ ath10k_pci_release_resource(ar);
ath10k_pci_sleep_sync(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
@@ -3177,6 +3374,10 @@ static int __init ath10k_pci_init(void)
printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
ret);
+ ret = ath10k_ahb_init();
+ if (ret)
+ printk(KERN_ERR "ahb init failed: %d\n", ret);
+
return ret;
}
module_init(ath10k_pci_init);
@@ -3184,16 +3385,16 @@ module_init(ath10k_pci_init);
static void __exit ath10k_pci_exit(void)
{
pci_unregister_driver(&ath10k_pci_driver);
+ ath10k_ahb_exit();
}
module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
-MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
+MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
MODULE_LICENSE("Dual BSD/GPL");
/* QCA988x 2.0 firmware files */
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
@@ -3201,6 +3402,11 @@ MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+/* QCA9887 1.0 firmware files */
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index f91bf333cb75..9854ad56b2de 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -22,6 +22,7 @@
#include "hw.h"
#include "ce.h"
+#include "ahb.h"
/*
* maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
@@ -147,9 +148,6 @@ struct ath10k_pci_pipe {
/* protects compl_free and num_send_allowed */
spinlock_t pipe_lock;
-
- struct ath10k_pci *ar_pci;
- struct tasklet_struct intr;
};
struct ath10k_pci_supp_chip {
@@ -157,6 +155,18 @@ struct ath10k_pci_supp_chip {
u32 rev_id;
};
+struct ath10k_bus_ops {
+ u32 (*read32)(struct ath10k *ar, u32 offset);
+ void (*write32)(struct ath10k *ar, u32 offset, u32 value);
+ int (*get_num_banks)(struct ath10k *ar);
+};
+
+enum ath10k_pci_irq_mode {
+ ATH10K_PCI_IRQ_AUTO = 0,
+ ATH10K_PCI_IRQ_LEGACY = 1,
+ ATH10K_PCI_IRQ_MSI = 2,
+};
+
struct ath10k_pci {
struct pci_dev *pdev;
struct device *dev;
@@ -164,14 +174,8 @@ struct ath10k_pci {
void __iomem *mem;
size_t mem_len;
- /*
- * Number of MSI interrupts granted, 0 --> using legacy PCI line
- * interrupts.
- */
- int num_msi_intrs;
-
- struct tasklet_struct intr_tq;
- struct tasklet_struct msi_fw_err;
+ /* Operating interrupt mode */
+ enum ath10k_pci_irq_mode oper_irq_mode;
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
@@ -225,6 +229,20 @@ struct ath10k_pci {
* on MMIO read/write.
*/
bool pci_ps;
+
+ const struct ath10k_bus_ops *bus_ops;
+
+ /* Chip specific pci reset routine used to do a safe reset */
+ int (*pci_soft_reset)(struct ath10k *ar);
+
+ /* Chip specific pci full reset function */
+ int (*pci_hard_reset)(struct ath10k *ar);
+
+ /* Keep this entry last; memory for struct ath10k_ahb is
+ * allocated (when AHB support is enabled) as a continuation
+ * of this struct.
+ */
+ struct ath10k_ahb ahb[0];
};
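A hedged sketch of the allocation the trailing ahb[0] member implies; the exact call site is outside this hunk, and the CONFIG_ATH10K_AHB gating and variable names are assumptions for illustration:

	/* Sketch: reserve room so ath10k_pci is immediately followed by
	 * ath10k_ahb, making ar_pci->ahb[0] a valid continuation.
	 */
	size_t priv_size = sizeof(struct ath10k_pci);

	if (IS_ENABLED(CONFIG_ATH10K_AHB))
		priv_size += sizeof(struct ath10k_ahb);

	ar = ath10k_core_create(priv_size, dev, bus_type, hw_rev, hif_ops);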
static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -253,6 +271,40 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset);
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr);
u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr);
+int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items);
+int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len);
+int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes);
+int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, void *req, u32 req_len,
+ void *resp, u32 *resp_len);
+int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe,
+ u8 *dl_pipe);
+void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force);
+u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe);
+void ath10k_pci_hif_power_down(struct ath10k *ar);
+int ath10k_pci_alloc_pipes(struct ath10k *ar);
+void ath10k_pci_free_pipes(struct ath10k *ar);
+void ath10k_pci_rx_replenish_retry(unsigned long ptr);
+void ath10k_pci_ce_deinit(struct ath10k *ar);
+void ath10k_pci_init_napi(struct ath10k *ar);
+int ath10k_pci_init_pipes(struct ath10k *ar);
+int ath10k_pci_init_config(struct ath10k *ar);
+void ath10k_pci_rx_post(struct ath10k *ar);
+void ath10k_pci_flush(struct ath10k *ar);
+void ath10k_pci_enable_legacy_irq(struct ath10k *ar);
+bool ath10k_pci_irq_pending(struct ath10k *ar);
+void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar);
+void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar);
+int ath10k_pci_wait_for_target_init(struct ath10k *ar);
+int ath10k_pci_setup_resource(struct ath10k *ar);
+void ath10k_pci_release_resource(struct ath10k *ar);
+
/* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too
* frequently. To avoid this put SoC to sleep after a very conservative grace
* period. Adjust with great care.
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index ca8d16884af1..034e7a54c5b2 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -656,26 +656,6 @@ struct rx_msdu_end {
* Reserved: HW should fill with zero. FW should ignore.
*/
-#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0
-#define RX_PPDU_START_SIG_RATE_SELECT_CCK 1
-
-#define RX_PPDU_START_SIG_RATE_OFDM_48 0
-#define RX_PPDU_START_SIG_RATE_OFDM_24 1
-#define RX_PPDU_START_SIG_RATE_OFDM_12 2
-#define RX_PPDU_START_SIG_RATE_OFDM_6 3
-#define RX_PPDU_START_SIG_RATE_OFDM_54 4
-#define RX_PPDU_START_SIG_RATE_OFDM_36 5
-#define RX_PPDU_START_SIG_RATE_OFDM_18 6
-#define RX_PPDU_START_SIG_RATE_OFDM_9 7
-
-#define RX_PPDU_START_SIG_RATE_CCK_LP_11 0
-#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1
-#define RX_PPDU_START_SIG_RATE_CCK_LP_2 2
-#define RX_PPDU_START_SIG_RATE_CCK_LP_1 3
-#define RX_PPDU_START_SIG_RATE_CCK_SP_11 4
-#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5
-#define RX_PPDU_START_SIG_RATE_CCK_SP_2 6
-
#define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04
#define HTT_RX_PPDU_START_PREAMBLE_HT 0x08
#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09
@@ -711,25 +691,6 @@ struct rx_msdu_end {
/* No idea what this flag means. It seems to be always set in rate. */
#define RX_PPDU_START_RATE_FLAG BIT(3)
-enum rx_ppdu_start_rate {
- RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M,
- RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M,
- RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M,
- RX_PPDU_START_RATE_OFDM_6M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M,
- RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M,
- RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M,
- RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M,
- RX_PPDU_START_RATE_OFDM_9M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M,
-
- RX_PPDU_START_RATE_CCK_LP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M,
- RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M,
- RX_PPDU_START_RATE_CCK_LP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M,
- RX_PPDU_START_RATE_CCK_LP_1M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M,
- RX_PPDU_START_RATE_CCK_SP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M,
- RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M,
- RX_PPDU_START_RATE_CCK_SP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M,
-};
-
struct rx_ppdu_start {
struct {
u8 pri20_mhz;
@@ -994,7 +955,41 @@ struct rx_pkt_end {
__le32 info0; /* %RX_PKT_END_INFO0_ */
__le32 phy_timestamp_1;
__le32 phy_timestamp_2;
- __le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+} __packed;
+
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_LSB 15
+#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_MASK 0xc0000000
+#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_LSB 30
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_STATUS BIT(14)
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_STATUS BIT(29)
+
+#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_MASK 0x0000000c
+#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_LSB 2
+#define RX_LOCATION_INFO1_PKT_BW_MASK 0x00000030
+#define RX_LOCATION_INFO1_PKT_BW_LSB 4
+#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_MASK 0x0000ff00
+#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_LSB 8
+#define RX_LOCATION_INFO1_RTT_MSC_RATE_MASK 0x000f0000
+#define RX_LOCATION_INFO1_RTT_MSC_RATE_LSB 16
+#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_MASK 0x00300000
+#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_LSB 20
+#define RX_LOCATION_INFO1_TIMING_BACKOFF_MASK 0x07c00000
+#define RX_LOCATION_INFO1_TIMING_BACKOFF_LSB 22
+#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_MASK 0x18000000
+#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_LSB 27
+#define RX_LOCATION_INFO1_RTT_CFR_STATUS BIT(0)
+#define RX_LOCATION_INFO1_RTT_CIR_STATUS BIT(1)
+#define RX_LOCATION_INFO1_RTT_GI_TYPE BIT(7)
+#define RX_LOCATION_INFO1_RTT_MAC_PHY_PHASE BIT(29)
+#define RX_LOCATION_INFO1_RTT_TX_DATA_START_X_PHASE BIT(30)
+#define RX_LOCATION_INFO1_RX_LOCATION_VALID BIT(31)
+
+struct rx_location_info {
+ __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
+ __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
} __packed;
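The MASK/LSB pairs above follow ath10k's usual MS() extraction convention (MS() as defined in hw.h). A hedged example of decoding the new split words; the variable names are illustrative:

	/* Sketch: pull the VHT fine-timing factor and the valid bit out of
	 * the two rx_location_info words.
	 */
	u32 info0 = __le32_to_cpu(loc->rx_location_info0);
	u32 info1 = __le32_to_cpu(loc->rx_location_info1);
	u32 rtt_fac_vht = MS(info0, RX_LOCATION_INFO0_RTT_FAC_VHT);
	bool valid = !!(info1 & RX_LOCATION_INFO1_RX_LOCATION_VALID);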
enum rx_phy_ppdu_end_info0 {
@@ -1067,6 +1062,17 @@ struct rx_phy_ppdu_end {
struct rx_ppdu_end_qca99x0 {
struct rx_pkt_end rx_pkt_end;
+ __le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+ __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+struct rx_ppdu_end_qca9984 {
+ struct rx_pkt_end rx_pkt_end;
+ struct rx_location_info rx_location_info;
struct rx_phy_ppdu_end rx_phy_ppdu_end;
__le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
__le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
@@ -1080,6 +1086,7 @@ struct rx_ppdu_end {
struct rx_ppdu_end_qca988x qca988x;
struct rx_ppdu_end_qca6174 qca6174;
struct rx_ppdu_end_qca99x0 qca99x0;
+ struct rx_ppdu_end_qca9984 qca9984;
} __packed;
} __packed;
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 4671cfbcd8f7..7d9b0da1b010 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -101,9 +101,9 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
break;
case 80:
/* TODO: As experiments with an analogue sender and various
- * configuaritions (fft-sizes of 64/128/256 and 20/40/80 Mhz)
+ * configurations (fft-sizes of 64/128/256 and 20/40/80 Mhz)
* show, the particular configuration of 80 MHz/64 bins does
- * not match with the other smaples at all. Until the reason
+ * not match with the other samples at all. Until the reason
* for that is found, don't report these samples.
*/
if (bin_len == 64)
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
index 3ca3fae408a7..adf4592374b4 100644
--- a/drivers/net/wireless/ath/ath10k/swap.c
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -135,26 +135,17 @@ ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
}
int ath10k_swap_code_seg_configure(struct ath10k *ar,
- enum ath10k_swap_code_seg_bin_type type)
+ const struct ath10k_fw_file *fw_file)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info = NULL;
- switch (type) {
- case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
- if (!ar->swap.firmware_swap_code_seg_info)
- return 0;
-
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
- seg_info = ar->swap.firmware_swap_code_seg_info;
- break;
- default:
- case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
- case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
- ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
- type);
+ if (!fw_file->firmware_swap_code_seg_info)
return 0;
- }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+
+ seg_info = fw_file->firmware_swap_code_seg_info;
ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
&seg_info->seg_hw_info,
@@ -168,32 +159,41 @@ int ath10k_swap_code_seg_configure(struct ath10k *ar,
return 0;
}
-void ath10k_swap_code_seg_release(struct ath10k *ar)
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file)
{
- ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
- ar->swap.firmware_codeswap_data = NULL;
- ar->swap.firmware_codeswap_len = 0;
- ar->swap.firmware_swap_code_seg_info = NULL;
+ ath10k_swap_code_seg_free(ar, fw_file->firmware_swap_code_seg_info);
+
+ /* FIXME: these two assignments look to be in the wrong place! Shouldn't
+ * they be in ath10k_core_free_firmware_files() like the rest?
+ */
+ fw_file->codeswap_data = NULL;
+ fw_file->codeswap_len = 0;
+
+ fw_file->firmware_swap_code_seg_info = NULL;
}
-int ath10k_swap_code_seg_init(struct ath10k *ar)
+int ath10k_swap_code_seg_init(struct ath10k *ar, struct ath10k_fw_file *fw_file)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info;
+ const void *codeswap_data;
+ size_t codeswap_len;
+
+ codeswap_data = fw_file->codeswap_data;
+ codeswap_len = fw_file->codeswap_len;
- if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
+ if (!codeswap_len || !codeswap_data)
return 0;
- seg_info = ath10k_swap_code_seg_alloc(ar,
- ar->swap.firmware_codeswap_len);
+ seg_info = ath10k_swap_code_seg_alloc(ar, codeswap_len);
if (!seg_info) {
ath10k_err(ar, "failed to allocate fw code swap segment\n");
return -ENOMEM;
}
ret = ath10k_swap_code_seg_fill(ar, seg_info,
- ar->swap.firmware_codeswap_data,
- ar->swap.firmware_codeswap_len);
+ codeswap_data, codeswap_len);
if (ret) {
ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
@@ -202,7 +202,7 @@ int ath10k_swap_code_seg_init(struct ath10k *ar)
return ret;
}
- ar->swap.firmware_swap_code_seg_info = seg_info;
+ fw_file->firmware_swap_code_seg_info = seg_info;
return 0;
}
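With the fw_file handle threaded through, the normal and UTF images can each carry their own code swap segment. A hedged sketch of the intended call sequence for whichever image is being booted:

	/* Sketch: per-image code swap setup; release on configure failure. */
	ret = ath10k_swap_code_seg_init(ar, fw_file);
	if (ret)
		return ret;

	ret = ath10k_swap_code_seg_configure(ar, fw_file);
	if (ret)
		ath10k_swap_code_seg_release(ar, fw_file);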
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
index 5c89952dd20f..f5dc0476493e 100644
--- a/drivers/net/wireless/ath/ath10k/swap.h
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -23,6 +23,8 @@
/* Currently only one swap segment is supported */
#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1
+struct ath10k_fw_file;
+
struct ath10k_swap_code_seg_tlv {
__le32 address;
__le32 length;
@@ -39,12 +41,6 @@ union ath10k_swap_code_seg_item {
struct ath10k_swap_code_seg_tail tail;
} __packed;
-enum ath10k_swap_code_seg_bin_type {
- ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
- ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
- ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
-};
-
struct ath10k_swap_code_seg_hw_info {
/* Swap binary image size */
__le32 swap_size;
@@ -65,8 +61,10 @@ struct ath10k_swap_code_seg_info {
};
int ath10k_swap_code_seg_configure(struct ath10k *ar,
- enum ath10k_swap_code_seg_bin_type type);
-void ath10k_swap_code_seg_release(struct ath10k *ar);
-int ath10k_swap_code_seg_init(struct ath10k *ar);
+ const struct ath10k_fw_file *fw_file);
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file);
+int ath10k_swap_code_seg_init(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 05a421bc322a..a47cab44d9c8 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -405,7 +405,7 @@ Fw Mode/SubMode Mask
* 1. target firmware would check magic number and if it's a match, firmware
* would consider the bits[0:15] are valid and base on that to calculate
* the end of DRAM. Early allocation would be located at that area and
- * may be reclaimed when necesary
+ * may be reclaimed when necessary
* 2. if no magic number is found, early allocation would happen at "_end"
* symbol of ROM which is located before the app-data and might NOT be
* re-claimable. If this is adopted, link script should keep this in
@@ -438,7 +438,7 @@ Fw Mode/SubMode Mask
((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
#define HI_DEV_LPL_TYPE_GET(_devix) \
(HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
- (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
+ (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix) * 2)))
#define HOST_INTEREST_SMPS_IS_ALLOWED() \
((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
@@ -447,6 +447,9 @@ Fw Mode/SubMode Mask
#define QCA988X_BOARD_DATA_SZ 7168
#define QCA988X_BOARD_EXT_DATA_SZ 0
+#define QCA9887_BOARD_DATA_SZ 7168
+#define QCA9887_BOARD_EXT_DATA_SZ 0
+
#define QCA6174_BOARD_DATA_SZ 8192
#define QCA6174_BOARD_EXT_DATA_SZ 0
@@ -456,4 +459,7 @@ Fw Mode/SubMode Mask
#define QCA99X0_BOARD_DATA_SZ 12288
#define QCA99X0_BOARD_EXT_DATA_SZ 0
+#define QCA4019_BOARD_DATA_SZ 12064
+#define QCA4019_BOARD_EXT_DATA_SZ 0
+
#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 1d5a2fdcbf56..ed85f938e3c0 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -23,6 +23,7 @@
#include "wmi.h"
#include "hif.h"
#include "hw.h"
+#include "core.h"
#include "testmode_i.h"
@@ -45,7 +46,7 @@ bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode event wmi cmd_id %d skb %p skb->len %d\n",
+ "testmode event wmi cmd_id %d skb %pK skb->len %d\n",
cmd_id, skb, skb->len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
@@ -139,127 +140,8 @@ static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
return cfg80211_testmode_reply(skb);
}
-static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar)
-{
- size_t len, magic_len, ie_len;
- struct ath10k_fw_ie *hdr;
- char filename[100];
- __le32 *version;
- const u8 *data;
- int ie_id, ret;
-
- snprintf(filename, sizeof(filename), "%s/%s",
- ar->hw_params.fw.dir, ATH10K_FW_UTF_API2_FILE);
-
- /* load utf firmware image */
- ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
- if (ret) {
- ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
- filename, ret);
- return ret;
- }
-
- data = ar->testmode.utf->data;
- len = ar->testmode.utf->size;
-
- /* FIXME: call release_firmware() in error cases */
-
- /* magic also includes the null byte, check that as well */
- magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
-
- if (len < magic_len) {
- ath10k_err(ar, "utf firmware file is too small to contain magic\n");
- ret = -EINVAL;
- goto err;
- }
-
- if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
- ath10k_err(ar, "invalid firmware magic\n");
- ret = -EINVAL;
- goto err;
- }
-
- /* jump over the padding */
- magic_len = ALIGN(magic_len, 4);
-
- len -= magic_len;
- data += magic_len;
-
- /* loop elements */
- while (len > sizeof(struct ath10k_fw_ie)) {
- hdr = (struct ath10k_fw_ie *)data;
-
- ie_id = le32_to_cpu(hdr->id);
- ie_len = le32_to_cpu(hdr->len);
-
- len -= sizeof(*hdr);
- data += sizeof(*hdr);
-
- if (len < ie_len) {
- ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
- ie_id, len, ie_len);
- ret = -EINVAL;
- goto err;
- }
-
- switch (ie_id) {
- case ATH10K_FW_IE_FW_VERSION:
- if (ie_len > sizeof(ar->testmode.utf_version) - 1)
- break;
-
- memcpy(ar->testmode.utf_version, data, ie_len);
- ar->testmode.utf_version[ie_len] = '\0';
-
- ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode found fw utf version %s\n",
- ar->testmode.utf_version);
- break;
- case ATH10K_FW_IE_TIMESTAMP:
- /* ignore timestamp, but don't warn about it either */
- break;
- case ATH10K_FW_IE_FW_IMAGE:
- ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode found fw image ie (%zd B)\n",
- ie_len);
-
- ar->testmode.utf_firmware_data = data;
- ar->testmode.utf_firmware_len = ie_len;
- break;
- case ATH10K_FW_IE_WMI_OP_VERSION:
- if (ie_len != sizeof(u32))
- break;
- version = (__le32 *)data;
- ar->testmode.op_version = le32_to_cpup(version);
- ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode found fw ie wmi op version %d\n",
- ar->testmode.op_version);
- break;
- default:
- ath10k_warn(ar, "Unknown testmode FW IE: %u\n",
- le32_to_cpu(hdr->id));
- break;
- }
- /* jump over the padding */
- ie_len = ALIGN(ie_len, 4);
-
- len -= ie_len;
- data += ie_len;
- }
-
- if (!ar->testmode.utf_firmware_data || !ar->testmode.utf_firmware_len) {
- ath10k_err(ar, "No ATH10K_FW_IE_FW_IMAGE found\n");
- ret = -EINVAL;
- goto err;
- }
-
- return 0;
-
-err:
- release_firmware(ar->testmode.utf);
-
- return ret;
-}
-
-static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
+static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file)
{
char filename[100];
int ret;
@@ -268,7 +150,7 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
/* load utf firmware image */
- ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
+ ret = request_firmware(&fw_file->firmware, filename, ar->dev);
if (ret) {
ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
filename, ret);
@@ -281,24 +163,27 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
* correct WMI interface.
*/
- ar->testmode.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
- ar->testmode.utf_firmware_data = ar->testmode.utf->data;
- ar->testmode.utf_firmware_len = ar->testmode.utf->size;
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+ fw_file->firmware_data = fw_file->firmware->data;
+ fw_file->firmware_len = fw_file->firmware->size;
return 0;
}
static int ath10k_tm_fetch_firmware(struct ath10k *ar)
{
+ struct ath10k_fw_components *utf_mode_fw;
int ret;
- ret = ath10k_tm_fetch_utf_firmware_api_2(ar);
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_UTF_API2_FILE,
+ &ar->testmode.utf_mode_fw.fw_file);
if (ret == 0) {
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2");
- return 0;
+ goto out;
}
- ret = ath10k_tm_fetch_utf_firmware_api_1(ar);
+ ret = ath10k_tm_fetch_utf_firmware_api_1(ar, &ar->testmode.utf_mode_fw.fw_file);
if (ret) {
ath10k_err(ar, "failed to fetch utf firmware binary: %d", ret);
return ret;
@@ -306,6 +191,21 @@ static int ath10k_tm_fetch_firmware(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1");
+out:
+ utf_mode_fw = &ar->testmode.utf_mode_fw;
+
+ /* Use the same board data file as the normal firmware uses (it's
+ * still "owned" by normal_mode_fw, so we shouldn't free it).
+ */
+ utf_mode_fw->board_data = ar->normal_mode_fw.board_data;
+ utf_mode_fw->board_len = ar->normal_mode_fw.board_len;
+
+ if (!utf_mode_fw->fw_file.otp_data) {
+ ath10k_info(ar, "utf.bin didn't contain otp binary, taking it from the normal mode firmware");
+ utf_mode_fw->fw_file.otp_data = ar->normal_mode_fw.fw_file.otp_data;
+ utf_mode_fw->fw_file.otp_len = ar->normal_mode_fw.fw_file.otp_len;
+ }
+
return 0;
}
@@ -329,7 +229,7 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
goto err;
}
- if (WARN_ON(ar->testmode.utf != NULL)) {
+ if (WARN_ON(ar->testmode.utf_mode_fw.fw_file.firmware != NULL)) {
/* utf image is already downloaded, it shouldn't be */
ret = -EEXIST;
goto err;
@@ -341,30 +241,34 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
goto err;
}
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len) {
+ ret = ath10k_swap_code_seg_init(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to init utf code swap segment: %d\n",
+ ret);
+ goto err_release_utf_mode_fw;
+ }
+ }
+
spin_lock_bh(&ar->data_lock);
ar->testmode.utf_monitor = true;
spin_unlock_bh(&ar->data_lock);
- BUILD_BUG_ON(sizeof(ar->fw_features) !=
- sizeof(ar->testmode.orig_fw_features));
-
- memcpy(ar->testmode.orig_fw_features, ar->fw_features,
- sizeof(ar->fw_features));
- ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
- memset(ar->fw_features, 0, sizeof(ar->fw_features));
-
- ar->wmi.op_version = ar->testmode.op_version;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n",
- ar->wmi.op_version);
+ ar->testmode.utf_mode_fw.fw_file.wmi_op_version);
ret = ath10k_hif_power_up(ar);
if (ret) {
ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
ar->state = ATH10K_STATE_OFF;
- goto err_fw_features;
+ goto err_release_utf_mode_fw;
}
- ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF);
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF,
+ &ar->testmode.utf_mode_fw);
if (ret) {
ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
ar->state = ATH10K_STATE_OFF;
@@ -373,8 +277,8 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
ar->state = ATH10K_STATE_UTF;
- if (strlen(ar->testmode.utf_version) > 0)
- ver = ar->testmode.utf_version;
+ if (strlen(ar->testmode.utf_mode_fw.fw_file.fw_version) > 0)
+ ver = ar->testmode.utf_mode_fw.fw_file.fw_version;
else
ver = "API 1";
@@ -387,14 +291,14 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
err_power_down:
ath10k_hif_power_down(ar);
-err_fw_features:
- /* return the original firmware features */
- memcpy(ar->fw_features, ar->testmode.orig_fw_features,
- sizeof(ar->fw_features));
- ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
+err_release_utf_mode_fw:
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+ ath10k_swap_code_seg_release(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
- release_firmware(ar->testmode.utf);
- ar->testmode.utf = NULL;
+ release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+ ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
err:
mutex_unlock(&ar->conf_mutex);
@@ -415,13 +319,13 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
spin_unlock_bh(&ar->data_lock);
- /* return the original firmware features */
- memcpy(ar->fw_features, ar->testmode.orig_fw_features,
- sizeof(ar->fw_features));
- ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+ ath10k_swap_code_seg_release(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
- release_firmware(ar->testmode.utf);
- ar->testmode.utf = NULL;
+ release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+ ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
ar->state = ATH10K_STATE_OFF;
}
@@ -479,7 +383,7 @@ static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
+ "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
cmd_id, buf, buf_len);
ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index 60fe562e3041..0a47269be289 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -187,12 +187,12 @@ int ath10k_thermal_register(struct ath10k *ar)
/* Do not register hwmon device when temperature reading is not
* supported by firmware
*/
- if (ar->wmi.op_version != ATH10K_FW_WMI_OP_VERSION_10_2_4)
+ if (!(ar->wmi.ops->gen_pdev_get_temperature))
return 0;
/* Avoid linking error on devm_hwmon_device_register_with_groups, I
* guess linux/hwmon.h is missing proper stubs. */
- if (!config_enabled(CONFIG_HWMON))
+ if (!IS_REACHABLE(CONFIG_HWMON))
return 0;
hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index b610ea5caae8..3abb97f63b1e 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -20,7 +20,7 @@
#define ATH10K_QUIET_PERIOD_MIN 25
#define ATH10K_QUIET_START_OFFSET 10
#define ATH10K_HWMON_NAME_LEN 15
-#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
+#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
#define ATH10K_THERMAL_THROTTLE_MAX 100
struct ath10k_thermal {
@@ -36,7 +36,7 @@ struct ath10k_thermal {
int temperature;
};
-#ifdef CONFIG_THERMAL
+#if IS_REACHABLE(CONFIG_THERMAL)
int ath10k_thermal_register(struct ath10k *ar);
void ath10k_thermal_unregister(struct ath10k *ar);
void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 71bdb368813d..e0d00cef0bd8 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -250,6 +250,7 @@ TRACE_EVENT(ath10k_wmi_dbglog,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
+ __field(u8, hw_type);
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
),
@@ -257,14 +258,16 @@ TRACE_EVENT(ath10k_wmi_dbglog,
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
+ __entry->hw_type = ar->hw_rev;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
- "%s %s len %zu",
+ "%s %s %d len %zu",
__get_str(driver),
__get_str(device),
+ __entry->hw_type,
__entry->buf_len
)
);
@@ -277,6 +280,7 @@ TRACE_EVENT(ath10k_htt_pktlog,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
+ __field(u8, hw_type);
__field(u16, buf_len)
__dynamic_array(u8, pktlog, buf_len)
),
@@ -284,14 +288,16 @@ TRACE_EVENT(ath10k_htt_pktlog,
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
+ __entry->hw_type = ar->hw_rev;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(pktlog), buf, buf_len);
),
TP_printk(
- "%s %s size %hu",
+ "%s %s %d size %hu",
__get_str(driver),
__get_str(device),
+ __entry->hw_type,
__entry->buf_len
)
);
@@ -440,6 +446,7 @@ TRACE_EVENT(ath10k_htt_rx_desc,
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
+ __field(u8, hw_type);
__field(u16, len)
__dynamic_array(u8, rxdesc, len)
),
@@ -447,14 +454,16 @@ TRACE_EVENT(ath10k_htt_rx_desc,
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
+ __entry->hw_type = ar->hw_rev;
__entry->len = len;
memcpy(__get_dynamic_array(rxdesc), data, len);
),
TP_printk(
- "%s %s rxdesc len %d",
+ "%s %s %d rxdesc len %d",
__get_str(driver),
__get_str(device),
+ __entry->hw_type,
__entry->len
)
);
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 6d1105ab4592..9852c5d51139 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -23,7 +23,12 @@
static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
{
- if (!ATH10K_SKB_CB(skb)->htt.is_offchan)
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (likely(!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)))
+ return;
+
+ if (ath10k_mac_tx_frm_has_freq(ar))
return;
/* If the original wait_for_completion() timed out before
@@ -39,32 +44,30 @@ static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */
- ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done)
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_tx_info *info;
+ struct ieee80211_txq *txq;
struct ath10k_skb_cb *skb_cb;
+ struct ath10k_txq *artxq;
struct sk_buff *msdu;
- struct ieee80211_hdr *hdr;
- __le16 fc;
- bool limit_mgmt_desc = false;
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
- tx_done->msdu_id, !!tx_done->discard,
- !!tx_done->no_ack, !!tx_done->success);
+ "htt tx completion msdu_id %u status %d\n",
+ tx_done->msdu_id, tx_done->status);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
tx_done->msdu_id);
- return;
+ return -EINVAL;
}
spin_lock_bh(&htt->tx_lock);
@@ -73,23 +76,23 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
tx_done->msdu_id);
spin_unlock_bh(&htt->tx_lock);
- return;
+ return -ENOENT;
}
- hdr = (struct ieee80211_hdr *)msdu->data;
- fc = hdr->frame_control;
+ skb_cb = ATH10K_SKB_CB(msdu);
+ txq = skb_cb->txq;
- if (unlikely(ieee80211_is_mgmt(fc)) &&
- ar->hw_params.max_probe_resp_desc_thres)
- limit_mgmt_desc = true;
+ if (txq) {
+ artxq = (void *)txq->drv_priv;
+ artxq->num_fw_queued--;
+ }
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
- __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
+ ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
- skb_cb = ATH10K_SKB_CB(msdu);
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
ath10k_report_offchan_tx(htt->ar, msdu);
@@ -98,22 +101,25 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
memset(&info->status, 0, sizeof(info->status));
trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
- if (tx_done->discard) {
+ if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
ieee80211_free_txskb(htt->ar->hw, msdu);
- return;
+ return 0;
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- if (tx_done->no_ack)
+ if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
info->flags &= ~IEEE80211_TX_STAT_ACK;
- if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
+
+ return 0;
}
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
@@ -126,7 +132,7 @@ struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
list_for_each_entry(peer, &ar->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
- if (memcmp(peer->addr, addr, ETH_ALEN))
+ if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
@@ -162,7 +168,7 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
(mapped == expect_mapped ||
test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
- }), 3*HZ);
+ }), 3 * HZ);
if (time_left == 0)
return -ETIMEDOUT;
@@ -186,6 +192,13 @@ void ath10k_peer_map_event(struct ath10k_htt *htt,
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
+ if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+ ath10k_warn(ar,
+ "received htt peer map event with idx out of bounds: %hu\n",
+ ev->peer_id);
+ return;
+ }
+
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
if (!peer) {
@@ -202,6 +215,8 @@ void ath10k_peer_map_event(struct ath10k_htt *htt,
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
ev->vdev_id, ev->addr, ev->peer_id);
+ WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer));
+ ar->peer_map[ev->peer_id] = peer;
set_bit(ev->peer_id, peer->peer_ids);
exit:
spin_unlock_bh(&ar->data_lock);
@@ -213,6 +228,13 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
+ if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+ ath10k_warn(ar,
+ "received htt peer unmap event with idx out of bounds: %hu\n",
+ ev->peer_id);
+ return;
+ }
+
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, ev->peer_id);
if (!peer) {
@@ -224,6 +246,7 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
peer->vdev_id, peer->addr, ev->peer_id);
+ ar->peer_map[ev->peer_id] = NULL;
clear_bit(ev->peer_id, peer->peer_ids);
if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
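The bounds checks and ar->peer_map[] updates above keep a direct peer_id-indexed table in sync with the map/unmap events. A hedged sketch of the O(1) lookup this enables (caller holds ar->data_lock, as the existing find helpers require; the function name is illustrative):

	/* Sketch: constant-time peer lookup via the new peer_map table,
	 * replacing a walk of ar->peers.
	 */
	static struct ath10k_peer *peer_find_by_id_sketch(struct ath10k *ar,
							  int peer_id)
	{
		if (peer_id >= ATH10K_MAX_NUM_PEER_IDS)
			return NULL;

		return ar->peer_map[peer_id];
	}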
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index a90e09f5c7f2..e7ea1ae1c438 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -19,8 +19,8 @@
#include "htt.h"
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done);
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 8f4f6a892581..c9a8bb1186f2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -51,6 +51,8 @@ struct wmi_ops {
struct wmi_roam_ev_arg *arg);
int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
struct wmi_wow_ev_arg *arg);
+ int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg);
enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
@@ -123,7 +125,7 @@ struct wmi_ops {
enum wmi_force_fw_hang_type type,
u32 delay_ms);
struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
- struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
+ struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
u32 log_level);
struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
@@ -186,6 +188,15 @@ struct wmi_ops {
u8 enable,
u32 detect_level,
u32 detect_margin);
+ struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap);
+ int (*get_vdev_subtype)(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype);
+ struct sk_buff *(*gen_pdev_bss_chan_info_req)
+ (struct ath10k *ar,
+ enum wmi_bss_survey_req_type type);
+ struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -341,6 +352,16 @@ ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}
+static inline int
+ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_echo_ev)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
+}
+
static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
@@ -924,7 +945,7 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar,
}
static inline int
-ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
+ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
struct sk_buff *skb;
@@ -1327,4 +1348,67 @@ ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}
+static inline int
+ath10k_wmi_ext_resource_config(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->ext_resource_config)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->ext_resource_config(ar, type,
+ fw_feature_bitmap);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->ext_resource_cfg_cmdid);
+}
+
+static inline int
+ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
+{
+ if (!ar->wmi.ops->get_vdev_subtype)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->get_vdev_subtype(ar, subtype);
+}
+
+static inline int
+ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
+ enum wmi_bss_survey_req_type type)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct sk_buff *skb;
+
+ if (!wmi->ops->gen_pdev_bss_chan_info_req)
+ return -EOPNOTSUPP;
+
+ skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ wmi->cmd->pdev_bss_chan_info_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_echo(struct ath10k *ar, u32 value)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct sk_buff *skb;
+
+ if (!wmi->ops->gen_echo)
+ return -EOPNOTSUPP;
+
+ skb = wmi->ops->gen_echo(ar, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
+}
+
#endif
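The wrappers added to this header all follow one pattern: bail with -EOPNOTSUPP when the ABI's ops table lacks the op, otherwise build the skb and send it. A hedged caller-side sketch for the new echo op (the value is arbitrary):

	/* Sketch: -EOPNOTSUPP falls out naturally on WMI ABIs whose ops
	 * table has no gen_echo.
	 */
	ret = ath10k_wmi_echo(ar, 0x12345678);
	if (ret && ret != -EOPNOTSUPP)
		ath10k_warn(ar, "failed to send wmi echo: %d\n", ret);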
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 6fbd17b69469..e64f59300a7c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1223,6 +1223,33 @@ ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_echo_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->value = ev->value;
+
+ kfree(tb);
+ return 0;
+}
+
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
{
@@ -2441,7 +2468,7 @@ ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
}
static struct sk_buff *
-ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
u32 log_level) {
struct wmi_tlv_dbglog_cmd *cmd;
struct wmi_tlv *tlv;
@@ -3081,6 +3108,34 @@ ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
return skb;
}
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
+{
+ struct wmi_echo_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->value = cpu_to_le32(value);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
+ return skb;
+}
+
/****************/
/* TLV mappings */
/****************/
@@ -3409,6 +3464,7 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
static const struct wmi_ops wmi_tlv_ops = {
@@ -3428,6 +3484,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+ .pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
@@ -3483,6 +3540,26 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_tlv_op_gen_echo,
+};
+
+static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
+ .auth = WMI_TLV_PEER_AUTH,
+ .qos = WMI_TLV_PEER_QOS,
+ .need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY,
+ .need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY,
+ .apsd = WMI_TLV_PEER_APSD,
+ .ht = WMI_TLV_PEER_HT,
+ .bw40 = WMI_TLV_PEER_40MHZ,
+ .stbc = WMI_TLV_PEER_STBC,
+ .ldbc = WMI_TLV_PEER_LDPC,
+ .dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS,
+ .static_mimops = WMI_TLV_PEER_STATIC_MIMOPS,
+ .spatial_mux = WMI_TLV_PEER_SPATIAL_MUX,
+ .vht = WMI_TLV_PEER_VHT,
+ .bw80 = WMI_TLV_PEER_80MHZ,
+ .pmf = WMI_TLV_PEER_PMF,
};
/************/
@@ -3495,4 +3572,5 @@ void ath10k_wmi_tlv_attach(struct ath10k *ar)
ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
ar->wmi.ops = &wmi_tlv_ops;
+ ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index ad655c44afdb..b8aa6000573c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -527,6 +527,24 @@ enum wmi_tlv_vdev_param {
WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
};
+enum wmi_tlv_peer_flags {
+ WMI_TLV_PEER_AUTH = 0x00000001,
+ WMI_TLV_PEER_QOS = 0x00000002,
+ WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_TLV_PEER_APSD = 0x00000800,
+ WMI_TLV_PEER_HT = 0x00001000,
+ WMI_TLV_PEER_40MHZ = 0x00002000,
+ WMI_TLV_PEER_STBC = 0x00008000,
+ WMI_TLV_PEER_LDPC = 0x00010000,
+ WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_TLV_PEER_VHT = 0x02000000,
+ WMI_TLV_PEER_80MHZ = 0x04000000,
+ WMI_TLV_PEER_PMF = 0x08000000,
+};
+
enum wmi_tlv_tag {
WMI_TLV_TAG_LAST_RESERVED = 15,
@@ -950,8 +968,8 @@ enum wmi_tlv_service {
#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
((svc_id) < (len) && \
- __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
- BIT((svc_id)%(sizeof(u32))))
+ __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
+ BIT((svc_id) % (sizeof(u32))))
#define SVCMAP(x, y, len) \
do { \
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 7569db0f69b5..54df425bb0fc 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -29,6 +29,9 @@
#include "p2p.h"
#include "hw.h"
+#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
+#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
+
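These two defines suggest an echo-based command barrier: fire an echo carrying a magic cookie, then wait for the matching event. A hedged sketch, assuming a completion (named wmi.barrier here, not shown in this hunk) that the echo event handler completes when the cookie matches:

	/* Sketch: WMI barrier built on the echo round-trip. */
	reinit_completion(&ar->wmi.barrier);

	ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&ar->wmi.barrier,
					 ATH10K_WMI_BARRIER_TIMEOUT_HZ))
		return -ETIMEDOUT;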
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
.init_cmdid = WMI_INIT_CMDID,
@@ -521,7 +524,8 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
- .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid =
+ WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
};
/* 10.4 WMI cmd track */
@@ -705,6 +709,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
.pdev_bss_chan_info_request_cmdid =
WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ .ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
};
/* MAIN WMI VDEV param map */
@@ -780,6 +785,7 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map */
@@ -855,6 +861,7 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -929,6 +936,7 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_10X_VDEV_PARAM_TSF_INCREMENT,
};
static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
@@ -1004,6 +1012,7 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+ .set_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
};
static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -1098,6 +1107,7 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
@@ -1193,6 +1203,7 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
@@ -1288,6 +1299,7 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
};
/* firmware 10.2 specific mappings */
@@ -1544,6 +1556,62 @@ static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
.wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
.arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
.arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+ .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
+};
+
+static const struct wmi_peer_flags_map wmi_peer_flags_map = {
+ .auth = WMI_PEER_AUTH,
+ .qos = WMI_PEER_QOS,
+ .need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY,
+ .need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY,
+ .apsd = WMI_PEER_APSD,
+ .ht = WMI_PEER_HT,
+ .bw40 = WMI_PEER_40MHZ,
+ .stbc = WMI_PEER_STBC,
+ .ldbc = WMI_PEER_LDPC,
+ .dyn_mimops = WMI_PEER_DYN_MIMOPS,
+ .static_mimops = WMI_PEER_STATIC_MIMOPS,
+ .spatial_mux = WMI_PEER_SPATIAL_MUX,
+ .vht = WMI_PEER_VHT,
+ .bw80 = WMI_PEER_80MHZ,
+ .vht_2g = WMI_PEER_VHT_2G,
+ .pmf = WMI_PEER_PMF,
+};
+
+static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
+ .auth = WMI_10X_PEER_AUTH,
+ .qos = WMI_10X_PEER_QOS,
+ .need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY,
+ .need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY,
+ .apsd = WMI_10X_PEER_APSD,
+ .ht = WMI_10X_PEER_HT,
+ .bw40 = WMI_10X_PEER_40MHZ,
+ .stbc = WMI_10X_PEER_STBC,
+ .ldbc = WMI_10X_PEER_LDPC,
+ .dyn_mimops = WMI_10X_PEER_DYN_MIMOPS,
+ .static_mimops = WMI_10X_PEER_STATIC_MIMOPS,
+ .spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
+ .vht = WMI_10X_PEER_VHT,
+ .bw80 = WMI_10X_PEER_80MHZ,
+};
+
+static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
+ .auth = WMI_10_2_PEER_AUTH,
+ .qos = WMI_10_2_PEER_QOS,
+ .need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY,
+ .need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY,
+ .apsd = WMI_10_2_PEER_APSD,
+ .ht = WMI_10_2_PEER_HT,
+ .bw40 = WMI_10_2_PEER_40MHZ,
+ .stbc = WMI_10_2_PEER_STBC,
+ .ldbc = WMI_10_2_PEER_LDPC,
+ .dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS,
+ .static_mimops = WMI_10_2_PEER_STATIC_MIMOPS,
+ .spatial_mux = WMI_10_2_PEER_SPATIAL_MUX,
+ .vht = WMI_10_2_PEER_VHT,
+ .bw80 = WMI_10_2_PEER_80MHZ,
+ .vht_2g = WMI_10_2_PEER_VHT_2G,
+ .pmf = WMI_10_2_PEER_PMF,
};
void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
@@ -1573,6 +1641,7 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
ch->max_power = arg->max_power;
ch->reg_power = arg->max_reg_power;
ch->antenna_max = arg->max_antenna_gain;
+ ch->max_tx_power = arg->max_power;
/* mode & flags share storage */
ch->mode = arg->mode;
@@ -1660,6 +1729,8 @@ static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
struct ath10k *ar = arvif->ar;
struct ath10k_skb_cb *cb;
struct sk_buff *bcn;
+ bool dtim_zero;
+ bool deliver_cab;
int ret;
spin_lock_bh(&ar->data_lock);
@@ -1679,12 +1750,14 @@ static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
arvif->beacon_state = ATH10K_BEACON_SENDING;
spin_unlock_bh(&ar->data_lock);
+ dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO);
+ deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB);
ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
arvif->vdev_id,
bcn->data, bcn->len,
cb->paddr,
- cb->bcn.dtim_zero,
- cb->bcn.deliver_cab);
+ dtim_zero,
+ deliver_cab);
spin_lock_bh(&ar->data_lock);
@@ -1744,7 +1817,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
ret = -ESHUTDOWN;
(ret != -EAGAIN);
- }), 3*HZ);
+ }), 3 * HZ);
if (ret)
dev_kfree_skb_any(skb);
@@ -1755,16 +1828,26 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
static struct sk_buff *
ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_vif *arvif;
struct wmi_mgmt_tx_cmd *cmd;
struct ieee80211_hdr *hdr;
struct sk_buff *skb;
int len;
+ u32 vdev_id;
u32 buf_len = msdu->len;
u16 fc;
hdr = (struct ieee80211_hdr *)msdu->data;
fc = le16_to_cpu(hdr->frame_control);
+ if (cb->vif) {
+ arvif = (void *)cb->vif->drv_priv;
+ vdev_id = arvif->vdev_id;
+ } else {
+ vdev_id = 0;
+ }
+
if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
return ERR_PTR(-EINVAL);
@@ -1786,7 +1869,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
- cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(msdu)->vdev_id);
+ cmd->hdr.vdev_id = __cpu_to_le32(vdev_id);
cmd->hdr.tx_rate = 0;
cmd->hdr.tx_power = 0;
cmd->hdr.buf_len = __cpu_to_le32(buf_len);
@@ -1794,7 +1877,7 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
memcpy(cmd->buf, msdu->data, msdu->len);
- ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
trace_ath10k_tx_hdr(ar, skb->data, skb->len);
@@ -2032,34 +2115,6 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
-static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
-{
- enum ieee80211_band band;
-
- switch (phy_mode) {
- case MODE_11A:
- case MODE_11NA_HT20:
- case MODE_11NA_HT40:
- case MODE_11AC_VHT20:
- case MODE_11AC_VHT40:
- case MODE_11AC_VHT80:
- band = IEEE80211_BAND_5GHZ;
- break;
- case MODE_11G:
- case MODE_11B:
- case MODE_11GONLY:
- case MODE_11NG_HT20:
- case MODE_11NG_HT40:
- case MODE_11AC_VHT20_2G:
- case MODE_11AC_VHT40_2G:
- case MODE_11AC_VHT80_2G:
- default:
- band = IEEE80211_BAND_2GHZ;
- }
-
- return band;
-}
-
/* If keys are configured, HW decrypts all frames
* with protected bit set. Mark such frames as decrypted.
*/
@@ -2100,10 +2155,13 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_event_v1 *ev_v1;
struct wmi_mgmt_rx_event_v2 *ev_v2;
struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
+ struct wmi_mgmt_rx_ext_info *ext_info;
size_t pull_len;
u32 msdu_len;
+ u32 len;
- if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+ if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
+ ar->running_fw->fw_file.fw_features)) {
ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
ev_hdr = &ev_v2->hdr.v1;
pull_len = sizeof(*ev_v2);
@@ -2128,6 +2186,12 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
if (skb->len < msdu_len)
return -EPROTO;
+ if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+ len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+ ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+ memcpy(&arg->ext_info, ext_info,
+ sizeof(struct wmi_mgmt_rx_ext_info));
+ }
/* the WMI buffer might've ended up being padded to 4 bytes due to HTC
* trailer with credit update. Trim the excess garbage.
*/
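
The ext-info parse above assumes the firmware appends the extension block immediately after the 4-byte-aligned frame body. A layout sketch (the block is only present when WMI_RX_STATUS_EXT_INFO is set in the rx status):

	/*
	 *  skb->data
	 *  +----------------------+---------+-----------------------------+
	 *  | mgmt frame (buf_len) | pad to  | struct wmi_mgmt_rx_ext_info |
	 *  | bytes                | 4 bytes | (__le64 rx_mac_timestamp)   |
	 *  +----------------------+---------+-----------------------------+
	 *                         ^ ext_info starts at ALIGN(buf_len, 4)
	 */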
@@ -2144,6 +2208,8 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
size_t pull_len;
u32 msdu_len;
+ struct wmi_mgmt_rx_ext_info *ext_info;
+ u32 len;
ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
ev_hdr = &ev->hdr;
@@ -2164,12 +2230,42 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
if (skb->len < msdu_len)
return -EPROTO;
+ if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+ len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+ ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+ memcpy(&arg->ext_info, ext_info,
+ sizeof(struct wmi_mgmt_rx_ext_info));
+ }
+
/* Make sure bytes added for padding are removed. */
skb_trim(skb, msdu_len);
return 0;
}
+static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
+ struct ieee80211_hdr *hdr)
+{
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return false;
+
+ /* FW delivers WEP Shared Auth frame with Protected Bit set and
+ * encrypted payload. However in case of PMF it delivers decrypted
+ * frames with Protected Bit set.
+ */
+ if (ieee80211_is_auth(hdr->frame_control))
+ return false;
+
+ /* qca99x0 based FW delivers broadcast or multicast management frames
+ * (ex: group privacy action frames in mesh) as encrypted payload.
+ */
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
+ ar->hw_params.sw_decrypt_mcast_mgmt)
+ return false;
+
+ return true;
+}
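
A quick truth table for the helper above, matching its three early returns:

	/*
	 *   protected bit clear                      -> false (plaintext)
	 *   auth frame (WEP shared auth, still enc.) -> false
	 *   mcast/bcast mgmt + sw_decrypt_mcast_mgmt -> false (host decrypts)
	 *   anything else with the protected bit set -> true (fw decrypted)
	 */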
+
int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_mgmt_rx_ev_arg arg = {};
@@ -2204,22 +2300,9 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx status %08x\n", rx_status);
- if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
- dev_kfree_skb(skb);
- return 0;
- }
-
- if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
- dev_kfree_skb(skb);
- return 0;
- }
-
- if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
- dev_kfree_skb(skb);
- return 0;
- }
-
- if (rx_status & WMI_RX_STATUS_ERR_CRC) {
+ if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) ||
+ (rx_status & (WMI_RX_STATUS_ERR_DECRYPT |
+ WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
dev_kfree_skb(skb);
return 0;
}
@@ -2227,14 +2310,19 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (rx_status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
+ if (rx_status & WMI_RX_STATUS_EXT_INFO) {
+ status->mactime =
+ __le64_to_cpu(arg.ext_info.rx_mac_timestamp);
+ status->flag |= RX_FLAG_MACTIME_END;
+ }
/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
* MODE_11B. This means phy_mode is not a reliable source for the band
* of mgmt rx.
*/
if (channel >= 1 && channel <= 14) {
- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
} else if (channel >= 36 && channel <= 165) {
- status->band = IEEE80211_BAND_5GHZ;
+ status->band = NL80211_BAND_5GHZ;
} else {
/* Shouldn't happen unless list of advertised channels to
* mac80211 has been changed.
@@ -2244,7 +2332,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
- if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
+ if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
sband = &ar->mac.sbands[status->band];
@@ -2256,13 +2344,15 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
+ /* Firmware is guaranteed to report all essential management frames via
+ * WMI while it can deliver some extra via HTT. Since there can be
+ * duplicates, split the reporting with respect to monitor/sniffing.
+ */
+ status->flag |= RX_FLAG_SKIP_MONITOR;
+
ath10k_wmi_handle_wep_reauth(ar, skb, status);
- /* FW delivers WEP Shared Auth frame with Protected Bit set and
- * encrypted payload. However in case of PMF it delivers decrypted
- * frames with Protected Bit set. */
- if (ieee80211_has_protected(hdr->frame_control) &&
- !ieee80211_is_auth(hdr->frame_control)) {
+ if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
status->flag |= RX_FLAG_DECRYPTED;
if (!ieee80211_is_action(hdr->frame_control) &&
@@ -2279,7 +2369,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_mac_handle_beacon(ar, skb);
ath10k_dbg(ar, ATH10K_DBG_MGMT,
- "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+ "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
@@ -2297,7 +2387,7 @@ static int freq_to_idx(struct ath10k *ar, int freq)
struct ieee80211_supported_band *sband;
int band, ch, idx = 0;
- for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
sband = ar->hw->wiphy->bands[band];
if (!sband)
continue;
@@ -2427,7 +2517,21 @@ exit:
void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
+ struct wmi_echo_ev_arg arg = {};
+ int ret;
+
+ ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse echo: %d\n", ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event echo value 0x%08x\n",
+ le32_to_cpu(arg.value));
+
+ if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
+ complete(&ar->wmi.barrier);
}
int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
@@ -2558,6 +2662,16 @@ void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
}
+static void
+ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst)
+{
+ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+ dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+ dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+}
+
static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
struct sk_buff *skb,
struct ath10k_fw_stats *stats)
@@ -2808,11 +2922,17 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
/* fw doesn't implement vdev stats */
for (i = 0; i < num_peer_stats; i++) {
- const struct wmi_10_2_4_peer_stats *src;
+ const struct wmi_10_2_4_ext_peer_stats *src;
struct ath10k_fw_stats_peer *dst;
+ int stats_len;
+
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
+ else
+ stats_len = sizeof(struct wmi_10_2_4_peer_stats);
src = (void *)skb->data;
- if (!skb_pull(skb, sizeof(*src)))
+ if (!skb_pull(skb, stats_len))
return -EPROTO;
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
@@ -2822,6 +2942,9 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
ath10k_wmi_pull_peer_stats(&src->common.old, dst);
dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
+
+ if (ath10k_peer_stats_enabled(ar))
+ dst->rx_duration = __le32_to_cpu(src->rx_duration);
/* FIXME: expose 10.2 specific values */
list_add_tail(&dst->list, &stats->peers);
@@ -2839,6 +2962,8 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
u32 num_pdev_ext_stats;
u32 num_vdev_stats;
u32 num_peer_stats;
+ u32 num_bcnflt_stats;
+ u32 stats_id;
int i;
if (!skb_pull(skb, sizeof(*ev)))
@@ -2848,6 +2973,8 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+ num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
+ stats_id = __le32_to_cpu(ev->stats_id);
for (i = 0; i < num_pdev_stats; i++) {
const struct wmi_10_4_pdev_stats *src;
@@ -2898,15 +3025,46 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
if (!dst)
continue;
- ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
- dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
- dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
- dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
- /* FIXME: expose 10.4 specific values */
-
+ ath10k_wmi_10_4_pull_peer_stats(src, dst);
list_add_tail(&dst->list, &stats->peers);
}
+ for (i = 0; i < num_bcnflt_stats; i++) {
+ const struct wmi_10_4_bss_bcn_filter_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+ * Note: Even though this loop seems to do nothing, it is
+ * required in order to parse the following sub-structures properly.
+ */
+ }
+
+ if ((stats_id & WMI_10_4_STAT_PEER_EXTD) == 0)
+ return 0;
+
+ stats->extended = true;
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_4_peer_extd_stats *src;
+ struct ath10k_fw_extd_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+ dst->rx_duration = __le32_to_cpu(src->rx_duration);
+ list_add_tail(&dst->list, &stats->peers_extd);
+ }
+
return 0;
}
@@ -3115,10 +3273,10 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
if (tim->dtim_count == 0) {
- ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;
+ ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO;
if (__le32_to_cpu(tim_info->tim_mcast) == 1)
- ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
+ ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB;
}
ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
@@ -3130,7 +3288,7 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
struct sk_buff *bcn,
const struct wmi_p2p_noa_info *noa)
{
- if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ if (!arvif->vif->p2p)
return;
ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
@@ -3190,6 +3348,50 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data;
+ u32 map;
+ size_t i;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_map = ev->vdev_map;
+
+ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+ if (!(map & BIT(0)))
+ continue;
+
+ /* If this happens there were some changes in firmware and
+ * ath10k should update the max size of tim_info array.
+ */
+ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+ break;
+
+ if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+ sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+ arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+ arg->tim_info[i].tim_bitmap =
+ ev->bcn_info[i].tim_info.tim_bitmap;
+ arg->tim_info[i].tim_changed =
+ ev->bcn_info[i].tim_info.tim_changed;
+ arg->tim_info[i].tim_num_ps_pending =
+ ev->bcn_info[i].tim_info.tim_num_ps_pending;
+ i++;
+ }
+
+ return 0;
+}
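
Note that bcn_info[] in the event is packed, one entry per set bit of vdev_map, which is why i only advances on set bits while map shifts every iteration. A worked example:

	/* vdev_map == 0b1010: the event carries exactly two entries --
	 * bcn_info[0] belongs to vdev 1 and bcn_info[1] to vdev 3;
	 * the cleared bits (vdevs 0 and 2) consume no array slot.
	 */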
+
static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
struct sk_buff *skb,
struct wmi_swba_ev_arg *arg)
@@ -3312,6 +3514,12 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
continue;
}
+ /* mac80211 would have already asked us to stop beaconing and
+ * bring the vdev down, so continue in that case
+ */
+ if (!arvif->is_up)
+ continue;
+
/* There are no completions for beacons so wait for next SWBA
* before telling mac80211 to decrement CSA counter
*
@@ -3361,7 +3569,6 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "failed to map beacon: %d\n",
ret);
dev_kfree_skb_any(bcn);
- ret = -EIO;
goto skip;
}
@@ -3538,7 +3745,7 @@ void ath10k_wmi_event_dfs(struct ath10k *ar,
phyerr->tsf_timestamp, tsf, buf_len);
/* Skip event if DFS disabled */
- if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
+ if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
return;
ATH10K_DFS_STAT_INC(ar, pulses_total);
@@ -4258,34 +4465,58 @@ void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
}
-static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
- u32 num_units, u32 unit_len)
+static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
+ u32 num_units, u32 unit_len)
{
dma_addr_t paddr;
- u32 pool_size;
+ u32 pool_size = 0;
int idx = ar->wmi.num_mem_chunks;
+ void *vaddr = NULL;
- pool_size = num_units * round_up(unit_len, 4);
+ if (ar->wmi.num_mem_chunks == ARRAY_SIZE(ar->wmi.mem_chunks))
+ return -ENOMEM;
- if (!pool_size)
- return -EINVAL;
+ while (!vaddr && num_units) {
+ pool_size = num_units * round_up(unit_len, 4);
+ if (!pool_size)
+ return -EINVAL;
- ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
- pool_size,
- &paddr,
- GFP_KERNEL);
- if (!ar->wmi.mem_chunks[idx].vaddr) {
- ath10k_warn(ar, "failed to allocate memory chunk\n");
- return -ENOMEM;
+ vaddr = kzalloc(pool_size, GFP_KERNEL | __GFP_NOWARN);
+ if (!vaddr)
+ num_units /= 2;
}
- memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
+ if (!num_units)
+ return -ENOMEM;
+
+ paddr = dma_map_single(ar->dev, vaddr, pool_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ar->dev, paddr)) {
+ kfree(vaddr);
+ return -ENOMEM;
+ }
+ ar->wmi.mem_chunks[idx].vaddr = vaddr;
ar->wmi.mem_chunks[idx].paddr = paddr;
ar->wmi.mem_chunks[idx].len = pool_size;
ar->wmi.mem_chunks[idx].req_id = req_id;
ar->wmi.num_mem_chunks++;
+ return num_units;
+}
+
+static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+ u32 num_units, u32 unit_len)
+{
+ int ret;
+
+ while (num_units) {
+ ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len);
+ if (ret < 0)
+ return ret;
+
+ num_units -= ret;
+ }
+
return 0;
}
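
The rework turns host-mem allocation into a best-effort loop: each chunk request is halved until kzalloc succeeds, and the outer loop keeps allocating chunks until the firmware's full request is covered (or the chunk table fills up). A worked example, assuming memory is fragmented enough that only mid-sized allocations succeed:

	/* Firmware asks for 200 units:
	 *   chunk 1: 200 fails, 100 fails, 50 succeeds -> 150 units left
	 *   chunk 2: 150 fails, 75 fails, 37 succeeds  -> 113 units left
	 *   ... until num_units reaches 0; if even a single unit cannot
	 *   be allocated (or the chunk array is full), -ENOMEM is returned.
	 */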
@@ -4450,10 +4681,6 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
arg.service_map, arg.service_map_len);
- /* only manually set fw features when not using FW IE format */
- if (ar->fw_api == 1 && ar->fw_version_build > 636)
- set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
-
if (ar->num_rf_chains > ar->max_spatial_stream) {
ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, ar->max_spatial_stream);
@@ -4483,10 +4710,16 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
}
if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
+ ar->max_num_vdevs;
+ else
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+ ar->max_num_vdevs;
+
ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
- TARGET_10_4_NUM_VDEVS;
- ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
- TARGET_10_4_NUM_VDEVS;
+ ar->max_num_vdevs;
ar->num_tids = ar->num_active_peers * 2;
ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
}
@@ -4600,6 +4833,17 @@ static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
return 0;
}
+static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ struct wmi_echo_event *ev = (void *)skb->data;
+
+ arg->value = ev->value;
+
+ return 0;
+}
+
int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_rdy_ev_arg arg = {};
@@ -4635,6 +4879,58 @@ static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
+static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct wmi_pdev_bss_chan_info_event *ev;
+ struct survey_info *survey;
+ u64 busy, total, tx, rx, rx_bss;
+ u32 freq, noise_floor;
+ u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
+ int idx;
+
+ ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
+ if (WARN_ON(skb->len < sizeof(*ev)))
+ return -EPROTO;
+
+ freq = __le32_to_cpu(ev->freq);
+ noise_floor = __le32_to_cpu(ev->noise_floor);
+ busy = __le64_to_cpu(ev->cycle_busy);
+ total = __le64_to_cpu(ev->cycle_total);
+ tx = __le64_to_cpu(ev->cycle_tx);
+ rx = __le64_to_cpu(ev->cycle_rx);
+ rx_bss = __le64_to_cpu(ev->cycle_rx_bss);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
+ freq, noise_floor, busy, total, tx, rx, rx_bss);
+
+ spin_lock_bh(&ar->data_lock);
+ idx = freq_to_idx(ar, freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
+ freq, idx);
+ goto exit;
+ }
+
+ survey = &ar->survey[idx];
+
+ survey->noise = noise_floor;
+ survey->time = div_u64(total, cc_freq_hz);
+ survey->time_busy = div_u64(busy, cc_freq_hz);
+ survey->time_rx = div_u64(rx_bss, cc_freq_hz);
+ survey->time_tx = div_u64(tx, cc_freq_hz);
+ survey->filled |= (SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->bss_survey_done);
+ return 0;
+}
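
The survey fields come straight from the firmware's cycle counters divided by the counter frequency. Assuming channel_counters_freq_hz holds cycles per millisecond (88000 for the usual 88 MHz counter), a quick sanity check of the arithmetic:

	/* With cc_freq_hz == 88000 (88 MHz counter, 88000 cycles/ms):
	 *   cycle_total == 880,000,000 -> survey->time      == 10,000 ms
	 *   cycle_busy  == 220,000,000 -> survey->time_busy ==  2,500 ms
	 * i.e. the channel was sensed busy for 25% of the observed 10 s.
	 */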
+
static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -4880,6 +5176,7 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_10_2_event_id id;
+ bool consumed;
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@@ -4889,6 +5186,18 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally even in UTF mode so that
+ * we know the UTF firmware has booted; all other events are simply
+ * bypassed to testmode.
+ */
+ if (consumed && id != WMI_10_2_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
switch (id) {
case WMI_10_2_MGMT_RX_EVENTID:
ath10k_wmi_event_mgmt_rx(ar, skb);
@@ -4978,6 +5287,9 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
ath10k_wmi_event_temperature(ar, skb);
break;
+ case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
+ break;
case WMI_10_2_RTT_KEEPALIVE_EVENTID:
case WMI_10_2_GPIO_INPUT_EVENTID:
case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
@@ -5001,6 +5313,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_10_4_event_id id;
+ bool consumed;
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
@@ -5010,6 +5323,18 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally even in UTF mode so that
+ * we know the UTF firmware has booted; all other events are simply
+ * bypassed to testmode.
+ */
+ if (consumed && id != WMI_10_4_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
switch (id) {
case WMI_10_4_MGMT_RX_EVENTID:
ath10k_wmi_event_mgmt_rx(ar, skb);
@@ -5039,6 +5364,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
ath10k_wmi_event_peer_sta_kickout(ar, skb);
break;
+ case WMI_10_4_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ break;
case WMI_10_4_HOST_SWBA_EVENTID:
ath10k_wmi_event_host_swba(ar, skb);
break;
@@ -5055,12 +5383,20 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_vdev_stopped(ar, skb);
break;
case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
+ case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
+ case WMI_10_4_WDS_PEER_EVENTID:
ath10k_dbg(ar, ATH10K_DBG_WMI,
"received event id %d not implemented\n", id);
break;
case WMI_10_4_UPDATE_STATS_EVENTID:
ath10k_wmi_event_update_stats(ar, skb);
break;
+ case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
+ ath10k_wmi_event_temperature(ar, skb);
+ break;
+ case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@@ -5379,9 +5715,16 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
u32 len, val, features;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
- config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
- config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+
+ if (ath10k_peer_stats_enabled(ar)) {
+ config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
+ } else {
+ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+ }
+
config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
@@ -5431,8 +5774,17 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
cmd = (struct wmi_init_cmd_10_2 *)buf->data;
features = WMI_10_2_RX_BATCH_MODE;
- if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+
+ if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) &&
+ test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
features |= WMI_10_2_COEX_GPIO;
+
+ if (ath10k_peer_stats_enabled(ar))
+ features |= WMI_10_2_PEER_STATS;
+
+ if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
+ features |= WMI_10_2_BSS_CHAN_INFO;
+
cmd->resource_config.feature_mask = __cpu_to_le32(features);
memcpy(&cmd->resource_config.common, &config, sizeof(config));
@@ -5459,8 +5811,8 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
__cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
- config.tx_chain_mask = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK);
- config.rx_chain_mask = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK);
+ config.tx_chain_mask = __cpu_to_le32(ar->hw_params.tx_chain_mask);
+ config.rx_chain_mask = __cpu_to_le32(ar->hw_params.rx_chain_mask);
config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
@@ -5491,7 +5843,7 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
config.gtk_offload_max_vdev =
__cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
- config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC);
+ config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
config.max_peer_ext_stats =
__cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
@@ -5651,9 +6003,8 @@ ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
for (i = 0; i < arg->n_bssids; i++)
- memcpy(&bssids->bssid_list[i],
- arg->bssids[i].bssid,
- ETH_ALEN);
+ ether_addr_copy(bssids->bssid_list[i].addr,
+ arg->bssids[i].bssid);
ptr += sizeof(*bssids);
ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
@@ -6328,6 +6679,16 @@ ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
cmd->info0 = __cpu_to_le32(info0);
}
+static void
+ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
+
+ ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
+ cmd->peer_bw_rxnss_override = 0;
+}
+
static int
ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
{
@@ -6417,6 +6778,31 @@ ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
}
static struct sk_buff *
+ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
{
struct sk_buff *skb;
@@ -6429,6 +6815,26 @@ ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
return skb;
}
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
+ enum wmi_bss_survey_req_type type)
+{
+ struct wmi_pdev_chan_info_req_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
+ cmd->type = __cpu_to_le32(type);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev bss info request type %d\n", type);
+
+ return skb;
+}
+
/* This function assumes the beacon is already DMA mapped */
static struct sk_buff *
ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
@@ -6536,7 +6942,7 @@ ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
}
static struct sk_buff *
-ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
+ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
u32 log_level)
{
struct wmi_dbglog_cfg_cmd *cmd;
@@ -6574,6 +6980,44 @@ ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u32 module_enable,
}
static struct sk_buff *
+ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
+ u32 log_level)
+{
+ struct wmi_10_4_dbglog_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ u32 cfg;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
+
+ if (module_enable) {
+ cfg = SM(log_level,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ } else {
+ /* set back defaults, all modules with WARN level */
+ cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ module_enable = ~0;
+ }
+
+ cmd->module_enable = __cpu_to_le64(module_enable);
+ cmd->module_valid = __cpu_to_le64(~0);
+ cmd->config_enable = __cpu_to_le32(cfg);
+ cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
+ __le64_to_cpu(cmd->module_enable),
+ __le64_to_cpu(cmd->module_valid),
+ __le32_to_cpu(cmd->config_enable),
+ __le32_to_cpu(cmd->config_valid));
+ return skb;
+}
+
+static struct sk_buff *
ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
{
struct wmi_pdev_pktlog_enable_cmd *cmd;
@@ -7007,6 +7451,9 @@ ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
"Peer TX rate", peer->peer_tx_rate);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer RX rate", peer->peer_rx_rate);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RX duration", peer->rx_duration);
+
len += scnprintf(buf + len, buf_len - len, "\n");
*length = len;
}
@@ -7232,6 +7679,135 @@ unlock:
buf[len] = 0;
}
+int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_VDEV_SUBTYPE_LEGACY_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return -ENOTSUPP;
+ }
+ return -ENOTSUPP;
+}
+
+static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_VDEV_SUBTYPE_10_2_4_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return -ENOTSUPP;
+ }
+ return -ENOTSUPP;
+}
+
+static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_VDEV_SUBTYPE_10_4_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_VDEV_SUBTYPE_10_4_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_VDEV_SUBTYPE_10_4_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_VDEV_SUBTYPE_10_4_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_VDEV_SUBTYPE_10_4_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ return WMI_VDEV_SUBTYPE_10_4_MESH_11S;
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
+ }
+ return -ENOTSUPP;
+}
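
Each ABI now carries its own firmware encoding for the common subtype enum, so callers must translate through the op instead of using the enum value directly. An illustrative call, assuming the ath10k_wmi_get_vdev_subtype() wrapper in wmi-ops.h dispatches to the functions above:

	int subtype;

	subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_MESH_11S);
	if (subtype < 0)
		return subtype;	/* -ENOTSUPP on firmware without 11s mesh */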
+
+static struct sk_buff *
+ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap)
+{
+ struct wmi_ext_resource_config_10_4_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
+ cmd->host_platform_config = __cpu_to_le32(type);
+ cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi ext resource config host type %d firmware feature bitmap %08x\n",
+ type, fw_feature_bitmap);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
+{
+ struct wmi_echo_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_echo_cmd *)skb->data;
+ cmd->value = cpu_to_le32(value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi echo value 0x%08x\n", value);
+ return skb;
+}
+
+int
+ath10k_wmi_barrier(struct ath10k *ar)
+{
+ int ret;
+ int time_left;
+
+ spin_lock_bh(&ar->data_lock);
+ reinit_completion(&ar->wmi.barrier);
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->wmi.barrier,
+ ATH10K_WMI_BARRIER_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
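
The barrier works by round-tripping a magic cookie through the firmware's echo path: WMI commands are processed in order, so the echo completing guarantees that everything queued before it has been consumed. A minimal usage sketch (illustrative only, not part of the patch):

	/* flush previously queued WMI commands before touching state that
	 * depends on the firmware having processed them
	 */
	ret = ath10k_wmi_barrier(ar);
	if (ret == -ETIMEDOUT)
		ath10k_warn(ar, "wmi barrier timed out\n");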
+
static const struct wmi_ops wmi_ops = {
.rx = ath10k_wmi_op_rx,
.map_svc = wmi_main_svc_map,
@@ -7248,6 +7824,7 @@ static const struct wmi_ops wmi_ops = {
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7291,6 +7868,8 @@ static const struct wmi_ops wmi_ops = {
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_op_gen_echo,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -7320,6 +7899,7 @@ static const struct wmi_ops wmi_10_1_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7358,6 +7938,8 @@ static const struct wmi_ops wmi_10_1_ops = {
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_op_gen_echo,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -7377,6 +7959,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_echo = ath10k_wmi_op_gen_echo,
.pull_scan = ath10k_wmi_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
@@ -7388,6 +7971,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7426,6 +8010,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
/* .gen_pdev_enable_adaptive_cca not implemented */
};
@@ -7435,23 +8020,26 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.gen_init = ath10k_wmi_10_2_op_gen_init,
.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+ .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
/* shared with 10.1 */
.map_svc = wmi_10x_svc_map,
.pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
.gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_echo = ath10k_wmi_op_gen_echo,
.pull_scan = ath10k_wmi_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
.pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
- .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev,
.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
@@ -7492,6 +8080,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
.gen_pdev_enable_adaptive_cca =
ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
+ .get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -7513,6 +8102,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
@@ -7536,6 +8126,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc,
.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
@@ -7544,7 +8135,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
- .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
@@ -7553,44 +8144,54 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
+ .ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
/* shared with 10.2 */
- .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
.gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+ .get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
+ .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
+ .gen_echo = ath10k_wmi_op_gen_echo,
};
int ath10k_wmi_attach(struct ath10k *ar)
{
- switch (ar->wmi.op_version) {
+ switch (ar->running_fw->fw_file.wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_10_4:
ar->wmi.ops = &wmi_10_4_ops;
ar->wmi.cmd = &wmi_10_4_cmd_map;
ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+ ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
break;
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
ar->wmi.cmd = &wmi_10_2_4_cmd_map;
ar->wmi.ops = &wmi_10_2_4_ops;
ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
+ ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
break;
case ATH10K_FW_WMI_OP_VERSION_10_2:
ar->wmi.cmd = &wmi_10_2_cmd_map;
ar->wmi.ops = &wmi_10_2_ops;
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
ar->wmi.cmd = &wmi_10x_cmd_map;
ar->wmi.ops = &wmi_10_1_ops;
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
break;
case ATH10K_FW_WMI_OP_VERSION_MAIN:
ar->wmi.cmd = &wmi_cmd_map;
ar->wmi.ops = &wmi_ops;
ar->wmi.vdev_param = &wmi_vdev_param_map;
ar->wmi.pdev_param = &wmi_pdev_param_map;
+ ar->wmi.peer_flags = &wmi_peer_flags_map;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
ath10k_wmi_tlv_attach(ar);
@@ -7598,12 +8199,13 @@ int ath10k_wmi_attach(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
ath10k_err(ar, "unsupported WMI op version: %d\n",
- ar->wmi.op_version);
+ ar->running_fw->fw_file.wmi_op_version);
return -EINVAL;
}
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
+ init_completion(&ar->wmi.barrier);
INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
@@ -7616,10 +8218,11 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar)
/* free the host memory chunks requested by firmware */
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
- dma_free_coherent(ar->dev,
- ar->wmi.mem_chunks[i].len,
- ar->wmi.mem_chunks[i].vaddr,
- ar->wmi.mem_chunks[i].paddr);
+ dma_unmap_single(ar->dev,
+ ar->wmi.mem_chunks[i].paddr,
+ ar->wmi.mem_chunks[i].len,
+ DMA_TO_DEVICE);
+ kfree(ar->wmi.mem_chunks[i].vaddr);
}
ar->wmi.num_mem_chunks = 0;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 72a4ef709577..1b243c899bef 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -55,7 +55,7 @@
* type.
*
* 6. Comment each parameter part of the WMI command/event structure by
- * using the 2 stars at the begining of C comment instead of one star to
+ * using the 2 stars at the beginning of C comment instead of one star to
* enable HTML document generation using Doxygen.
*
*/
@@ -175,6 +175,15 @@ enum wmi_service {
WMI_SERVICE_AUX_SPECTRAL_INTF,
WMI_SERVICE_AUX_CHAN_LOAD_INTF,
WMI_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_SERVICE_MESH_11S,
+ WMI_SERVICE_MESH_NON_11S,
+ WMI_SERVICE_PEER_STATS,
+ WMI_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_SERVICE_TX_MODE_DYNAMIC,
/* keep last */
WMI_SERVICE_MAX,
@@ -206,6 +215,12 @@ enum wmi_10x_service {
WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
WMI_10X_SERVICE_ATF,
WMI_10X_SERVICE_COEX_GPIO,
+ WMI_10X_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_10X_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_10X_SERVICE_MESH,
+ WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_10X_SERVICE_PEER_STATS,
};
enum wmi_main_service {
@@ -286,6 +301,15 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_10_4_SERVICE_MESH_NON_11S,
+ WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_10_4_SERVICE_PEER_STATS,
+ WMI_10_4_SERVICE_MESH_11S,
+ WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
};
static inline char *wmi_service_name(int service_id)
@@ -375,6 +399,15 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF);
SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF);
SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64);
+ SVCSTR(WMI_SERVICE_EXT_RES_CFG_SUPPORT);
+ SVCSTR(WMI_SERVICE_MESH_11S);
+ SVCSTR(WMI_SERVICE_MESH_NON_11S);
+ SVCSTR(WMI_SERVICE_PEER_STATS);
+ SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
+ SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT);
+ SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
+ SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
+ SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
default:
return NULL;
}
@@ -384,8 +417,8 @@ static inline char *wmi_service_name(int service_id)
#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
((svc_id) < (len) && \
- __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
- BIT((svc_id)%(sizeof(u32))))
+ __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
+ BIT((svc_id) % (sizeof(u32))))
#define SVCMAP(x, y, len) \
do { \
@@ -442,6 +475,18 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_ATF, len);
SVCMAP(WMI_10X_SERVICE_COEX_GPIO,
WMI_SERVICE_COEX_GPIO, len);
+ SVCMAP(WMI_10X_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_SERVICE_AUX_SPECTRAL_INTF, len);
+ SVCMAP(WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
+ SVCMAP(WMI_10X_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+ SVCMAP(WMI_10X_SERVICE_MESH,
+ WMI_SERVICE_MESH_11S, len);
+ SVCMAP(WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_SERVICE_EXT_RES_CFG_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_PEER_STATS,
+ WMI_SERVICE_PEER_STATS, len);
}
static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
@@ -600,6 +645,24 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+ SVCMAP(WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_SERVICE_EXT_RES_CFG_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_MESH_NON_11S,
+ WMI_SERVICE_MESH_NON_11S, len);
+ SVCMAP(WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_SERVICE_RESTRT_CHNL_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_PEER_STATS,
+ WMI_SERVICE_PEER_STATS, len);
+ SVCMAP(WMI_10_4_SERVICE_MESH_11S,
+ WMI_SERVICE_MESH_11S, len);
+ SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_SERVICE_TX_MODE_PUSH_PULL, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
+ WMI_SERVICE_TX_MODE_DYNAMIC, len);
}
#undef SVCMAP
@@ -773,6 +836,7 @@ struct wmi_cmd_map {
u32 set_cca_params_cmdid;
u32 pdev_bss_chan_info_request_cmdid;
u32 pdev_enable_adaptive_cca_cmdid;
+ u32 ext_resource_cfg_cmdid;
};
/*
@@ -1265,7 +1329,7 @@ enum wmi_10x_event_id {
WMI_10X_PDEV_TPC_CONFIG_EVENTID,
WMI_10X_GPIO_INPUT_EVENTID,
- WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
+ WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID - 1,
};
enum wmi_10_2_cmd_id {
@@ -1385,6 +1449,7 @@ enum wmi_10_2_cmd_id {
WMI_10_2_MU_CAL_START_CMDID,
WMI_10_2_SET_LTEU_CONFIG_CMDID,
WMI_10_2_SET_CCA_PARAMS,
+ WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
};
@@ -1428,6 +1493,8 @@ enum wmi_10_2_event_id {
WMI_10_2_WDS_PEER_EVENTID,
WMI_10_2_PEER_STA_PS_STATECHG_EVENTID,
WMI_10_2_PDEV_TEMPERATURE_EVENTID,
+ WMI_10_2_MU_REPORT_EVENTID,
+ WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID,
WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
};
@@ -1576,6 +1643,9 @@ enum wmi_10_4_cmd_id {
WMI_10_4_MU_CAL_START_CMDID,
WMI_10_4_SET_CCA_PARAMS_CMDID,
WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_10_4_EXT_RESOURCE_CFG_CMDID,
+ WMI_10_4_VDEV_SET_IE_CMDID,
+ WMI_10_4_SET_LTEU_CONFIG_CMDID,
WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
};
@@ -1638,6 +1708,7 @@ enum wmi_10_4_event_id {
WMI_10_4_PDEV_TEMPERATURE_EVENTID,
WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_10_4_MU_REPORT_EVENTID,
WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1,
};
@@ -1732,6 +1803,7 @@ struct wmi_channel {
__le32 reginfo1;
struct {
u8 antenna_max;
+ u8 max_tx_power;
} __packed;
} __packed;
} __packed;
@@ -1771,7 +1843,6 @@ enum wmi_channel_change_cause {
#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
#define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */
-#define WMI_10_4_MAX_SPATIAL_STREAM 4
/* HT Capabilities*/
#define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */
@@ -1995,8 +2066,8 @@ struct wmi_10x_service_ready_event {
struct wlan_host_mem_req mem_reqs[0];
} __packed;
-#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
-#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_UNIFIED_READY_TIMEOUT_HZ (5 * HZ)
struct wmi_ready_event {
__le32 sw_version;
@@ -2016,7 +2087,7 @@ struct wmi_resource_config {
* In offload mode target supports features like WOW, chatter and
* other protocol offloads. In order to support them some
* functionalities like reorder buffering, PN checking need to be
- * done in target. This determines maximum number of peers suported
+ * done in target. This determines maximum number of peers supported
* by target in offload mode
*/
__le32 num_offload_peers;
@@ -2197,7 +2268,7 @@ struct wmi_resource_config {
* Max. number of Tx fragments per MSDU
* This parameter controls the max number of Tx fragments per MSDU.
* This is sent by the target as part of the WMI_SERVICE_READY event
- * and is overriden by the OS shim as required.
+ * and is overridden by the OS shim as required.
*/
__le32 max_frag_entries;
} __packed;
@@ -2379,7 +2450,7 @@ struct wmi_resource_config_10x {
* Max. number of Tx fragments per MSDU
* This parameter controls the max number of Tx fragments per MSDU.
* This is sent by the target as part of the WMI_SERVICE_READY event
- * and is overriden by the OS shim as required.
+ * and is overridden by the OS shim as required.
*/
__le32 max_frag_entries;
} __packed;
@@ -2388,6 +2459,8 @@ enum wmi_10_2_feature_mask {
WMI_10_2_RX_BATCH_MODE = BIT(0),
WMI_10_2_ATF_CONFIG = BIT(1),
WMI_10_2_COEX_GPIO = BIT(3),
+ WMI_10_2_BSS_CHAN_INFO = BIT(6),
+ WMI_10_2_PEER_STATS = BIT(7),
};
struct wmi_resource_config_10_2 {
@@ -2613,13 +2686,43 @@ struct wmi_resource_config_10_4 {
*/
__le32 iphdr_pad_config;
- /* qwrap configuration
+ /* qwrap configuration (bits 15-0)
* 1 - This is qwrap configuration
* 0 - This is not qwrap
+ *
+ * Bits 31-16 is alloc_frag_desc_for_data_pkt (1 enables, 0 disables)
+ * In order to get ack-RSSI reporting and to specify the tx-rate for
+ * individual frames, this option must be enabled. This uses an extra
+ * 4 bytes per tx-msdu descriptor, so don't enable it unless you need it.
*/
__le32 qwrap_config;
} __packed;
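
A hypothetical helper (the name is made up for illustration) showing how the two sub-fields described above share the qwrap_config word:

	static __le32 demo_pack_qwrap_config(bool is_qwrap, bool alloc_frag_desc)
	{
		u32 val = 0;

		if (is_qwrap)
			val |= 1;		/* qwrap flag, bits 15-0 */
		if (alloc_frag_desc)
			val |= 1 << 16;		/* enable flag, bits 31-16 */

		return __cpu_to_le32(val);
	}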
+/**
+ * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags
+ * @WMI_10_4_LTEU_SUPPORT: LTEU config
+ * @WMI_10_4_COEX_GPIO_SUPPORT: COEX GPIO config
+ * @WMI_10_4_AUX_RADIO_SPECTRAL_INTF: AUX Radio Enhancement for spectral scan
+ * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan
+ * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats
+ * @WMI_10_4_PEER_STATS: Per station stats
+ */
+enum wmi_10_4_feature_mask {
+ WMI_10_4_LTEU_SUPPORT = BIT(0),
+ WMI_10_4_COEX_GPIO_SUPPORT = BIT(1),
+ WMI_10_4_AUX_RADIO_SPECTRAL_INTF = BIT(2),
+ WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF = BIT(3),
+ WMI_10_4_BSS_CHANNEL_INFO_64 = BIT(4),
+ WMI_10_4_PEER_STATS = BIT(5),
+};
+
+struct wmi_ext_resource_config_10_4_cmd {
+ /* contains enum wmi_host_platform_type */
+ __le32 host_platform_config;
+ /* see enum wmi_10_4_feature_mask */
+ __le32 fw_feature_bitmap;
+};
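
A sketch of how the new command might be filled in and sent, assuming enum wmi_host_platform_type provides a high-performance host profile and reusing the cmd-map entry added above; illustrative only:

	u32 features = WMI_10_4_PEER_STATS | WMI_10_4_BSS_CHANNEL_INFO_64;
	struct sk_buff *skb;

	skb = ath10k_wmi_10_4_ext_resource_config(ar,
						  WMI_HOST_PLATFORM_HIGH_PERF,
						  features);
	if (!IS_ERR(skb))
		ath10k_wmi_cmd_send(ar, skb,
				    ar->wmi.cmd->ext_resource_cfg_cmdid);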
+
 /* structure describing host memory chunk. */
struct host_memory_chunk {
/* id of the request that is passed up in service ready */
@@ -2641,7 +2744,7 @@ struct wmi_init_cmd {
struct wmi_host_mem_chunks mem_chunks;
} __packed;
-/* _10x stucture is from 10.X FW API */
+/* _10x structure is from 10.X FW API */
struct wmi_init_cmd_10x {
struct wmi_resource_config_10x resource_config;
struct wmi_host_mem_chunks mem_chunks;
@@ -2990,11 +3093,17 @@ struct wmi_10_4_mgmt_rx_event {
u8 buf[0];
} __packed;
+struct wmi_mgmt_rx_ext_info {
+ __le64 rx_mac_timestamp;
+} __packed __aligned(4);
+
#define WMI_RX_STATUS_OK 0x00
#define WMI_RX_STATUS_ERR_CRC 0x01
#define WMI_RX_STATUS_ERR_DECRYPT 0x08
#define WMI_RX_STATUS_ERR_MIC 0x10
#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+/* Extension data at the end of mgmt frame */
+#define WMI_RX_STATUS_EXT_INFO 0x40
#define PHY_ERROR_GEN_SPECTRAL_SCAN 0x26
#define PHY_ERROR_GEN_FALSE_RADAR_EXT 0x24
@@ -3343,6 +3452,7 @@ struct wmi_pdev_param_map {
u32 wapi_mbssid_offset;
u32 arp_srcaddr;
u32 arp_dstaddr;
+ u32 enable_btcoex;
};
#define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -3650,6 +3760,15 @@ enum wmi_10_4_pdev_param {
WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+ WMI_10_4_PDEV_PARAM_TXPOWER_DECR_DB,
+ WMI_10_4_PDEV_PARAM_RX_BATCHMODE,
+ WMI_10_4_PDEV_PARAM_PACKET_AGGR_DELAY,
+ WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
+ WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
+ WMI_10_4_PDEV_PARAM_CUST_TXPOWER_SCALE,
+ WMI_10_4_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ WMI_10_4_PDEV_PARAM_ATF_SSID_GROUP_POLICY,
+ WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
};
struct wmi_pdev_set_param_cmd {
@@ -3848,7 +3967,7 @@ struct wmi_pdev_stats_tx {
/* illegal rate phy errors */
__le32 illgl_rate_phy_err;
- /* wal pdev continous xretry */
+ /* wal pdev continuous xretry */
__le32 pdev_cont_xretry;
 /* wal pdev continuous xretry */
@@ -4019,6 +4138,13 @@ enum wmi_stats_id {
WMI_STAT_VDEV_RATE = BIT(5),
};
+enum wmi_10_4_stats_id {
+ WMI_10_4_STAT_PEER = BIT(0),
+ WMI_10_4_STAT_AP = BIT(1),
+ WMI_10_4_STAT_INST = BIT(2),
+ WMI_10_4_STAT_PEER_EXTD = BIT(3),
+};
+
struct wlan_inst_rssi_args {
__le16 cfg_retry_count;
__le16 retry_count;
@@ -4096,10 +4222,10 @@ struct wmi_10_2_stats_event {
*/
struct wmi_pdev_stats_base {
__le32 chan_nf;
- __le32 tx_frame_count;
- __le32 rx_frame_count;
- __le32 rx_clear_count;
- __le32 cycle_count;
+ __le32 tx_frame_count; /* Cycles spent transmitting frames */
+ __le32 rx_frame_count; /* Cycles spent receiving frames */
+ __le32 rx_clear_count; /* Total channel busy time, evidently */
+ __le32 cycle_count; /* Total on-channel time */
__le32 phy_err_count;
__le32 chan_tx_pwr;
} __packed;
@@ -4192,7 +4318,13 @@ struct wmi_10_2_peer_stats {
struct wmi_10_2_4_peer_stats {
struct wmi_10_2_peer_stats common;
- __le32 unknown_value; /* FIXME: what is this word? */
+ __le32 peer_rssi_changed;
+} __packed;
+
+struct wmi_10_2_4_ext_peer_stats {
+ struct wmi_10_2_peer_stats common;
+ __le32 peer_rssi_changed;
+ __le32 rx_duration;
} __packed;
struct wmi_10_4_peer_stats {
@@ -4212,6 +4344,27 @@ struct wmi_10_4_peer_stats {
__le32 peer_rssi_changed;
} __packed;
+struct wmi_10_4_peer_extd_stats {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 inactive_time;
+ __le32 peer_chain_rssi;
+ __le32 rx_duration;
+ __le32 reserved[10];
+} __packed;
+
+struct wmi_10_4_bss_bcn_stats {
+ __le32 vdev_id;
+ __le32 bss_bcns_dropped;
+ __le32 bss_bcn_delivered;
+} __packed;
+
+struct wmi_10_4_bss_bcn_filter_stats {
+ __le32 bcns_dropped;
+ __le32 bcns_delivered;
+ __le32 active_filters;
+ struct wmi_10_4_bss_bcn_stats bss_stats;
+} __packed;
+
struct wmi_10_2_pdev_ext_stats {
__le32 rx_rssi_comb;
__le32 rx_rssi[4];
@@ -4235,10 +4388,40 @@ enum wmi_vdev_type {
};
enum wmi_vdev_subtype {
- WMI_VDEV_SUBTYPE_NONE = 0,
- WMI_VDEV_SUBTYPE_P2P_DEVICE = 1,
- WMI_VDEV_SUBTYPE_P2P_CLIENT = 2,
- WMI_VDEV_SUBTYPE_P2P_GO = 3,
+ WMI_VDEV_SUBTYPE_NONE,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT,
+ WMI_VDEV_SUBTYPE_P2P_GO,
+ WMI_VDEV_SUBTYPE_PROXY_STA,
+ WMI_VDEV_SUBTYPE_MESH_11S,
+ WMI_VDEV_SUBTYPE_MESH_NON_11S,
+};
+
+enum wmi_vdev_subtype_legacy {
+ WMI_VDEV_SUBTYPE_LEGACY_NONE = 0,
+ WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV = 1,
+ WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI = 2,
+ WMI_VDEV_SUBTYPE_LEGACY_P2P_GO = 3,
+ WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA = 4,
+};
+
+enum wmi_vdev_subtype_10_2_4 {
+ WMI_VDEV_SUBTYPE_10_2_4_NONE = 0,
+ WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV = 1,
+ WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI = 2,
+ WMI_VDEV_SUBTYPE_10_2_4_P2P_GO = 3,
+ WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA = 4,
+ WMI_VDEV_SUBTYPE_10_2_4_MESH_11S = 5,
+};
+
+enum wmi_vdev_subtype_10_4 {
+ WMI_VDEV_SUBTYPE_10_4_NONE = 0,
+ WMI_VDEV_SUBTYPE_10_4_P2P_DEV = 1,
+ WMI_VDEV_SUBTYPE_10_4_P2P_CLI = 2,
+ WMI_VDEV_SUBTYPE_10_4_P2P_GO = 3,
+ WMI_VDEV_SUBTYPE_10_4_PROXY_STA = 4,
+ WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S = 5,
+ WMI_VDEV_SUBTYPE_10_4_MESH_11S = 6,
};
/* values for vdev_subtype */
@@ -4247,14 +4430,14 @@ enum wmi_vdev_subtype {
/*
* Indicates that AP VDEV uses hidden ssid. only valid for
* AP/GO */
-#define WMI_VDEV_START_HIDDEN_SSID (1<<0)
+#define WMI_VDEV_START_HIDDEN_SSID (1 << 0)
/*
* Indicates if robust management frame/management frame
* protection is enabled. For GO/AP vdevs, it indicates that
* it may support station/client associations with RMF enabled.
* For STA/client vdevs, it indicates that sta will
* associate with AP with RMF enabled. */
-#define WMI_VDEV_START_PMF_ENABLED (1<<1)
+#define WMI_VDEV_START_PMF_ENABLED (1 << 1)
struct wmi_p2p_noa_descriptor {
__le32 type_count; /* 255: continuous schedule, 0: reserved */
@@ -4278,9 +4461,9 @@ struct wmi_vdev_start_request_cmd {
__le32 flags;
/* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
struct wmi_ssid ssid;
- /* beacon/probe reponse xmit rate. Applicable for SoftAP. */
+ /* beacon/probe response xmit rate. Applicable for SoftAP. */
__le32 bcn_tx_rate;
- /* beacon/probe reponse xmit power. Applicable for SoftAP. */
+ /* beacon/probe response xmit power. Applicable for SoftAP. */
__le32 bcn_tx_power;
/* number of p2p NOA descriptor(s) from scan entry */
__le32 num_noa_descriptors;
@@ -4493,6 +4676,7 @@ struct wmi_vdev_param_map {
u32 meru_vc;
u32 rx_decap_type;
u32 bw_nss_ratemask;
+ u32 set_tsf;
};
#define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -4507,7 +4691,7 @@ enum wmi_vdev_param {
WMI_VDEV_PARAM_BEACON_INTERVAL,
/* Listen interval in TUs */
WMI_VDEV_PARAM_LISTEN_INTERVAL,
- /* muticast rate in Mbps */
+ /* multicast rate in Mbps */
WMI_VDEV_PARAM_MULTICAST_RATE,
/* management frame rate in Mbps */
WMI_VDEV_PARAM_MGMT_TX_RATE,
@@ -4638,7 +4822,7 @@ enum wmi_10x_vdev_param {
WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
/* Listen interval in TUs */
WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
- /* muticast rate in Mbps */
+ /* multicast rate in Mbps */
WMI_10X_VDEV_PARAM_MULTICAST_RATE,
/* management frame rate in Mbps */
WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
@@ -4749,6 +4933,7 @@ enum wmi_10x_vdev_param {
WMI_10X_VDEV_PARAM_RTS_FIXED_RATE,
WMI_10X_VDEV_PARAM_VHT_SGIMASK,
WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_10X_VDEV_PARAM_TSF_INCREMENT,
};
enum wmi_10_4_vdev_param {
@@ -4818,6 +5003,12 @@ enum wmi_10_4_vdev_param {
WMI_10_4_VDEV_PARAM_MERU_VC,
WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+ WMI_10_4_VDEV_PARAM_SENSOR_AP,
+ WMI_10_4_VDEV_PARAM_BEACON_RATE,
+ WMI_10_4_VDEV_PARAM_DTIM_ENABLE_CTS,
+ WMI_10_4_VDEV_PARAM_STA_KICKOUT,
+ WMI_10_4_VDEV_PARAM_CAPABILITIES,
+ WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
};
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
@@ -4876,7 +5067,7 @@ struct wmi_vdev_simple_event {
} __packed;
/* VDEV start response status codes */
-/* VDEV succesfully started */
+/* VDEV successfully started */
#define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0
/* requested VDEV not found */
@@ -5192,7 +5383,7 @@ enum wmi_sta_ps_param_pspoll_count {
#define WMI_UAPSD_AC_TYPE_TRIG 1
#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
- ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1)))
+ (type == WMI_UAPSD_AC_TYPE_DELI ? 1 << (ac << 1) : 1 << ((ac << 1) + 1))
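+/* Worked example of the mask above:
+ *   WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_DELI) == 1 << 4 == 0x10
+ *   WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_TRIG) == 1 << 5 == 0x20
+ * i.e. bit 2*ac marks delivery-enabled and bit 2*ac + 1 trigger-enabled,
+ * matching the WMI_STA_PS_UAPSD_AC*_DELIVERY_EN/_TRIGGER_EN values below.
+ */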
enum wmi_sta_ps_param_uapsd {
WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
@@ -5405,6 +5596,16 @@ struct wmi_host_swba_event {
struct wmi_bcn_info bcn_info[0];
} __packed;
+struct wmi_10_2_4_bcn_info {
+ struct wmi_tim_info tim_info;
+ /* The 10.2.4 FW doesn't have p2p NOA info */
+} __packed;
+
+struct wmi_10_2_4_host_swba_event {
+ __le32 vdev_map;
+ struct wmi_10_2_4_bcn_info bcn_info[0];
+} __packed;
+
/* 16 words = 512 client + 1 word = for guard */
#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17
@@ -5597,7 +5798,7 @@ struct wmi_rate_set {
* the rates are filled from least significant byte to most
* significant byte.
*/
- __le32 rates[(MAX_SUPPORTED_RATES/4)+1];
+ __le32 rates[(MAX_SUPPORTED_RATES / 4) + 1];
} __packed;
struct wmi_rate_set_arg {
@@ -5641,21 +5842,79 @@ struct wmi_peer_set_q_empty_callback_cmd {
__le32 callback_enable;
} __packed;
-#define WMI_PEER_AUTH 0x00000001
-#define WMI_PEER_QOS 0x00000002
-#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
-#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
-#define WMI_PEER_APSD 0x00000800
-#define WMI_PEER_HT 0x00001000
-#define WMI_PEER_40MHZ 0x00002000
-#define WMI_PEER_STBC 0x00008000
-#define WMI_PEER_LDPC 0x00010000
-#define WMI_PEER_DYN_MIMOPS 0x00020000
-#define WMI_PEER_STATIC_MIMOPS 0x00040000
-#define WMI_PEER_SPATIAL_MUX 0x00200000
-#define WMI_PEER_VHT 0x02000000
-#define WMI_PEER_80MHZ 0x04000000
-#define WMI_PEER_VHT_2G 0x08000000
+struct wmi_peer_flags_map {
+ u32 auth;
+ u32 qos;
+ u32 need_ptk_4_way;
+ u32 need_gtk_2_way;
+ u32 apsd;
+ u32 ht;
+ u32 bw40;
+ u32 stbc;
+ u32 ldbc;
+ u32 dyn_mimops;
+ u32 static_mimops;
+ u32 spatial_mux;
+ u32 vht;
+ u32 bw80;
+ u32 vht_2g;
+ u32 pmf;
+};
+
+enum wmi_peer_flags {
+ WMI_PEER_AUTH = 0x00000001,
+ WMI_PEER_QOS = 0x00000002,
+ WMI_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_PEER_APSD = 0x00000800,
+ WMI_PEER_HT = 0x00001000,
+ WMI_PEER_40MHZ = 0x00002000,
+ WMI_PEER_STBC = 0x00008000,
+ WMI_PEER_LDPC = 0x00010000,
+ WMI_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_PEER_VHT = 0x02000000,
+ WMI_PEER_80MHZ = 0x04000000,
+ WMI_PEER_VHT_2G = 0x08000000,
+ WMI_PEER_PMF = 0x10000000,
+};
+
+enum wmi_10x_peer_flags {
+ WMI_10X_PEER_AUTH = 0x00000001,
+ WMI_10X_PEER_QOS = 0x00000002,
+ WMI_10X_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_10X_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_10X_PEER_APSD = 0x00000800,
+ WMI_10X_PEER_HT = 0x00001000,
+ WMI_10X_PEER_40MHZ = 0x00002000,
+ WMI_10X_PEER_STBC = 0x00008000,
+ WMI_10X_PEER_LDPC = 0x00010000,
+ WMI_10X_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_10X_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_10X_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_10X_PEER_VHT = 0x02000000,
+ WMI_10X_PEER_80MHZ = 0x04000000,
+};
+
+enum wmi_10_2_peer_flags {
+ WMI_10_2_PEER_AUTH = 0x00000001,
+ WMI_10_2_PEER_QOS = 0x00000002,
+ WMI_10_2_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_10_2_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_10_2_PEER_APSD = 0x00000800,
+ WMI_10_2_PEER_HT = 0x00001000,
+ WMI_10_2_PEER_40MHZ = 0x00002000,
+ WMI_10_2_PEER_STBC = 0x00008000,
+ WMI_10_2_PEER_LDPC = 0x00010000,
+ WMI_10_2_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_10_2_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_10_2_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_10_2_PEER_VHT = 0x02000000,
+ WMI_10_2_PEER_80MHZ = 0x04000000,
+ WMI_10_2_PEER_VHT_2G = 0x08000000,
+ WMI_10_2_PEER_PMF = 0x10000000,
+};
/*
* Peer rate capabilities.
@@ -5721,6 +5980,11 @@ struct wmi_10_2_peer_assoc_complete_cmd {
__le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
} __packed;
+struct wmi_10_4_peer_assoc_complete_cmd {
+ struct wmi_10_2_peer_assoc_complete_cmd cmd;
+ __le32 peer_bw_rxnss_override;
+} __packed;
+
struct wmi_peer_assoc_complete_arg {
u8 addr[ETH_ALEN];
u32 vdev_id;
@@ -5910,6 +6174,20 @@ struct wmi_dbglog_cfg_cmd {
__le32 config_valid;
} __packed;
+struct wmi_10_4_dbglog_cfg_cmd {
+	/* bitmask to hold mod id config */
+ __le64 module_enable;
+
+ /* see ATH10K_DBGLOG_CFG_ */
+ __le32 config_enable;
+
+ /* mask of module id bits to be changed */
+ __le64 module_valid;
+
+ /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+ __le32 config_valid;
+} __packed;
+
enum wmi_roam_reason {
WMI_ROAM_REASON_BETTER_AP = 1,
WMI_ROAM_REASON_BEACON_MISS = 2,
@@ -5954,6 +6232,7 @@ struct wmi_mgmt_rx_ev_arg {
__le32 phy_mode;
__le32 buf_len;
__le32 status; /* %WMI_RX_STATUS_ */
+ struct wmi_mgmt_rx_ext_info ext_info;
};
struct wmi_ch_info_ev_arg {
@@ -6036,11 +6315,26 @@ struct wmi_roam_ev_arg {
__le32 rssi;
};
+struct wmi_echo_ev_arg {
+ __le32 value;
+};
+
struct wmi_pdev_temperature_event {
	/* temperature value in degrees Celsius */
__le32 temperature;
} __packed;
+struct wmi_pdev_bss_chan_info_event {
+ __le32 freq;
+ __le32 noise_floor;
+ __le64 cycle_busy;
+ __le64 cycle_total;
+ __le64 cycle_tx;
+ __le64 cycle_rx;
+ __le64 cycle_rx_bss;
+ __le32 reserved;
+} __packed;
+
/* WOW structures */
enum wmi_wow_wakeup_event {
WOW_BMISS_EVENT = 0,
@@ -6239,6 +6533,21 @@ struct wmi_pdev_set_adaptive_cca_params {
__le32 cca_detect_margin;
} __packed;
+enum wmi_host_platform_type {
+ WMI_HOST_PLATFORM_HIGH_PERF,
+ WMI_HOST_PLATFORM_LOW_PERF,
+};
+
+enum wmi_bss_survey_req_type {
+ WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+ WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+struct wmi_pdev_chan_info_req_cmd {
+ __le32 type;
+ __le32 reserved;
+} __packed;
+
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
@@ -6336,5 +6645,8 @@ size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head);
void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
struct ath10k_fw_stats *fw_stats,
char *buf);
+int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype);
+int ath10k_wmi_barrier(struct ath10k *ar);
#endif /* _WMI_H_ */
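
The wmi_peer_flags_map above exists so common code can build peer-flag words
without hard-coding any one firmware branch's bit positions: the driver
installs one map per WMI ABI (a 10.2 map would carry .qos = WMI_10_2_PEER_QOS,
and so on) and shared code reads only the map. A minimal sketch of that
pattern; the function and its arguments are illustrative, not part of this
patch:

/* Illustrative consumer of wmi_peer_flags_map (not from this patch). */
static u32 example_build_peer_flags(const struct wmi_peer_flags_map *map,
				    bool qos, bool ht, bool vht)
{
	u32 flags = map->auth;	/* peer has completed authentication */

	if (qos)
		flags |= map->qos;
	if (ht)
		flags |= map->ht;
	if (vht)
		flags |= map->vht;

	return flags;
}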
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 8e02b381990f..77100d42f401 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -233,7 +233,7 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
- ar->fw_features))) {
+ ar->running_fw->fw_file.fw_features))) {
ret = 1;
goto exit;
}
@@ -285,7 +285,7 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
mutex_lock(&ar->conf_mutex);
if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
- ar->fw_features))) {
+ ar->running_fw->fw_file.fw_features))) {
ret = 1;
goto exit;
}
@@ -325,7 +325,8 @@ exit:
int ath10k_wow_init(struct ath10k *ar)
{
- if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
+ if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->running_fw->fw_file.fw_features))
return 0;
if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 17b6d1aea4c7..5b58475ae489 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -111,6 +111,26 @@ static void nqx_disable_irq(struct nqx_dev *nqx_dev)
spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
}
+/**
+ * nqx_enable_irq() - enable the NFC interrupt
+ * @nqx_dev: NFC device context
+ *
+ * Enable the interrupt if it is not already enabled,
+ * keeping enable/disable calls balanced.
+ *
+ * Return: void
+ */
+static void nqx_enable_irq(struct nqx_dev *nqx_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
+ if (!nqx_dev->irq_enabled) {
+ nqx_dev->irq_enabled = true;
+ enable_irq(nqx_dev->client->irq);
+ }
+ spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
+}
+
static irqreturn_t nqx_dev_irq_handler(int irq, void *dev_id)
{
struct nqx_dev *nqx_dev = dev_id;
@@ -467,6 +487,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
/* hardware dependent delay */
msleep(100);
} else if (arg == 1) {
+ nqx_enable_irq(nqx_dev);
dev_dbg(&nqx_dev->client->dev,
"gpio_set_value enable: %s: info: %p\n",
__func__, nqx_dev);
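
nqx_enable_irq() mirrors the existing nqx_disable_irq(): both test-and-flip
irq_enabled under irq_enabled_lock so that enable_irq()/disable_irq() calls
stay balanced however often the power-state ioctl runs. A plausible sketch of
the disable side for symmetry -- its real body sits above this hunk and is
not shown, and the _nosync variant is an assumption:

static void nqx_disable_irq(struct nqx_dev *nqx_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
	if (nqx_dev->irq_enabled) {
		/* _nosync so this is safe from the IRQ handler itself */
		disable_irq_nosync(nqx_dev->client->irq);
		nqx_dev->irq_enabled = false;
	}
	spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
}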
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 6515d29e497a..967036a25746 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -941,6 +941,10 @@ struct ipa_uc_wdi_ctx {
struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio;
void *priv;
ipa_uc_ready_cb uc_ready_cb;
+ /* for AP+STA stats update */
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+ ipa_wdi_meter_notifier_cb stats_notify;
+#endif
};
/**
@@ -1524,6 +1528,7 @@ int ipa2_resume_wdi_pipe(u32 clnt_hdl);
int ipa2_suspend_wdi_pipe(u32 clnt_hdl);
int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
u16 ipa2_get_smem_restr_bytes(void);
+int ipa2_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes);
int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp,
ipa_notify_cb notify, void *priv, u8 hdr_len,
struct ipa_ntn_conn_out_params *outp);
@@ -1564,6 +1569,10 @@ enum ipacm_client_enum ipa2_get_client(int pipe_idx);
bool ipa2_get_client_uplink(int pipe_idx);
+int ipa2_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats);
+
+int ipa2_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota);
+
/*
* IPADMA
*/
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index 0f5d7b7719b5..d88e5a6d3d73 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -763,7 +763,8 @@ static void ipa_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n",
qmi_ind.apn.mux_id,
(unsigned long int) qmi_ind.apn.num_Mbytes);
- ipa_broadcast_quota_reach_ind(qmi_ind.apn.mux_id);
+ ipa_broadcast_quota_reach_ind(qmi_ind.apn.mux_id,
+ IPA_UPSTEAM_MODEM);
}
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
index c7c6234aae0e..96554af9aefd 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -168,7 +168,8 @@ int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data);
int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data);
-void ipa_broadcast_quota_reach_ind(uint32_t mux_id);
+void ipa_broadcast_quota_reach_ind(uint32_t mux_id,
+ enum ipa_upstream_type upstream_type);
int rmnet_ipa_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
*data);
@@ -176,6 +177,8 @@ int rmnet_ipa_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
bool reset);
+int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
+
int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
struct ipa_get_data_stats_resp_msg_v01 *resp);
@@ -268,7 +271,21 @@ static inline int rmnet_ipa_set_data_quota(
return -EPERM;
}
-static inline void ipa_broadcast_quota_reach_ind(uint32_t mux_id) { }
+static inline void ipa_broadcast_quota_reach_ind(uint32_t mux_id,
+	enum ipa_upstream_type upstream_type)
+{
+}
+
+static inline int rmnet_ipa_reset_tethering_stats(
+	struct wan_ioctl_reset_tether_stats *data)
+{
+	return -EPERM;
+}
static inline int ipa_qmi_get_data_stats(
struct ipa_get_data_stats_req_msg_v01 *req,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index 4e79fec076e2..6dc194f97656 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -12,6 +12,7 @@
#include "ipa_i.h"
#include <linux/dmapool.h>
#include <linux/delay.h>
+#include "ipa_qmi_service.h"
#define IPA_HOLB_TMR_DIS 0x0
@@ -1205,6 +1206,12 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
ep->client_notify = in->sys.notify;
ep->priv = in->sys.priv;
+ /* for AP+STA stats update */
+ if (in->wdi_notify)
+ ipa_ctx->uc_wdi_ctx.stats_notify = in->wdi_notify;
+ else
+ IPADBG("in->wdi_notify is null\n");
+
if (!ep->skip_ep_cfg) {
if (ipa2_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
IPAERR("fail to configure EP.\n");
@@ -1302,6 +1309,12 @@ int ipa2_disconnect_wdi_pipe(u32 clnt_hdl)
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+ /* for AP+STA stats update */
+ if (ipa_ctx->uc_wdi_ctx.stats_notify)
+ ipa_ctx->uc_wdi_ctx.stats_notify = NULL;
+ else
+ IPADBG("uc_wdi_ctx.stats_notify already null\n");
+
uc_timeout:
return result;
}
@@ -1660,6 +1673,23 @@ uc_timeout:
return result;
}
+/**
+ * ipa2_broadcast_wdi_quota_reach_ind() - WDI quota reached indication
+ * @fid: [in] input netdev ID
+ * @num_bytes: [in] used bytes
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_broadcast_wdi_quota_reach_ind(uint32_t fid,
+ uint64_t num_bytes)
+{
+ IPAERR("Quota reached indication on fis(%d) Mbytes(%lu)\n",
+ fid,
+ (unsigned long int) num_bytes);
+ ipa_broadcast_quota_reach_ind(0, IPA_UPSTEAM_WLAN);
+ return 0;
+}
+
int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
{
int result = 0;
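
The two hunks above give uc_wdi_ctx.stats_notify a register-on-connect,
clear-on-disconnect lifecycle. A hedged sketch of the WLAN-side wiring --
wlan_meter_cb and the out-params type are assumptions; wdi_notify and the
pipe entry points are the ones patched here:

/* Hedged sketch: wlan_meter_cb is an assumed driver callback. */
static int wlan_ipa_connect(struct ipa_wdi_in_params *in,
			    struct ipa_wdi_out_params *out)
{
	in->wdi_notify = wlan_meter_cb;	/* saved in uc_wdi_ctx.stats_notify */
	return ipa2_connect_wdi_pipe(in, out);
	/* ipa2_disconnect_wdi_pipe() later clears stats_notify again */
}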
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 87d84b43c829..c063e4392c16 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -953,11 +953,39 @@ void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink)
}
}
+/**
+ * ipa2_get_wlan_stats() - get IPA Wi-Fi stats via the registered
+ * WDI meter callback
+ *
+ * Return value: 0 on success, negative on failure
+ */
+int ipa2_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
+{
+ if (ipa_ctx->uc_wdi_ctx.stats_notify) {
+ ipa_ctx->uc_wdi_ctx.stats_notify(IPA_GET_WDI_SAP_STATS,
+ wdi_sap_stats);
+ } else {
+ IPAERR("uc_wdi_ctx.stats_notify not registered\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int ipa2_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
+{
+ if (ipa_ctx->uc_wdi_ctx.stats_notify) {
+ ipa_ctx->uc_wdi_ctx.stats_notify(IPA_SET_WIFI_QUOTA,
+ wdi_quota);
+ } else {
+ IPAERR("uc_wdi_ctx.stats_notify not registered\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
/**
* ipa2_get_client() - provide client mapping
* @client: client type
*
- * Return value: none
+ * Return value: client mapping enum
*/
enum ipacm_client_enum ipa2_get_client(int pipe_idx)
{
@@ -5030,6 +5058,8 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_suspend_wdi_pipe = ipa2_suspend_wdi_pipe;
api_ctrl->ipa_get_wdi_stats = ipa2_get_wdi_stats;
api_ctrl->ipa_get_smem_restr_bytes = ipa2_get_smem_restr_bytes;
+ api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
+ ipa2_broadcast_wdi_quota_reach_ind;
api_ctrl->ipa_uc_wdi_get_dbpa = ipa2_uc_wdi_get_dbpa;
api_ctrl->ipa_uc_reg_rdyCB = ipa2_uc_reg_rdyCB;
api_ctrl->ipa_uc_dereg_rdyCB = ipa2_uc_dereg_rdyCB;
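
ipa2_get_wlan_stats() and ipa2_set_wlan_quota() are thin dispatchers: both
fail with -EFAULT unless a meter callback was registered at pipe-connect
time, and both rely on the callee setting the corresponding *_valid flag.
A hedged sketch of a matching WLAN-side handler; the callback enum/typedef
come from the IPA headers and the wlan_* helpers are assumptions, while the
event IDs, payload structures, and valid flags are the ones used above:

/* Hedged sketch of a WLAN-side meter callback. */
static void wlan_meter_cb(enum ipa_wdi_meter_evt_type evt, void *data)
{
	switch (evt) {
	case IPA_GET_WDI_SAP_STATS: {
		struct ipa_get_wdi_sap_stats *stats = data;

		wlan_fill_sap_counters(stats);	/* must honor reset_stats */
		stats->stats_valid = 1;		/* checked by rmnet_ipa */
		break;
	}
	case IPA_SET_WIFI_QUOTA: {
		struct ipa_set_wifi_quota *quota = data;

		wlan_program_quota(quota->set_quota, quota->quota_bytes);
		quota->set_valid = 1;	/* 0 makes the caller return -EFAULT */
		break;
	}
	default:
		break;
	}
}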
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index b7583b990a84..246f6b68b839 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -52,6 +52,8 @@
#define DEFAULT_OUTSTANDING_LOW 32
#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
+#define IPA_UPSTEAM_WLAN_IFACE_NAME "wlan0"
+
#define IPA_WWAN_DEVICE_COUNT (1)
#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
@@ -770,6 +772,22 @@ static int find_vchannel_name_index(const char *vchannel_name)
return MAX_NUM_OF_MUX_CHANNEL;
}
+static enum ipa_upstream_type find_upstream_type(const char *upstreamIface)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+ if (strcmp(mux_channel[i].vchannel_name,
+ upstreamIface) == 0)
+ return IPA_UPSTEAM_MODEM;
+ }
+
+ if (strcmp(IPA_UPSTEAM_WLAN_IFACE_NAME, upstreamIface) == 0)
+ return IPA_UPSTEAM_WLAN;
+ else
+ return IPA_UPSTEAM_MAX;
+}
+
static int wwan_register_to_ipa(int index)
{
struct ipa_tx_intf tx_properties = {0};
@@ -2525,10 +2543,10 @@ int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
}
/**
- * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * rmnet_ipa_set_data_quota_modem() - Data quota setting IOCTL handler
* @data - IOCTL data
*
- * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * This function handles WAN_IOC_SET_DATA_QUOTA on modem interface.
* It translates the given interface name to the Modem MUX ID and
* sends the request of the quota to the IPA Modem driver via QMI.
*
@@ -2537,12 +2555,16 @@ int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
* -EFAULT: Invalid interface name provided
* other: See ipa_qmi_set_data_quota
*/
-int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
+static int rmnet_ipa_set_data_quota_modem(struct wan_ioctl_set_data_quota *data)
{
u32 mux_id;
int index;
struct ipa_set_data_usage_quota_req_msg_v01 req;
+ /* stop quota */
+ if (!data->set_quota)
+ ipa_qmi_stop_data_qouta();
+
index = find_vchannel_name_index(data->interface_name);
IPAWANERR("iface name %s, quota %lu\n",
data->interface_name,
@@ -2567,6 +2589,65 @@ int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
return ipa_qmi_set_data_quota(&req);
}
+static int rmnet_ipa_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
+{
+ struct ipa_set_wifi_quota wifi_quota;
+ int rc = 0;
+
+ memset(&wifi_quota, 0, sizeof(struct ipa_set_wifi_quota));
+ wifi_quota.set_quota = data->set_quota;
+ wifi_quota.quota_bytes = data->quota_mbytes;
+ IPAWANDBG("iface name %s, quota %lu\n",
+ data->interface_name,
+ (unsigned long int) data->quota_mbytes);
+
+ rc = ipa2_set_wlan_quota(&wifi_quota);
+ /* check if wlan-fw takes this quota-set */
+ if (!wifi_quota.set_valid)
+ rc = -EFAULT;
+ return rc;
+}
+
+/**
+ * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * It translates the given interface name to the Modem MUX ID and
+ * sends the request of the quota to the IPA Modem driver via QMI.
+ *
+ * Return codes:
+ * 0: Success
+ * -EFAULT: Invalid interface name provided
+ * other: See ipa_qmi_set_data_quota
+ */
+int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
+{
+ enum ipa_upstream_type upstream_type;
+ int rc = 0;
+
+ /* get IPA backhaul type */
+ upstream_type = find_upstream_type(data->interface_name);
+
+ if (upstream_type == IPA_UPSTEAM_MAX) {
+ IPAWANERR("upstream iface %s not supported\n",
+ data->interface_name);
+ } else if (upstream_type == IPA_UPSTEAM_WLAN) {
+ rc = rmnet_ipa_set_data_quota_wifi(data);
+ if (rc) {
+ IPAWANERR("set quota on wifi failed\n");
+ return rc;
+ }
+ } else {
+ rc = rmnet_ipa_set_data_quota_modem(data);
+ if (rc) {
+ IPAWANERR("set quota on modem failed\n");
+ return rc;
+ }
+ }
+ return rc;
+}
+
/* rmnet_ipa_set_tether_client_pipe() -
* @data - IOCTL data
*
@@ -2631,8 +2712,59 @@ int rmnet_ipa_set_tether_client_pipe(
return 0;
}
-int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
- bool reset)
+static int rmnet_ipa_query_tethering_stats_wifi(
+ struct wan_ioctl_query_tether_stats *data, bool reset)
+{
+ struct ipa_get_wdi_sap_stats *sap_stats;
+ int rc;
+
+ sap_stats = kzalloc(sizeof(struct ipa_get_wdi_sap_stats),
+ GFP_KERNEL);
+ if (!sap_stats)
+ return -ENOMEM;
+
+ sap_stats->reset_stats = reset;
+ IPAWANDBG("reset the pipe stats %d\n", sap_stats->reset_stats);
+
+ rc = ipa2_get_wlan_stats(sap_stats);
+ if (rc) {
+ kfree(sap_stats);
+ return rc;
+ } else if (reset) {
+ kfree(sap_stats);
+ return 0;
+ }
+
+ if (sap_stats->stats_valid) {
+ data->ipv4_tx_packets = sap_stats->ipv4_tx_packets;
+ data->ipv4_tx_bytes = sap_stats->ipv4_tx_bytes;
+ data->ipv4_rx_packets = sap_stats->ipv4_rx_packets;
+ data->ipv4_rx_bytes = sap_stats->ipv4_rx_bytes;
+ data->ipv6_tx_packets = sap_stats->ipv6_tx_packets;
+ data->ipv6_tx_bytes = sap_stats->ipv6_tx_bytes;
+ data->ipv6_rx_packets = sap_stats->ipv6_rx_packets;
+ data->ipv6_rx_bytes = sap_stats->ipv6_rx_bytes;
+ }
+
+ IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+ (unsigned long int) data->ipv4_rx_packets,
+ (unsigned long int) data->ipv6_rx_packets,
+ (unsigned long int) data->ipv4_rx_bytes,
+ (unsigned long int) data->ipv6_rx_bytes);
+ IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+ (unsigned long int) data->ipv4_tx_packets,
+ (unsigned long int) data->ipv6_tx_packets,
+ (unsigned long int) data->ipv4_tx_bytes,
+ (unsigned long int) data->ipv6_tx_bytes);
+
+ kfree(sap_stats);
+ return rc;
+}
+
+int rmnet_ipa_query_tethering_stats_modem(
+	struct wan_ioctl_query_tether_stats *data, bool reset)
{
struct ipa_get_data_stats_req_msg_v01 *req;
struct ipa_get_data_stats_resp_msg_v01 *resp;
@@ -2779,6 +2911,70 @@ int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
return 0;
}
+int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+ bool reset)
+{
+ enum ipa_upstream_type upstream_type;
+ int rc = 0;
+
+ /* get IPA backhaul type */
+ upstream_type = find_upstream_type(data->upstreamIface);
+
+ if (upstream_type == IPA_UPSTEAM_MAX) {
+ IPAWANERR("upstreamIface %s not supported\n",
+ data->upstreamIface);
+ } else if (upstream_type == IPA_UPSTEAM_WLAN) {
+ IPAWANDBG_LOW(" query wifi-backhaul stats\n");
+ rc = rmnet_ipa_query_tethering_stats_wifi(
+ data, false);
+ if (rc) {
+ IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+ return rc;
+ }
+ } else {
+ IPAWANDBG_LOW(" query modem-backhaul stats\n");
+ rc = rmnet_ipa_query_tethering_stats_modem(
+ data, false);
+ if (rc) {
+ IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n");
+ return rc;
+ }
+ }
+ return rc;
+}
+
+int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
+{
+ enum ipa_upstream_type upstream_type;
+ int rc = 0;
+
+ /* get IPA backhaul type */
+ upstream_type = find_upstream_type(data->upstreamIface);
+
+ if (upstream_type == IPA_UPSTEAM_MAX) {
+ IPAWANERR("upstream iface %s not supported\n",
+ data->upstreamIface);
+ } else if (upstream_type == IPA_UPSTEAM_WLAN) {
+ IPAWANDBG(" reset wifi-backhaul stats\n");
+ rc = rmnet_ipa_query_tethering_stats_wifi(
+ NULL, true);
+ if (rc) {
+ IPAWANERR("reset WLAN stats failed\n");
+ return rc;
+ }
+ } else {
+ IPAWANDBG(" reset modem-backhaul stats\n");
+ rc = rmnet_ipa_query_tethering_stats_modem(
+ NULL, true);
+ if (rc) {
+ IPAWANERR("reset MODEM stats failed\n");
+ return rc;
+ }
+ }
+ return rc;
+}
+
/**
* ipa_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
* @mux_id - The MUX ID on which the quota has been reached
@@ -2788,7 +2984,8 @@ int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
* on the specific interface which matches the mux_id has been reached.
*
*/
-void ipa_broadcast_quota_reach_ind(u32 mux_id)
+void ipa_broadcast_quota_reach_ind(u32 mux_id,
+ enum ipa_upstream_type upstream_type)
{
char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
@@ -2798,11 +2995,17 @@ void ipa_broadcast_quota_reach_ind(u32 mux_id)
int res;
int index;
- index = find_mux_channel_index(mux_id);
-
- if (index == MAX_NUM_OF_MUX_CHANNEL) {
- IPAWANERR("%u is an mux ID\n", mux_id);
+	/* check upstream_type */
+ if (upstream_type == IPA_UPSTEAM_MAX) {
+ IPAWANERR("upstreamIface type %d not supported\n",
+ upstream_type);
return;
+ } else if (upstream_type == IPA_UPSTEAM_MODEM) {
+ index = find_mux_channel_index(mux_id);
+ if (index == MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANERR("%u is an mux ID\n", mux_id);
+ return;
+ }
}
res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
@@ -2811,16 +3014,28 @@ void ipa_broadcast_quota_reach_ind(u32 mux_id)
IPAWANERR("message too long (%d)", res);
return;
}
+
/* posting msg for L-release for CNE */
+ if (upstream_type == IPA_UPSTEAM_MODEM) {
res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
- "UPSTREAM=%s", mux_channel[index].vchannel_name);
+ "UPSTREAM=%s", mux_channel[index].vchannel_name);
+ } else {
+ res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+ "UPSTREAM=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
+ }
if (IPA_QUOTA_REACH_IF_NAME_MAX_SIZE <= res) {
IPAWANERR("message too long (%d)", res);
return;
}
+
/* posting msg for M-release for CNE */
+ if (upstream_type == IPA_UPSTEAM_MODEM) {
res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
- "INTERFACE=%s", mux_channel[index].vchannel_name);
+ "INTERFACE=%s", mux_channel[index].vchannel_name);
+ } else {
+ res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+ "INTERFACE=%s", IPA_UPSTEAM_WLAN_IFACE_NAME);
+ }
if (IPA_QUOTA_REACH_IF_NAME_MAX_SIZE <= res) {
IPAWANERR("message too long (%d)", res);
return;
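
With upstream_type in play, the broadcast differs only in the interface name
it reports: WLAN events use the fixed IPA_UPSTEAM_WLAN_IFACE_NAME, while
modem events still resolve the mux channel's vchannel_name. For a WLAN quota
event the two CNE environment strings built above therefore come out as:

	UPSTREAM=wlan0
	INTERFACE=wlan0

with the matching vchannel name substituted for modem events.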
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
index 6a92c5fb7d52..64d5c488310b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -251,7 +251,7 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
- if (rmnet_ipa_query_tethering_stats(NULL, true)) {
+ if (rmnet_ipa_reset_tethering_stats(NULL)) {
IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
retval = -EFAULT;
break;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 80a9f0ee288b..a878dc6a97db 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3900,38 +3900,34 @@ static struct class regulator_class = {
#ifdef CONFIG_DEBUG_FS
-#define MAX_DEBUG_BUF_LEN 50
-
-static DEFINE_MUTEX(debug_buf_mutex);
-static char debug_buf[MAX_DEBUG_BUF_LEN];
-
static int reg_debug_enable_set(void *data, u64 val)
{
- int err_info;
- if (IS_ERR(data) || data == NULL) {
- pr_err("Function Input Error %ld\n", PTR_ERR(data));
- return -ENOMEM;
- }
+ struct regulator *regulator = data;
+ int ret;
- if (val)
- err_info = regulator_enable(data);
- else
- err_info = regulator_disable(data);
+ if (val) {
+ ret = regulator_enable(regulator);
+ if (ret)
+ rdev_err(regulator->rdev, "enable failed, ret=%d\n",
+ ret);
+ } else {
+ ret = regulator_disable(regulator);
+ if (ret)
+ rdev_err(regulator->rdev, "disable failed, ret=%d\n",
+ ret);
+ }
- return err_info;
+ return ret;
}
static int reg_debug_enable_get(void *data, u64 *val)
{
- if (IS_ERR(data) || data == NULL) {
- pr_err("Function Input Error %ld\n", PTR_ERR(data));
- return -ENOMEM;
- }
+ struct regulator *regulator = data;
+
+ *val = regulator_is_enabled(regulator);
- *val = regulator_is_enabled(data);
return 0;
}
-
DEFINE_SIMPLE_ATTRIBUTE(reg_enable_fops, reg_debug_enable_get,
reg_debug_enable_set, "%llu\n");
@@ -3940,13 +3936,13 @@ static int reg_debug_bypass_enable_get(void *data, u64 *val)
struct regulator *regulator = data;
struct regulator_dev *rdev = regulator->rdev;
bool enable = false;
- int rc = 0;
+ int ret = 0;
mutex_lock(&rdev->mutex);
if (rdev->desc->ops->get_bypass) {
- rc = rdev->desc->ops->get_bypass(rdev, &enable);
- if (rc)
- pr_err("get_bypass() failed, rc=%d\n", rc);
+ ret = rdev->desc->ops->get_bypass(rdev, &enable);
+ if (ret)
+ rdev_err(rdev, "get_bypass() failed, ret=%d\n", ret);
} else {
enable = (rdev->bypass_count == rdev->open_count
- rdev->open_offset);
@@ -3955,7 +3951,7 @@ static int reg_debug_bypass_enable_get(void *data, u64 *val)
*val = enable;
- return rc;
+ return ret;
}
static int reg_debug_bypass_enable_set(void *data, u64 val)
@@ -3975,155 +3971,133 @@ static int reg_debug_bypass_enable_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(reg_bypass_enable_fops, reg_debug_bypass_enable_get,
reg_debug_bypass_enable_set, "%llu\n");
-static int reg_debug_fdisable_set(void *data, u64 val)
+static int reg_debug_force_disable_set(void *data, u64 val)
{
- int err_info;
- if (IS_ERR(data) || data == NULL) {
- pr_err("Function Input Error %ld\n", PTR_ERR(data));
- return -ENOMEM;
- }
+ struct regulator *regulator = data;
+ int ret = 0;
- if (val > 0)
- err_info = regulator_force_disable(data);
- else
- err_info = 0;
+ if (val > 0) {
+ ret = regulator_force_disable(regulator);
+ if (ret)
+ rdev_err(regulator->rdev, "force_disable failed, ret=%d\n",
+ ret);
+ }
- return err_info;
+ return ret;
}
+DEFINE_SIMPLE_ATTRIBUTE(reg_force_disable_fops, reg_debug_enable_get,
+ reg_debug_force_disable_set, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(reg_fdisable_fops, reg_debug_enable_get,
- reg_debug_fdisable_set, "%llu\n");
+#define MAX_DEBUG_BUF_LEN 50
-static ssize_t reg_debug_volt_set(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t reg_debug_voltage_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
{
- int err_info, filled;
- int min, max = -1;
- if (IS_ERR(file) || file == NULL) {
- pr_err("Function Input Error %ld\n", PTR_ERR(file));
- return -ENOMEM;
- }
+ struct regulator *regulator = file->private_data;
+ char buf[MAX_DEBUG_BUF_LEN];
+ int ret, filled;
+ int min_uV, max_uV = -1;
if (count < MAX_DEBUG_BUF_LEN) {
- mutex_lock(&debug_buf_mutex);
-
- if (copy_from_user(debug_buf, (void __user *) buf, count))
+ if (copy_from_user(buf, ubuf, count))
return -EFAULT;
- debug_buf[count] = '\0';
- filled = sscanf(debug_buf, "%d %d", &min, &max);
+ buf[count] = '\0';
+ filled = sscanf(buf, "%d %d", &min_uV, &max_uV);
- mutex_unlock(&debug_buf_mutex);
- /* check that user entered two numbers */
- if (filled < 2 || min < 0 || max < min) {
- pr_info("Error, correct format: 'echo \"min max\""
- " > voltage");
- return -ENOMEM;
- } else {
- err_info = regulator_set_voltage(file->private_data,
- min, max);
+ /* Check that both min and max voltage were specified. */
+ if (filled < 2 || min_uV < 0 || max_uV < min_uV) {
+ rdev_err(regulator->rdev, "incorrect values specified: \"%s\"; should be: \"min_uV max_uV\"\n",
+ buf);
+ return -EINVAL;
}
- } else {
- pr_err("Error-Input voltage pair"
- " string exceeds maximum buffer length");
- return -ENOMEM;
+ ret = regulator_set_voltage(regulator, min_uV, max_uV);
+ if (ret) {
+ rdev_err(regulator->rdev, "set voltage(%d, %d) failed, ret=%d\n",
+ min_uV, max_uV, ret);
+ return ret;
+ }
+ } else {
+ rdev_err(regulator->rdev, "voltage request string exceeds maximum buffer size\n");
+ return -EINVAL;
}
return count;
}
-static ssize_t reg_debug_volt_get(struct file *file, char __user *buf,
+static ssize_t reg_debug_voltage_read(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
- int voltage, output, rc;
- if (IS_ERR(file) || file == NULL) {
- pr_err("Function Input Error %ld\n", PTR_ERR(file));
- return -ENOMEM;
- }
-
- voltage = regulator_get_voltage(file->private_data);
- mutex_lock(&debug_buf_mutex);
+ struct regulator *regulator = file->private_data;
+ char buf[MAX_DEBUG_BUF_LEN];
+ int voltage, ret;
- output = snprintf(debug_buf, MAX_DEBUG_BUF_LEN-1, "%d\n", voltage);
- rc = simple_read_from_buffer((void __user *) buf, output, ppos,
- (void *) debug_buf, output);
+ voltage = regulator_get_voltage(regulator);
- mutex_unlock(&debug_buf_mutex);
+ ret = snprintf(buf, MAX_DEBUG_BUF_LEN - 1, "%d\n", voltage);
- return rc;
+ return simple_read_from_buffer(ubuf, count, ppos, buf, ret);
}
-static int reg_debug_volt_open(struct inode *inode, struct file *file)
+static int reg_debug_voltage_open(struct inode *inode, struct file *file)
{
- if (IS_ERR(file) || file == NULL) {
- pr_err("Function Input Error %ld\n", PTR_ERR(file));
- return -ENOMEM;
- }
-
file->private_data = inode->i_private;
+
return 0;
}
-static const struct file_operations reg_volt_fops = {
- .write = reg_debug_volt_set,
- .open = reg_debug_volt_open,
- .read = reg_debug_volt_get,
+static const struct file_operations reg_voltage_fops = {
+ .write = reg_debug_voltage_write,
+ .open = reg_debug_voltage_open,
+ .read = reg_debug_voltage_read,
};
static int reg_debug_mode_set(void *data, u64 val)
{
- int err_info;
- if (IS_ERR(data) || data == NULL) {
- pr_err("Function Input Error %ld\n", PTR_ERR(data));
- return -ENOMEM;
- }
+ struct regulator *regulator = data;
+ unsigned int mode = val;
+ int ret;
- err_info = regulator_set_mode(data, (unsigned int)val);
+ ret = regulator_set_mode(regulator, mode);
+ if (ret)
+ rdev_err(regulator->rdev, "set mode=%u failed, ret=%d\n",
+ mode, ret);
- return err_info;
+ return ret;
}
static int reg_debug_mode_get(void *data, u64 *val)
{
- int err_info;
- if (IS_ERR(data) || data == NULL) {
- pr_err("Function Input Error %ld\n", PTR_ERR(data));
- return -ENOMEM;
+ struct regulator *regulator = data;
+ int mode;
+
+ mode = regulator_get_mode(regulator);
+ if (mode < 0) {
+ rdev_err(regulator->rdev, "get mode failed, ret=%d\n", mode);
+ return mode;
}
- err_info = regulator_get_mode(data);
+ *val = mode;
- if (err_info < 0) {
- pr_err("Regulator_get_mode returned an error!\n");
- return -ENOMEM;
- } else {
- *val = err_info;
- return 0;
- }
+ return 0;
}
-
-DEFINE_SIMPLE_ATTRIBUTE(reg_mode_fops, reg_debug_mode_get,
- reg_debug_mode_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(reg_mode_fops, reg_debug_mode_get, reg_debug_mode_set,
+ "%llu\n");
static int reg_debug_set_load(void *data, u64 val)
{
- int err_info;
- if (IS_ERR(data) || data == NULL) {
- pr_err("Function Input Error %ld\n", PTR_ERR(data));
- return -ENOMEM;
- }
-
- err_info = regulator_set_load(data, (unsigned int)val);
+ struct regulator *regulator = data;
+ int load = val;
+ int ret;
- if (err_info < 0) {
- pr_err("Regulator_set_optimum_mode returned an error!\n");
- return err_info;
- }
+ ret = regulator_set_load(regulator, load);
+ if (ret)
+ rdev_err(regulator->rdev, "set load=%d failed, ret=%d\n",
+ load, ret);
- return 0;
+ return ret;
}
-
DEFINE_SIMPLE_ATTRIBUTE(reg_set_load_fops, reg_debug_mode_get,
reg_debug_set_load, "%llu\n");
@@ -4133,17 +4107,12 @@ static int reg_debug_consumers_show(struct seq_file *m, void *v)
struct regulator *reg;
char *supply_name;
- if (!rdev) {
- pr_err("regulator device missing");
- return -EINVAL;
- }
-
mutex_lock(&rdev->mutex);
/* Print a header if there are consumers. */
if (rdev->open_count)
- seq_printf(m, "Device-Supply "
- "EN Min_uV Max_uV load_uA\n");
+ seq_printf(m, "%-32s EN Min_uV Max_uV load_uA\n",
+ "Device-Supply");
list_for_each_entry(reg, &rdev->consumer_list, list) {
if (reg->supply_name)
@@ -4189,17 +4158,10 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
struct device *parent = rdev->dev.parent;
const char *rname = rdev_get_name(rdev);
char name[NAME_MAX];
- struct dentry *err_ptr = NULL;
- struct regulator *reg;
- const struct regulator_ops *reg_ops;
+ struct regulator *regulator;
+ const struct regulator_ops *ops;
mode_t mode;
- if (IS_ERR(rdev) || rdev == NULL ||
- IS_ERR(debugfs_root) || debugfs_root == NULL) {
- pr_err("Error-Bad Function Input\n");
- goto error;
- }
-
/* Avoid duplicate debugfs directory names */
if (parent && rname == rdev->desc->name) {
snprintf(name, sizeof(name), "%s-%s", dev_name(parent),
@@ -4210,8 +4172,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
rdev->debugfs = debugfs_create_dir(rname, debugfs_root);
if (!rdev->debugfs) {
rdev_warn(rdev, "Failed to create debugfs directory\n");
- rdev->debugfs = NULL;
- goto error;
+ return;
}
debugfs_create_u32("use_count", 0444, rdev->debugfs,
@@ -4223,100 +4184,58 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
debugfs_create_file("consumers", 0444, rdev->debugfs, rdev,
&reg_consumers_fops);
- reg = regulator_get(NULL, rdev_get_name(rdev));
- if (IS_ERR(reg) || reg == NULL) {
- pr_err("Error-Bad Function Input\n");
- goto error;
+ regulator = regulator_get(NULL, rdev_get_name(rdev));
+ if (IS_ERR(regulator)) {
+ rdev_err(rdev, "regulator get failed, ret=%ld\n",
+ PTR_ERR(regulator));
+ return;
}
- rdev->debug_consumer = reg;
+ rdev->debug_consumer = regulator;
rdev->open_offset = 1;
- reg_ops = rdev->desc->ops;
- mode = S_IRUGO | S_IWUSR;
- /* Enabled File */
- if (mode)
- err_ptr = debugfs_create_file("enable", mode, rdev->debugfs,
- reg, &reg_enable_fops);
- if (IS_ERR(err_ptr)) {
- pr_err("Error-Could not create enable file\n");
- goto error;
- }
+ ops = rdev->desc->ops;
- mode = 0;
- /* Bypass Enable File */
- if (reg_ops->set_bypass)
- mode = S_IWUSR | S_IRUGO;
-
- if (mode)
- err_ptr = debugfs_create_file("bypass", mode,
- rdev->debugfs, reg,
- &reg_bypass_enable_fops);
- if (IS_ERR(err_ptr)) {
- pr_err("Error-Could not create bypass enable file\n");
- goto error;
- }
+ debugfs_create_file("enable", 0644, rdev->debugfs, regulator,
+ &reg_enable_fops);
+ if (ops->set_bypass)
+ debugfs_create_file("bypass", 0644, rdev->debugfs, regulator,
+ &reg_bypass_enable_fops);
mode = 0;
- /* Force-Disable File */
- if (reg_ops->is_enabled)
- mode |= S_IRUGO;
- if (reg_ops->enable || reg_ops->disable)
- mode |= S_IWUSR;
+ if (ops->is_enabled)
+ mode |= 0444;
+ if (ops->disable)
+ mode |= 0200;
if (mode)
- err_ptr = debugfs_create_file("force_disable", mode,
- rdev->debugfs, reg, &reg_fdisable_fops);
- if (IS_ERR(err_ptr)) {
- pr_err("Error-Could not create force_disable file\n");
- goto error;
- }
+ debugfs_create_file("force_disable", mode, rdev->debugfs,
+ regulator, &reg_force_disable_fops);
mode = 0;
- /* Voltage File */
- if (reg_ops->get_voltage)
- mode |= S_IRUGO;
- if (reg_ops->set_voltage)
- mode |= S_IWUSR;
+ if (ops->get_voltage || ops->get_voltage_sel)
+ mode |= 0444;
+ if (ops->set_voltage || ops->set_voltage_sel)
+ mode |= 0200;
if (mode)
- err_ptr = debugfs_create_file("voltage", mode, rdev->debugfs,
- reg, &reg_volt_fops);
- if (IS_ERR(err_ptr)) {
- pr_err("Error-Could not create voltage file\n");
- goto error;
- }
+ debugfs_create_file("voltage", mode, rdev->debugfs, regulator,
+ &reg_voltage_fops);
mode = 0;
- /* Mode File */
- if (reg_ops->get_mode)
- mode |= S_IRUGO;
- if (reg_ops->set_mode)
- mode |= S_IWUSR;
+ if (ops->get_mode)
+ mode |= 0444;
+ if (ops->set_mode)
+ mode |= 0200;
if (mode)
- err_ptr = debugfs_create_file("mode", mode, rdev->debugfs,
- reg, &reg_mode_fops);
- if (IS_ERR(err_ptr)) {
- pr_err("Error-Could not create mode file\n");
- goto error;
- }
+ debugfs_create_file("mode", mode, rdev->debugfs, regulator,
+ &reg_mode_fops);
mode = 0;
- /* Optimum Mode File */
- if (reg_ops->get_mode)
- mode |= S_IRUGO;
- if (reg_ops->set_mode)
- mode |= S_IWUSR;
+ if (ops->get_mode)
+ mode |= 0444;
+ if (ops->set_load || (ops->get_optimum_mode && ops->set_mode))
+ mode |= 0200;
if (mode)
- err_ptr = debugfs_create_file("load", mode,
- rdev->debugfs, reg, &reg_set_load_fops);
- if (IS_ERR(err_ptr)) {
- pr_err("Error-Could not create optimum_mode file\n");
- goto error;
- }
-
- return;
-
-error:
- rdev_deinit_debugfs(rdev);
- return;
+ debugfs_create_file("load", mode, rdev->debugfs, regulator,
+ &reg_set_load_fops);
}
#else
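
The rewrite above leans on one pattern throughout: each debugfs attribute is
a DEFINE_SIMPLE_ATTRIBUTE() get/set pair whose void *data is the struct
regulator handed to debugfs_create_file(), and the file mode is composed from
0444 (readable when a get op exists) and 0200 (writable when a set op
exists). The same pattern in isolation, with illustrative example_* names:

static int example_enable_get(void *data, u64 *val)
{
	struct regulator *regulator = data;

	*val = regulator_is_enabled(regulator);
	return 0;
}

static int example_enable_set(void *data, u64 val)
{
	struct regulator *regulator = data;

	return val ? regulator_enable(regulator) : regulator_disable(regulator);
}
DEFINE_SIMPLE_ATTRIBUTE(example_enable_fops, example_enable_get,
			example_enable_set, "%llu\n");

/* Registered as:
 * debugfs_create_file("enable", 0644, rdev->debugfs, regulator,
 *		       &example_enable_fops);
 */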
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 561a0d38e502..ded18adba596 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -1906,7 +1906,7 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
icnss_pr_dbg("Modem-Notify: event %lu\n", code);
if (code == SUBSYS_AFTER_SHUTDOWN &&
- notif->crashed != CRASH_STATUS_WDOG_BITE) {
+ notif->crashed == CRASH_STATUS_ERR_FATAL) {
icnss_remove_msa_permissions(priv);
icnss_pr_info("Collecting msa0 segment dump\n");
icnss_msa0_ramdump(priv);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 5246e5d1166c..f397aca8ad3a 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -5834,10 +5834,10 @@ int mdss_mdp_display_commit(struct mdss_mdp_ctl *ctl, void *arg,
sctl->flush_bits = 0;
sctl_flush_bits = 0;
} else {
- sctl_flush_bits = sctl->flush_bits;
+ sctl_flush_bits |= sctl->flush_bits;
}
}
- ctl_flush_bits = ctl->flush_bits;
+ ctl_flush_bits |= ctl->flush_bits;
mutex_unlock(&ctl->flush_lock);
}
/*
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index e098e2329ac6..ceba9f7d759a 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -97,6 +97,16 @@ struct adm_cmd_matrix_map_routings_v5 {
*/
#define ADM_CMD_DEVICE_OPEN_V5 0x00010326
+/* This command allows a client to open a COPP/Voice Proc the same
+ * way as ADM_CMD_DEVICE_OPEN_V5, but supports multiple endpoint2
+ * channels.
+ *
+ * @return
+ * #ADM_CMDRSP_DEVICE_OPEN_V6 with the resulting status and
+ * COPP ID.
+ */
+#define ADM_CMD_DEVICE_OPEN_V6 0x00010356
+
/* Definition for a low latency stream session. */
#define ADM_LOW_LATENCY_DEVICE_SESSION 0x2000
@@ -246,12 +256,135 @@ struct adm_cmd_device_open_v5 {
/* Array of channel mapping of buffers that the audio COPP
* sends to the endpoint. Channel[i] mapping describes channel
 * i inside the buffer, where 0 <= i < dev_num_channel.
- * This value is relevent only for an audio Rx COPP.
+ * This value is relevant only for an audio Rx COPP.
* For the voice processor block and Tx audio block, this field
* is set to zero and is ignored.
*/
} __packed;
+/* ADM device open command payload of the
+ * #ADM_CMD_DEVICE_OPEN_V6 command.
+ */
+struct adm_cmd_device_open_v6 {
+ struct apr_hdr hdr;
+ u16 flags;
+/* Reserved for future use. Clients must set this field
+ * to zero.
+ */
+
+ u16 mode_of_operation;
+/* Specifies whether the COPP must be opened on the Tx or Rx
+ * path. Use the ADM_CMD_COPP_OPEN_MODE_OF_OPERATION_* macros for
+ * supported values and interpretation.
+ * Supported values:
+ * - 0x1 -- Rx path COPP
+ * - 0x2 -- Tx path live COPP
+ * - 0x3 -- Tx path nonlive COPP
+ * Live connections cause sample discarding in the Tx device
+ * matrix if the destination output ports do not pull them
+ * fast enough. Nonlive connections queue the samples
+ * indefinitely.
+ */
+
+ u16 endpoint_id_1;
+/* Logical and physical endpoint ID of the audio path.
+ * If the ID is a voice processor Tx block, it receives near
+ * samples. Supported values: Any pseudoport, AFE Rx port,
+ * or AFE Tx port. For a list of valid IDs, refer to
+ * @xhyperref{Q4,[Q4]}.
+ * Q4 = Hexagon Multimedia: AFE Interface Specification
+ */
+
+ u16 endpoint_id_2;
+/* Logical and physical endpoint ID 2 for a voice processor
+ * Tx block. This is not applicable to audio COPP.
+ * Supported values:
+ * - AFE Rx port
+ * - 0xFFFF -- Endpoint 2 is unavailable and the voice processor
+ *   Tx block ignores this endpoint
+ * When the voice processor Tx block is created on the audio
+ * record path, it can receive far-end samples from an AFE Rx
+ * port if the voice call is active. The ID of the AFE port is
+ * provided in this field.
+ * For a list of valid IDs, refer to @xhyperref{Q4,[Q4]}.
+ */
+
+ u32 topology_id;
+/* Audio COPP topology ID; 32-bit GUID. */
+
+ u16 dev_num_channel;
+/* Number of channels the audio COPP sends to/receives from
+ * the endpoint.
+ * Supported values: 1 to 8.
+ * The value is ignored for the voice processor Tx block, where
+ * the channel configuration is derived from the topology ID.
+ */
+
+ u16 bit_width;
+/* Bit width (in bits) that the audio COPP sends to/receives
+ * from the endpoint. The value is ignored for the voice
+ * processing Tx block, where the PCM width is 16 bits.
+ */
+
+ u32 sample_rate;
+/* Sampling rate at which the audio COPP/voice processor Tx
+ * block interfaces with the endpoint.
+ * Supported values for voice processor Tx: 8000, 16000,
+ * 48000 Hz
+ * Supported values for audio COPP: >0 and <=192 kHz
+ */
+
+ u8 dev_channel_mapping[8];
+/* Array of channel mapping of buffers that the audio COPP
+ * sends to the endpoint. Channel[i] mapping describes channel
+ * i inside the buffer, where 0 <= i < dev_num_channel.
+ * This value is relevant only for an audio Rx COPP.
+ * For the voice processor block and Tx audio block, this field
+ * is set to zero and is ignored.
+ */
+
+ u16 dev_num_channel_eid2;
+/* Number of channels the voice processor block sends
+ * to/receives from the endpoint2.
+ * Supported values: 1 to 8.
+ * The value is ignored for audio COPP or if endpoint_id_2 is
+ * set to 0xFFFF.
+ */
+
+ u16 bit_width_eid2;
+/* Bit width (in bits) that the voice processor sends
+ * to/receives from the endpoint2.
+ * Supported values: 16 and 24.
+ * The value is ignored for audio COPP or if endpoint_id_2 is
+ * set to 0xFFFF.
+ */
+
+ u32 sample_rate_eid2;
+/* Sampling rate at which the voice processor Tx block
+ * interfaces with the endpoint2.
+ * Supported values for Tx voice processor: >0 and <=384 kHz
+ * The value is ignored for audio COPP or if endpoint_id_2 is
+ * set to 0xFFFF.
+ */
+
+ u8 dev_channel_mapping_eid2[8];
+/* Array of channel mapping of buffers that the voice processor
+ * sends to the endpoint. Channel[i] mapping describes channel
+ * i inside the buffer, where 0 <= i < dev_num_channel.
+ * This value is relevant only for the Tx voice processor.
+ * The values are ignored for audio COPP or if endpoint_id_2 is
+ * set to 0xFFFF.
+ */
+} __packed;
+
/*
* This command allows the client to close a COPP and disconnect
* the device session.
@@ -368,6 +501,16 @@ struct adm_cmd_rsp_device_open_v5 {
/* Reserved. This field must be set to zero.*/
} __packed;
+/* Returns the status and COPP ID to an #ADM_CMD_DEVICE_OPEN_V6 command.
+ */
+#define ADM_CMDRSP_DEVICE_OPEN_V6 0x00010357
+
+/* The payload of the #ADM_CMDRSP_DEVICE_OPEN_V6 message, which
+ * returns the status and COPP ID to an #ADM_CMD_DEVICE_OPEN_V6
+ * command, is identical to that of ADM_CMDRSP_DEVICE_OPEN_V5.
+ */
+
/* This command allows a query of one COPP parameter.
*/
#define ADM_CMD_GET_PP_PARAMS_V5 0x0001032A
@@ -6768,6 +6911,12 @@ struct admx_mic_gain {
/*< Clients must set this field to zero. */
} __packed;
+struct adm_set_mic_gain_params {
+ struct adm_cmd_set_pp_params_v5 params;
+ struct adm_param_data_v5 data;
+ struct admx_mic_gain mic_gain_data;
+} __packed;
+
/* end_addtogroup audio_pp_param_ids */
/* @ingroup audio_pp_module_ids
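
struct adm_set_mic_gain_params added above is the complete in-band APR packet
for a mic-gain set: the generic set-PP-params command, the per-parameter
header, then the module payload. A hedged sketch of filling the parameter
portion -- the two ID macros and the tx_mic_gain field are assumed names from
the ADM headers, not shown in this diff:

static void example_fill_mic_gain(struct adm_set_mic_gain_params *cmd,
				  u16 gain)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->data.module_id = ADM_MODULE_ID_MIC_GAIN_CTRL;	/* assumed */
	cmd->data.param_id = ADM_PARAM_ID_MIC_GAIN;		/* assumed */
	cmd->data.param_size = sizeof(cmd->mic_gain_data);
	cmd->mic_gain_data.tx_mic_gain = gain;			/* assumed */
	/* cmd->params (APR header, payload size, port/COPP routing) is
	 * filled by adm_set_mic_gain() before the packet is sent.
	 */
}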
diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h
index c9a429d8607d..47e8e2a73920 100644
--- a/include/sound/q6adm-v2.h
+++ b/include/sound/q6adm-v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -102,6 +102,12 @@ int adm_connect_afe_port(int mode, int session_id, int port_id);
void adm_ec_ref_rx_id(int port_id);
+void adm_num_ec_ref_rx_chans(int num_chans);
+
+void adm_ec_ref_rx_bit_width(int bit_width);
+
+void adm_ec_ref_rx_sampling_rate(int sampling_rate);
+
int adm_get_lowlatency_copp_id(int port_id);
int adm_set_multi_ch_map(char *channel_map, int path);
@@ -130,6 +136,8 @@ int adm_set_volume(int port_id, int copp_idx, int volume);
int adm_set_softvolume(int port_id, int copp_idx,
struct audproc_softvolume_params *softvol_param);
+int adm_set_mic_gain(int port_id, int copp_idx, int volume);
+
int adm_param_enable(int port_id, int copp_idx, int module_id, int enable);
int adm_send_calibration(int port_id, int copp_idx, int path, int perf_mode,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 03994225edcf..dbe61b252cb9 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -1860,6 +1860,11 @@ static int msm_routing_lsm_mux_put(struct snd_kcontrol *kcontrol,
int lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX;
struct snd_soc_dapm_update *update = NULL;
+ if (mux >= e->items) {
+ pr_err("%s: Invalid mux value %d\n", __func__, mux);
+ return -EINVAL;
+ }
+
pr_debug("%s: LSM enable %ld\n", __func__,
ucontrol->value.integer.value[0]);
switch (ucontrol->value.integer.value[0]) {
@@ -2183,6 +2188,11 @@ static int msm_routing_ec_ref_rx_put(struct snd_kcontrol *kcontrol,
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
struct snd_soc_dapm_update *update = NULL;
+ if (mux >= e->items) {
+ pr_err("%s: Invalid mux value %d\n", __func__, mux);
+ return -EINVAL;
+ }
+
mutex_lock(&routing_lock);
switch (ucontrol->value.integer.value[0]) {
case 0:
@@ -2379,6 +2389,11 @@ static int msm_routing_ext_ec_put(struct snd_kcontrol *kcontrol,
uint16_t ext_ec_ref_port_id;
struct snd_soc_dapm_update *update = NULL;
+ if (mux >= e->items) {
+ pr_err("%s: Invalid mux value %d\n", __func__, mux);
+ return -EINVAL;
+ }
+
mutex_lock(&routing_lock);
msm_route_ext_ec_ref = ucontrol->value.integer.value[0];
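
All three guards added above close the same hole: the enumerated item index
arrives from userspace, so it must be validated against e->items before it
can index the enum's tables. The pattern in isolation, with an illustrative
handler name:

static int example_enum_put(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	unsigned int mux = ucontrol->value.enumerated.item[0];

	if (mux >= e->items) {	/* reject out-of-range userspace input */
		pr_err("%s: Invalid mux value %u\n", __func__, mux);
		return -EINVAL;
	}

	/* mux is now a safe index into e->texts[] / e->values[] */
	return 0;
}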
diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
index 832d7c0170f4..ccd098d65160 100644
--- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
@@ -81,6 +81,10 @@ static int msm_route_hfp_vol_control;
static const DECLARE_TLV_DB_LINEAR(hfp_rx_vol_gain, 0,
INT_RX_VOL_MAX_STEPS);
+static int msm_route_icc_vol_control;
+static const DECLARE_TLV_DB_LINEAR(icc_rx_vol_gain, 0,
+ INT_RX_VOL_MAX_STEPS);
+
static int msm_route_pri_auxpcm_lb_vol_ctrl;
static const DECLARE_TLV_DB_LINEAR(pri_auxpcm_lb_vol_gain, 0,
INT_RX_VOL_MAX_STEPS);
@@ -493,6 +497,23 @@ static int msm_qti_pp_set_slimbus_8_lb_vol_mixer(struct snd_kcontrol *kcontrol,
return ret;
}
+static int msm_qti_pp_get_icc_vol_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = msm_route_icc_vol_control;
+ return 0;
+}
+
+static int msm_qti_pp_set_icc_vol_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ adm_set_mic_gain(AFE_PORT_ID_QUATERNARY_TDM_TX,
+ adm_get_default_copp_idx(AFE_PORT_ID_QUATERNARY_TDM_TX),
+ ucontrol->value.integer.value[0]);
+ msm_route_icc_vol_control = ucontrol->value.integer.value[0];
+ return 0;
+}
+
static int msm_qti_pp_get_quat_mi2s_fm_vol_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -809,6 +830,12 @@ static const struct snd_kcontrol_new int_hfp_vol_mixer_controls[] = {
msm_qti_pp_set_hfp_vol_mixer, hfp_rx_vol_gain),
};
+static const struct snd_kcontrol_new int_icc_vol_mixer_controls[] = {
+ SOC_SINGLE_EXT_TLV("Internal ICC Volume", SND_SOC_NOPM, 0,
+ INT_RX_VOL_GAIN, 0, msm_qti_pp_get_icc_vol_mixer,
+ msm_qti_pp_set_icc_vol_mixer, icc_rx_vol_gain),
+};
+
static const struct snd_kcontrol_new pri_auxpcm_lb_vol_mixer_controls[] = {
SOC_SINGLE_EXT_TLV("PRI AUXPCM LOOPBACK Volume",
AFE_PORT_ID_PRIMARY_PCM_TX, 0, INT_RX_VOL_GAIN, 0,
@@ -1001,6 +1028,9 @@ void msm_qti_pp_add_controls(struct snd_soc_platform *platform)
snd_soc_add_platform_controls(platform, int_hfp_vol_mixer_controls,
ARRAY_SIZE(int_hfp_vol_mixer_controls));
+ snd_soc_add_platform_controls(platform, int_icc_vol_mixer_controls,
+ ARRAY_SIZE(int_icc_vol_mixer_controls));
+
snd_soc_add_platform_controls(platform,
pri_auxpcm_lb_vol_mixer_controls,
ARRAY_SIZE(pri_auxpcm_lb_vol_mixer_controls));
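Note that msm_qti_pp_set_icc_vol_mixer() discards the return value of adm_set_mic_gain(), so a DSP-side failure still reads back as success and the cached msm_route_icc_vol_control is updated anyway. A variant that propagates the error first (a sketch only, reusing the helpers named in the hunk) could look like:

static int msm_qti_pp_set_icc_vol_mixer_checked(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	int rc;

	rc = adm_set_mic_gain(AFE_PORT_ID_QUATERNARY_TDM_TX,
		adm_get_default_copp_idx(AFE_PORT_ID_QUATERNARY_TDM_TX),
		ucontrol->value.integer.value[0]);
	if (rc < 0)
		return rc;	/* surface the ADM failure to userspace */

	msm_route_icc_vol_control = ucontrol->value.integer.value[0];
	return 0;
}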
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 30876b52ec9e..04eafdb240f2 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -102,6 +102,9 @@ struct adm_ctl {
int set_custom_topology;
int ec_ref_rx;
+ int num_ec_ref_rx_chans;
+ int ec_ref_rx_bit_width;
+ int ec_ref_rx_sampling_rate;
};
static struct adm_ctl this_adm;
@@ -1355,6 +1358,7 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv)
*/
case ADM_CMD_DEVICE_OPEN_V5:
case ADM_CMD_DEVICE_CLOSE_V5:
+ case ADM_CMD_DEVICE_OPEN_V6:
pr_debug("%s: Basic callback received, wake up.\n",
__func__);
atomic_set(&this_adm.copp.stat[port_idx]
@@ -1450,7 +1454,8 @@ static int32_t adm_callback(struct apr_client_data *data, void *priv)
}
switch (data->opcode) {
- case ADM_CMDRSP_DEVICE_OPEN_V5: {
+ case ADM_CMDRSP_DEVICE_OPEN_V5:
+ case ADM_CMDRSP_DEVICE_OPEN_V6: {
struct adm_cmd_rsp_device_open_v5 *open =
(struct adm_cmd_rsp_device_open_v5 *)data->payload;
@@ -2257,10 +2262,64 @@ inval_ch_mod:
return rc;
}
+int adm_arrange_mch_ep2_map(struct adm_cmd_device_open_v6 *open_v6,
+ int channel_mode)
+{
+ int rc = 0;
+
+ memset(open_v6->dev_channel_mapping_eid2, 0,
+ PCM_FORMAT_MAX_NUM_CHANNEL);
+
+ if (channel_mode == 1) {
+ open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FC;
+ } else if (channel_mode == 2) {
+ open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+ open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+ } else if (channel_mode == 3) {
+ open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+ open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+ open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_FC;
+ } else if (channel_mode == 4) {
+ open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+ open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+ open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_LS;
+ open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_RS;
+ } else if (channel_mode == 5) {
+ open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+ open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+ open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_FC;
+ open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_LS;
+ open_v6->dev_channel_mapping_eid2[4] = PCM_CHANNEL_RS;
+ } else if (channel_mode == 6) {
+ open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+ open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+ open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_LFE;
+ open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_FC;
+ open_v6->dev_channel_mapping_eid2[4] = PCM_CHANNEL_LS;
+ open_v6->dev_channel_mapping_eid2[5] = PCM_CHANNEL_RS;
+ } else if (channel_mode == 8) {
+ open_v6->dev_channel_mapping_eid2[0] = PCM_CHANNEL_FL;
+ open_v6->dev_channel_mapping_eid2[1] = PCM_CHANNEL_FR;
+ open_v6->dev_channel_mapping_eid2[2] = PCM_CHANNEL_LFE;
+ open_v6->dev_channel_mapping_eid2[3] = PCM_CHANNEL_FC;
+ open_v6->dev_channel_mapping_eid2[4] = PCM_CHANNEL_LS;
+ open_v6->dev_channel_mapping_eid2[5] = PCM_CHANNEL_RS;
+ open_v6->dev_channel_mapping_eid2[6] = PCM_CHANNEL_LB;
+ open_v6->dev_channel_mapping_eid2[7] = PCM_CHANNEL_RB;
+ } else {
+ pr_err("%s: invalid num_chan %d\n", __func__,
+ channel_mode);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
int perf_mode, uint16_t bit_width, int app_type, int acdb_id)
{
struct adm_cmd_device_open_v5 open;
+ struct adm_cmd_device_open_v6 open_v6;
int ret = 0;
int port_idx, copp_idx, flags;
int tmp_port = q6audio_get_port_id(port_id);
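adm_arrange_mch_ep2_map() above encodes a fixed channel map per channel count, with 7 channels deliberately unsupported (it falls through to -EINVAL). The same data can be expressed as a lookup table, which keeps the maps in one place; this is only an equivalent sketch, not part of the patch:

static const u8 ep2_ch_map[8][PCM_FORMAT_MAX_NUM_CHANNEL] = {
	{ PCM_CHANNEL_FC },					/* 1 ch */
	{ PCM_CHANNEL_FL, PCM_CHANNEL_FR },			/* 2 ch */
	{ PCM_CHANNEL_FL, PCM_CHANNEL_FR, PCM_CHANNEL_FC },	/* 3 ch */
	{ PCM_CHANNEL_FL, PCM_CHANNEL_FR, PCM_CHANNEL_LS,
	  PCM_CHANNEL_RS },					/* 4 ch */
	{ PCM_CHANNEL_FL, PCM_CHANNEL_FR, PCM_CHANNEL_FC,
	  PCM_CHANNEL_LS, PCM_CHANNEL_RS },			/* 5 ch */
	{ PCM_CHANNEL_FL, PCM_CHANNEL_FR, PCM_CHANNEL_LFE,
	  PCM_CHANNEL_FC, PCM_CHANNEL_LS, PCM_CHANNEL_RS },	/* 6 ch */
	{ 0 },							/* 7 ch: unsupported */
	{ PCM_CHANNEL_FL, PCM_CHANNEL_FR, PCM_CHANNEL_LFE,
	  PCM_CHANNEL_FC, PCM_CHANNEL_LS, PCM_CHANNEL_RS,
	  PCM_CHANNEL_LB, PCM_CHANNEL_RB },			/* 8 ch */
};

static int adm_fill_ep2_map(struct adm_cmd_device_open_v6 *open_v6,
			    int channel_mode)
{
	if (channel_mode < 1 || channel_mode > 8 || channel_mode == 7)
		return -EINVAL;
	/* Rows are zero-padded, so this also clears unused slots. */
	memcpy(open_v6->dev_channel_mapping_eid2,
	       ep2_ch_map[channel_mode - 1], PCM_FORMAT_MAX_NUM_CHANNEL);
	return 0;
}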
@@ -2409,10 +2468,9 @@ int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
open.flags = flags;
open.mode_of_operation = path;
open.endpoint_id_1 = tmp_port;
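+	/* 0xFFFF marks endpoint 2 unused; overridden below when EC ref is set */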
+ open.endpoint_id_2 = 0xFFFF;
- if (this_adm.ec_ref_rx == -1) {
- open.endpoint_id_2 = 0xFFFF;
- } else if (this_adm.ec_ref_rx && (path != 1)) {
+ if (this_adm.ec_ref_rx && (path != 1)) {
open.endpoint_id_2 = this_adm.ec_ref_rx;
this_adm.ec_ref_rx = -1;
}
@@ -2436,7 +2494,47 @@ int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
- ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
+ if ((this_adm.num_ec_ref_rx_chans != 0) && (path != 1) &&
+ (open.endpoint_id_2 != 0xFFFF)) {
+ memcpy(&open_v6, &open,
+ sizeof(struct adm_cmd_device_open_v5));
+ open_v6.hdr.opcode = ADM_CMD_DEVICE_OPEN_V6;
+ open_v6.hdr.pkt_size = sizeof(open_v6);
+ open_v6.dev_num_channel_eid2 =
+ this_adm.num_ec_ref_rx_chans;
+ this_adm.num_ec_ref_rx_chans = 0;
+
+ if (this_adm.ec_ref_rx_bit_width != 0) {
+ open_v6.bit_width_eid2 =
+ this_adm.ec_ref_rx_bit_width;
+ this_adm.ec_ref_rx_bit_width = 0;
+ } else {
+ open_v6.bit_width_eid2 = bit_width;
+ }
+
+ if (this_adm.ec_ref_rx_sampling_rate != 0) {
+ open_v6.sample_rate_eid2 =
+ this_adm.ec_ref_rx_sampling_rate;
+ this_adm.ec_ref_rx_sampling_rate = 0;
+ } else {
+ open_v6.sample_rate_eid2 = rate;
+ }
+
+ pr_debug("%s: eid2_channels=%d eid2_bit_width=%d eid2_rate=%d\n",
+ __func__, open_v6.dev_num_channel_eid2,
+ open_v6.bit_width_eid2,
+ open_v6.sample_rate_eid2);
+
+ ret = adm_arrange_mch_ep2_map(&open_v6,
+ open_v6.dev_num_channel_eid2);
+
+ if (ret)
+ return ret;
+
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open_v6);
+ } else {
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
+ }
if (ret < 0) {
pr_err("%s: port_id: 0x%x for[0x%x] failed %d\n",
__func__, tmp_port, port_id, ret);
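The V6 packet above is produced by memcpy()ing the fully populated V5 command and then filling in only the endpoint-2 fields. That is valid only because adm_cmd_device_open_v6 begins with the exact V5 layout and appends the EID2 members at the end. Illustratively (hypothetical stand-in types, not the driver's real definitions):

#include <stdint.h>
#include <string.h>

struct open_v5_sketch { uint32_t shared[16]; };

struct open_v6_sketch {
	uint32_t shared[16];		/* identical V5 prefix */
	uint16_t dev_num_channel_eid2;	/* members new in V6 */
	uint16_t bit_width_eid2;
	uint32_t sample_rate_eid2;
	uint8_t  dev_channel_mapping_eid2[8];
};

static void build_v6(struct open_v6_sketch *v6,
		     const struct open_v5_sketch *v5)
{
	/* Copy the shared prefix, then set only the EID2 additions. */
	memcpy(v6, v5, sizeof(*v5));
	v6->dev_num_channel_eid2 = 2;
}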
@@ -2729,7 +2827,28 @@ fail_cmd:
void adm_ec_ref_rx_id(int port_id)
{
this_adm.ec_ref_rx = port_id;
- pr_debug("%s: ec_ref_rx:%d", __func__, this_adm.ec_ref_rx);
+ pr_debug("%s: ec_ref_rx:%d\n", __func__, this_adm.ec_ref_rx);
+}
+
+void adm_num_ec_ref_rx_chans(int num_chans)
+{
+ this_adm.num_ec_ref_rx_chans = num_chans;
+ pr_debug("%s: num_ec_ref_rx_chans:%d\n",
+ __func__, this_adm.num_ec_ref_rx_chans);
+}
+
+void adm_ec_ref_rx_bit_width(int bit_width)
+{
+ this_adm.ec_ref_rx_bit_width = bit_width;
+ pr_debug("%s: ec_ref_rx_bit_width:%d\n",
+ __func__, this_adm.ec_ref_rx_bit_width);
+}
+
+void adm_ec_ref_rx_sampling_rate(int sampling_rate)
+{
+ this_adm.ec_ref_rx_sampling_rate = sampling_rate;
+ pr_debug("%s: ec_ref_rx_sampling_rate:%d\n",
+ __func__, this_adm.ec_ref_rx_sampling_rate);
}
int adm_close(int port_id, int perf_mode, int copp_idx)
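The four setters above only stage parameters; nothing reaches the DSP until the next adm_open(), which consumes and clears each staged value so the settings apply to exactly one DEVICE_OPEN_V6. A hypothetical caller (port and rates chosen purely for illustration; path 1 is the playback path that adm_open() excludes from EC ref handling):

static int open_capture_with_ec_ref(int port_id, int topology,
				    int perf_mode, int app_type,
				    int acdb_id)
{
	adm_ec_ref_rx_id(SLIMBUS_0_RX);	/* reference taken from this RX port */
	adm_num_ec_ref_rx_chans(2);	/* stereo reference */
	adm_ec_ref_rx_bit_width(16);
	adm_ec_ref_rx_sampling_rate(48000);

	/* path 2 is a capture path, so the staged EC ref is used */
	return adm_open(port_id, 2, 48000, 2, topology, perf_mode,
			16, app_type, acdb_id);
}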
@@ -3471,6 +3590,84 @@ fail_cmd:
return rc;
}
+int adm_set_mic_gain(int port_id, int copp_idx, int volume)
+{
+ struct adm_set_mic_gain_params mic_gain_params;
+ int rc = 0;
+ int sz, port_idx;
+
+ pr_debug("%s:\n", __func__);
+ port_id = afe_convert_virtual_to_portid(port_id);
+ port_idx = adm_validate_and_get_port_index(port_id);
+ if (port_idx < 0) {
+ pr_err("%s: Invalid port_id 0x%x\n", __func__, port_id);
+ return -EINVAL;
+ }
+
+ sz = sizeof(struct adm_set_mic_gain_params);
+
+ mic_gain_params.params.hdr.hdr_field =
+ APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ mic_gain_params.params.hdr.pkt_size = sz;
+ mic_gain_params.params.hdr.src_svc = APR_SVC_ADM;
+ mic_gain_params.params.hdr.src_domain = APR_DOMAIN_APPS;
+ mic_gain_params.params.hdr.src_port = port_id;
+ mic_gain_params.params.hdr.dest_svc = APR_SVC_ADM;
+ mic_gain_params.params.hdr.dest_domain = APR_DOMAIN_ADSP;
+ mic_gain_params.params.hdr.dest_port =
+ atomic_read(&this_adm.copp.id[port_idx][copp_idx]);
+ mic_gain_params.params.hdr.token = port_idx << 16 | copp_idx;
+ mic_gain_params.params.hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
+ mic_gain_params.params.payload_addr_lsw = 0;
+ mic_gain_params.params.payload_addr_msw = 0;
+ mic_gain_params.params.mem_map_handle = 0;
+ mic_gain_params.params.payload_size =
+ sizeof(struct adm_param_data_v5) +
+ sizeof(struct admx_mic_gain);
+ mic_gain_params.data.module_id = ADM_MODULE_IDX_MIC_GAIN_CTRL;
+ mic_gain_params.data.param_id = ADM_PARAM_IDX_MIC_GAIN;
+ mic_gain_params.data.param_size =
+ sizeof(struct admx_mic_gain);
+ mic_gain_params.data.reserved = 0;
+ mic_gain_params.mic_gain_data.tx_mic_gain = volume;
+ mic_gain_params.mic_gain_data.reserved = 0;
+ pr_debug("%s: Mic Gain set to %d at port_id 0x%x\n",
+ __func__, volume, port_id);
+
+ atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
+ rc = apr_send_pkt(this_adm.apr, (uint32_t *)&mic_gain_params);
+ if (rc < 0) {
+ pr_err("%s: Set params failed port = %#x\n",
+ __func__, port_id);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ /* Wait for the callback */
+ rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
+ atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!rc) {
+ pr_err("%s: Mic Gain Set params timed out port = %#x\n",
+ __func__, port_id);
+ rc = -EINVAL;
+ goto fail_cmd;
+ } else if (atomic_read(&this_adm.copp.stat
+ [port_idx][copp_idx]) > 0) {
+ pr_err("%s: DSP returned error[%s]\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&this_adm.copp.stat
+ [port_idx][copp_idx])));
+ rc = adsp_err_get_lnx_err_code(
+ atomic_read(&this_adm.copp.stat
+ [port_idx][copp_idx]));
+ goto fail_cmd;
+ }
+ rc = 0;
+fail_cmd:
+ return rc;
+}
+
int adm_param_enable(int port_id, int copp_idx, int module_id, int enable)
{
struct audproc_enable_param_t adm_mod_enable;
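adm_set_mic_gain() above follows the standard q6adm send-and-wait convention: stat is primed to -1, the packet goes out over APR, and the caller sleeps until adm_callback() stores 0 (success) or a positive ADSP error for that copp. Factored out, the skeleton is (hypothetical helper, not in the patch; the driver reports timeouts as -EINVAL):

static int adm_send_and_wait(void *pkt, int port_idx, int copp_idx)
{
	int rc, stat;

	atomic_set(&this_adm.copp.stat[port_idx][copp_idx], -1);
	rc = apr_send_pkt(this_adm.apr, (uint32_t *)pkt);
	if (rc < 0)
		return -EINVAL;	/* APR transport failure */

	rc = wait_event_timeout(this_adm.copp.wait[port_idx][copp_idx],
		atomic_read(&this_adm.copp.stat[port_idx][copp_idx]) >= 0,
		msecs_to_jiffies(TIMEOUT_MS));
	if (!rc)
		return -EINVAL;	/* no ack from the DSP */

	stat = atomic_read(&this_adm.copp.stat[port_idx][copp_idx]);
	if (stat > 0)
		return adsp_err_get_lnx_err_code(stat);	/* DSP NACK */

	return 0;
}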
@@ -4345,6 +4542,9 @@ static int __init adm_init(void)
int i = 0, j;
this_adm.apr = NULL;
this_adm.ec_ref_rx = -1;
+ this_adm.num_ec_ref_rx_chans = 0;
+ this_adm.ec_ref_rx_bit_width = 0;
+ this_adm.ec_ref_rx_sampling_rate = 0;
atomic_set(&this_adm.matrix_map_stat, 0);
init_waitqueue_head(&this_adm.matrix_map_wait);
atomic_set(&this_adm.adm_stat, 0);