-rw-r--r--  Documentation/devicetree/bindings/arm/msm/wil6210.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/fb/mdss-rotator.txt | 14
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qpnp-haptic.txt | 3
-rw-r--r--  Documentation/networking/rmnet.txt | 82
-rw-r--r--  arch/arm/boot/dts/qcom/msm-smb138x.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-gpu.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-mdss-panels.dtsi | 62
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-mdss.dtsi | 14
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts | 32
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-bus.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-mdss.dtsi | 13
-rw-r--r--  drivers/char/diag/diag_debugfs.c | 11
-rw-r--r--  drivers/char/diag/diag_masks.c | 2
-rw-r--r--  drivers/char/diag/diag_memorydevice.c | 60
-rw-r--r--  drivers/char/diag/diag_mux.c | 57
-rw-r--r--  drivers/char/diag/diagchar.h | 32
-rw-r--r--  drivers/char/diag/diagchar_core.c | 91
-rw-r--r--  drivers/char/diag/diagfwd.c | 3
-rw-r--r--  drivers/char/diag/diagfwd.h | 2
-rw-r--r--  drivers/char/diag/diagfwd_cntl.c | 22
-rw-r--r--  drivers/char/diag/diagfwd_cntl.h | 6
-rw-r--r--  drivers/char/diag/diagfwd_peripheral.c | 322
-rw-r--r--  drivers/char/diag/diagfwd_peripheral.h | 4
-rw-r--r--  drivers/esoc/esoc_dev.c | 6
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx.xml.h | 36
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_counters.c | 689
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 13
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 49
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 32
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 46
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 112
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 16
-rw-r--r--  drivers/iio/adc/qcom-rradc.c | 13
-rw-r--r--  drivers/input/touchscreen/Kconfig | 10
-rw-r--r--  drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c | 16
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c | 22
-rw-r--r--  drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c | 2
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_base.c | 17
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_base.h | 1
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_core.c | 17
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c | 20
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c | 9
-rw-r--r--  drivers/misc/hdcp.c | 5
-rw-r--r--  drivers/misc/qseecom_kernel.h | 4
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 11
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/rmnet/Kconfig | 21
-rw-r--r--  drivers/net/rmnet/Makefile | 14
-rw-r--r--  drivers/net/rmnet/rmnet_config.c | 1157
-rw-r--r--  drivers/net/rmnet/rmnet_config.h | 107
-rw-r--r--  drivers/net/rmnet/rmnet_handlers.c | 550
-rw-r--r--  drivers/net/rmnet/rmnet_handlers.h | 24
-rw-r--r--  drivers/net/rmnet/rmnet_main.c | 60
-rw-r--r--  drivers/net/rmnet/rmnet_map.h | 100
-rw-r--r--  drivers/net/rmnet/rmnet_map_command.c | 180
-rw-r--r--  drivers/net/rmnet/rmnet_map_data.c | 147
-rw-r--r--  drivers/net/rmnet/rmnet_private.h | 76
-rw-r--r--  drivers/net/rmnet/rmnet_stats.c | 86
-rw-r--r--  drivers/net/rmnet/rmnet_stats.h | 61
-rw-r--r--  drivers/net/rmnet/rmnet_vnd.c | 457
-rw-r--r--  drivers/net/rmnet/rmnet_vnd.h | 34
-rw-r--r--  drivers/net/wireless/Kconfig | 1
-rw-r--r--  drivers/net/wireless/Makefile | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi.c | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 21
-rw-r--r--  drivers/net/wireless/ath/wil6210/main.c | 20
-rw-r--r--  drivers/net/wireless/cnss_genl/Kconfig | 7
-rw-r--r--  drivers/net/wireless/cnss_genl/Makefile | 1
-rw-r--r--  drivers/net/wireless/cnss_genl/cnss_nl.c | 204
-rw-r--r--  drivers/pci/host/pci-msm.c | 52
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_flt.c | 9
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_flt.c | 7
-rw-r--r--  drivers/platform/msm/msm_11ad/msm_11ad.c | 75
-rw-r--r--  drivers/platform/msm/msm_ext_display.c | 18
-rw-r--r--  drivers/power/supply/qcom/fg-core.h | 14
-rw-r--r--  drivers/power/supply/qcom/fg-memif.c | 78
-rw-r--r--  drivers/power/supply/qcom/pmic-voter.c | 2
-rw-r--r--  drivers/power/supply/qcom/qpnp-fg-gen3.c | 64
-rw-r--r--  drivers/power/supply/qcom/qpnp-qnovo.c | 266
-rw-r--r--  drivers/power/supply/qcom/qpnp-smb2.c | 2
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 33
-rw-r--r--  drivers/power/supply/qcom/smb-lib.h | 2
-rw-r--r--  drivers/power/supply/qcom/smb-reg.h | 2
-rw-r--r--  drivers/power/supply/qcom/smb138x-charger.c | 20
-rw-r--r--  drivers/soc/qcom/glink_private.h | 5
-rw-r--r--  drivers/soc/qcom/glink_ssr.c | 84
-rw-r--r--  drivers/soc/qcom/icnss.c | 59
-rw-r--r--  drivers/soc/qcom/peripheral-loader.c | 2
-rw-r--r--  drivers/soc/qcom/qdsp6v2/audio_notifier.c | 2
-rw-r--r--  drivers/soc/qcom/qpnp-haptic.c | 37
-rw-r--r--  drivers/soc/qcom/service-notifier.c | 22
-rw-r--r--  drivers/soc/qcom/spcom.c | 4
-rw-r--r--  drivers/soc/qcom/wcd-dsp-glink.c | 8
-rw-r--r--  drivers/staging/android/ion/ion.c | 16
-rw-r--r--  drivers/tty/serial/msm_serial_hs.c | 28
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c | 7
-rw-r--r--  drivers/usb/gadget/function/f_mtp.c | 1
-rw-r--r--  drivers/usb/phy/phy-msm-ssusb-qmp.c | 8
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.c | 37
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.h | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_aux.c | 48
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_edid.c | 11
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_tx.c | 39
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c | 18
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_layer.c | 7
-rw-r--r--  fs/fat/fatent.c | 7
-rw-r--r--  fs/fat/inode.c | 5
-rw-r--r--  include/linux/diagchar.h | 1
-rw-r--r--  include/linux/msm_ext_display.h | 4
-rw-r--r--  include/linux/qpnp/qpnp-revid.h | 23
-rw-r--r--  include/net/cnss_nl.h | 100
-rw-r--r--  include/sound/apr_audio-v2.h | 5
-rw-r--r--  include/sound/q6asm-v2.h | 3
-rw-r--r--  include/uapi/drm/msm_drm.h | 71
-rw-r--r--  include/uapi/linux/Kbuild | 1
-rw-r--r--  include/uapi/linux/esoc_ctrl.h | 10
-rw-r--r--  include/uapi/linux/rmnet.h | 213
-rw-r--r--  include/uapi/media/msm_media_info.h | 12
-rw-r--r--  kernel/trace/ipc_logging.c | 6
-rw-r--r--  net/ipv6/udp.c | 59
-rw-r--r--  sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c | 6
-rw-r--r--  sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c | 8
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c | 209
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c | 211
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-qti-pp-config.c | 130
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-qti-pp-config.h | 14
-rw-r--r--  sound/soc/msm/qdsp6v2/q6asm.c | 92
133 files changed, 7104 insertions, 528 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index b381bdebdfc9..c4673279953d 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -10,6 +10,10 @@ Required properties:
- compatible: "qcom,wil6210"
- qcom,smmu-support: Boolean flag indicating whether PCIe has SMMU support
+- qcom,smmu-s1-en: Boolean flag indicating whether SMMU stage 1 should be enabled
+- qcom,smmu-fast-map: Boolean flag indicating whether SMMU fast mapping should be enabled
+- qcom,smmu-coherent: Boolean flag indicating SMMU DMA and page table coherency
+- qcom,smmu-mapping: Specifies the base address and size of the SMMU space
- qcom,pcie-parent: phandle for the PCIe root complex to which 11ad card is connected
- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
the below optional properties:
@@ -33,6 +37,10 @@ Example:
wil6210: qcom,wil6210 {
compatible = "qcom,wil6210";
qcom,smmu-support;
+ qcom,smmu-s1-en;
+ qcom,smmu-fast-map;
+ qcom,smmu-coherent;
+ qcom,smmu-mapping = <0x20000000 0xe0000000>;
qcom,pcie-parent = <&pcie1>;
qcom,wigig-en = <&tlmm 94 0>;
qcom,msm-bus,name = "wil6210";
diff --git a/Documentation/devicetree/bindings/fb/mdss-rotator.txt b/Documentation/devicetree/bindings/fb/mdss-rotator.txt
index 5e077ac23819..d424201cd427 100644
--- a/Documentation/devicetree/bindings/fb/mdss-rotator.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-rotator.txt
@@ -53,6 +53,8 @@ Optional properties
bandwidth compression (ubwc)
- qcom,mdss-has-downscale Boolean property to indicate
if the hw supports downscale
+- qcom,sde-reg-bus: Subnode to provide bus scaling for register access for
+ the rotator
Example:
mdss_rotator: qcom,mdss_rotator {
@@ -75,4 +77,16 @@ Example:
vdd-supply = <&gdsc_mdss>;
gdsc-mmagic-mdss-supply = <&gdsc_mmagic_mdss>;
qcom,supply-names = "vdd", "gdsc-mmagic-mdss";
+ qcom,sde-reg-bus {
+ /* Reg Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_rot_reg";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>,
+ <1 590 0 160000>,
+ <1 590 0 320000>;
+ };
};
diff --git a/Documentation/devicetree/bindings/soc/qcom/qpnp-haptic.txt b/Documentation/devicetree/bindings/soc/qcom/qpnp-haptic.txt
index 17a510a5ee6a..337649824257 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qpnp-haptic.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qpnp-haptic.txt
@@ -66,6 +66,9 @@ Optional properties when qcom,actuator-type is "lra"
- qcom,lra-high-z : High Z configuration for auto resonance. Possible string values are
"none", "opt1", "opt2" and "opt3" (default). For PM660,
"opt0" is valid value for 1 LRA period.
+ - qcom,lra-hw-auto-resonance : boolean, enables hardware auto-resonance for PM660.
+ Use this property to enable hardware auto-resonance.
+ If not defined, then software auto-resonance is enabled (default).
- qcom,lra-qwd-drive-duration : Drive duration of LRA in QWD mode for PM660.
Possible values are: 0: 1/4 LRA PERIOD and 1: 3/8 LRA PERIOD
- qcom,lra-calibrate-at-eop : To calibrate at End of Pattern for PM660.
diff --git a/Documentation/networking/rmnet.txt b/Documentation/networking/rmnet.txt
new file mode 100644
index 000000000000..73a2c06dbc9e
--- /dev/null
+++ b/Documentation/networking/rmnet.txt
@@ -0,0 +1,82 @@
+1. Introduction
+
+The rmnet driver supports the Multiplexing and Aggregation Protocol (MAP).
+This protocol is used by all recent chipsets containing Qualcomm
+Technologies, Inc. modems.
+
+This driver can be used to register onto any physical network device in
+IP mode. Physical transports include USB, HSIC, PCIe and IP accelerator.
+
+Multiplexing allows for the creation of logical netdevices (rmnet devices) to
+handle multiple private data networks (PDNs) such as default internet, tethering,
+multimedia messaging service (MMS) or the IP Multimedia Subsystem (IMS). The
+hardware sends packets with MAP headers to rmnet. Based on the multiplexer ID,
+rmnet routes the packets to the appropriate PDN after removing the MAP header.
+
+Aggregation is required to achieve high data rates. This involves the hardware
+sending an aggregated bunch of MAP frames. The rmnet driver de-aggregates
+these MAP frames and sends them to the appropriate PDNs.
+
+2. Packet format
+
+a. MAP packet (data / control)
+
+The MAP header has the same endianness as the IP packet.
+
+Packet format -
+
+Bit             0             1          2-7       8 - 15          16 - 31
+Function   Command / Data   Reserved     Pad    Multiplexer ID   Payload length
+Bit            32 - x
+Function      Raw Bytes
+
+The Command (1) / Data (0) bit indicates whether the packet is a MAP command
+or a data packet. Command packets are used for transport-level flow control.
+Data packets are standard IP packets.
+
+Reserved bits are usually zeroed out and are to be ignored by the receiver.
+
+Padding is the number of bytes added to achieve 4-byte alignment, if required
+by the hardware.
+
+The Multiplexer ID indicates the PDN on which the data has to be sent.
+
+Payload length includes the padding length but does not include MAP header
+length.
+
+b. MAP packet (command specific)
+
+Bit             0             1          2-7       8 - 15          16 - 31
+Function     Command       Reserved     Pad    Multiplexer ID   Payload length
+Bit           32 - 39       40 - 45      46 - 47        48 - 63
+Function   Command name    Reserved   Command Type    Reserved
+Bit           64 - 95
+Function   Transaction ID
+Bit           96 - 127
+Function   Command data
+
+Command 1 indicates disabling flow, while command 2 indicates enabling flow.
+
+Command types -
+0 is for a MAP command request
+1 is to acknowledge the receipt of a command
+2 is for an unsupported command
+3 is for an error during the processing of a command
+
+c. Aggregation
+
+Aggregation is multiple MAP packets (data or command) delivered to
+rmnet in a single linear skb. rmnet processes the individual
+packets and either ACKs the MAP command or delivers the IP packet to the
+network stack, as needed.
+
+MAP header|IP Packet|Optional padding|MAP header|IP Packet|Optional padding....
+MAP header|IP Packet|Optional padding|MAP header|Command Packet|Optional pad...
+
+3. Userspace configuration
+
+rmnet userspace configuration is done through the netlink library librmnetctl
+and the command-line utility rmnetcli. The utility is hosted in the CodeAurora Forum git:
+
+https://source.codeaurora.org/quic/la/platform/vendor/qcom-opensource/\
+dataservices/tree/rmnetctl
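
As a rough illustration of the MAP header described in rmnet.txt above (not part
of this patch): a C sketch of the header and a minimal walk over an aggregated
buffer. The struct, field and function names are hypothetical; the driver's real
definitions live in drivers/net/rmnet/rmnet_map.h added by this series, and C
bitfield ordering is compiler/endian dependent, so the layout below assumes a
little-endian build.

/*
 * Illustrative only: MAP header per the description above, plus a minimal
 * de-aggregation walk.  Names are hypothetical; see rmnet_map.h for the
 * driver's real definitions.
 */
#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

struct map_header {
	uint8_t  pad_len:6;	/* bits 2-7: pad bytes for 4-byte alignment */
	uint8_t  reserved:1;	/* bit 1: reserved, ignored by the receiver */
	uint8_t  cd_bit:1;	/* bit 0: 1 = command, 0 = data             */
	uint8_t  mux_id;	/* bits 8-15: multiplexer ID (selects PDN)  */
	uint16_t pkt_len;	/* bits 16-31: payload length incl. padding */
} __attribute__((packed));

/* Count MAP frames in one aggregated, linear buffer. */
static int map_count_frames(const uint8_t *buf, size_t len)
{
	int frames = 0;

	while (len >= sizeof(struct map_header)) {
		const struct map_header *h = (const struct map_header *)buf;
		size_t frame_len = sizeof(*h) + ntohs(h->pkt_len);

		if (frame_len > len)
			break;		/* truncated trailing frame */
		frames++;
		buf += frame_len;	/* payload length already covers padding */
		len -= frame_len;
	}
	return frames;
}
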
diff --git a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
index ea4f05069aab..df7d30210c19 100644
--- a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
@@ -128,3 +128,10 @@
};
};
};
+
+&smb138x_parallel_slave {
+ smb138x_vbus: qcom,smb138x-vbus {
+ status = "disabled";
+ regulator-name = "smb138x-vbus";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630-gpu.dtsi b/arch/arm/boot/dts/qcom/sdm630-gpu.dtsi
index 0dd2d206ddd5..e0d51db067c9 100644
--- a/arch/arm/boot/dts/qcom/sdm630-gpu.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-gpu.dtsi
@@ -114,8 +114,8 @@
vdd-supply = <&gdsc_gpu_gx>;
/* CPU latency parameter */
- qcom,pm-qos-active-latency = <349>;
- qcom,pm-qos-wakeup-latency = <349>;
+ qcom,pm-qos-active-latency = <424>;
+ qcom,pm-qos-wakeup-latency = <424>;
/* Quirks */
qcom,gpu-quirk-dp2clockgating-disable;
diff --git a/arch/arm/boot/dts/qcom/sdm630-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/sdm630-mdss-panels.dtsi
index 43a01094662a..81e0c6930bf3 100644
--- a/arch/arm/boot/dts/qcom/sdm630-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-mdss-panels.dtsi
@@ -15,6 +15,7 @@
#include "dsi-panel-nt35695b-truly-fhd-cmd.dtsi"
#include "dsi-panel-truly-1080p-cmd.dtsi"
#include "dsi-panel-truly-1080p-video.dtsi"
+#include "dsi-panel-rm67195-amoled-fhd-cmd.dtsi"
&soc {
dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -49,6 +50,57 @@
qcom,supply-post-on-sleep = <10>;
};
};
+
+ dsi_panel_pwr_supply_labibb_amoled:
+ dsi_panel_pwr_supply_labibb_amoled {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,panel-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "wqhd-vddio";
+ qcom,supply-min-voltage = <1800000>;
+ qcom,supply-max-voltage = <1950000>;
+ qcom,supply-enable-load = <32000>;
+ qcom,supply-disable-load = <80>;
+ };
+
+ qcom,panel-supply-entry@1 {
+ reg = <1>;
+ qcom,supply-name = "vdda-3p3";
+ qcom,supply-min-voltage = <3300000>;
+ qcom,supply-max-voltage = <3300000>;
+ qcom,supply-enable-load = <13200>;
+ qcom,supply-disable-load = <80>;
+ };
+
+ qcom,panel-supply-entry@2 {
+ reg = <2>;
+ qcom,supply-name = "lab";
+ qcom,supply-min-voltage = <4600000>;
+ qcom,supply-max-voltage = <6100000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+
+ qcom,panel-supply-entry@3 {
+ reg = <3>;
+ qcom,supply-name = "ibb";
+ qcom,supply-min-voltage = <4000000>;
+ qcom,supply-max-voltage = <6300000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+
+ qcom,panel-supply-entry@4 {
+ reg = <4>;
+ qcom,supply-name = "oledb";
+ qcom,supply-min-voltage = <5000000>;
+ qcom,supply-max-voltage = <8100000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+ };
};
&dsi_nt35695b_truly_fhd_video {
@@ -98,3 +150,13 @@
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-check-mode = "bta_check";
};
+
+&dsi_rm67195_amoled_fhd_cmd {
+ qcom,mdss-dsi-panel-timings-phy-v2 = [23 1e 07 08 05 03 04 a0
+ 23 1e 07 08 05 03 04 a0
+ 23 1e 07 08 05 03 04 a0
+ 23 1e 07 08 05 03 04 a0
+ 23 19 07 08 05 03 04 a0];
+ qcom,mdss-dsi-t-clk-post = <0x0d>;
+ qcom,mdss-dsi-t-clk-pre = <0x2d>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi b/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
index 9c34a60e7aaa..9ac76f7e2f6e 100644
--- a/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
@@ -347,6 +347,7 @@
qcom,timing-db-mode;
wqhd-vddio-supply = <&pm660_l11>;
+ vdda-3p3-supply = <&pm660l_l6>;
lab-supply = <&lcdb_ldo_vreg>;
ibb-supply = <&lcdb_ncp_vreg>;
qcom,mdss-mdp = <&mdss_mdp>;
@@ -527,6 +528,19 @@
qcom,mdss-default-ot-rd-limit = <32>;
qcom,mdss-default-ot-wr-limit = <32>;
+
+ qcom,sde-reg-bus {
+ /* Reg Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_rot_reg";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>,
+ <1 590 0 160000>,
+ <1 590 0 320000>;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts b/arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts
index c2408ba7bf76..deb10b591444 100644
--- a/arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts
+++ b/arch/arm/boot/dts/qcom/sdm630-pm660a-qrd.dts
@@ -51,3 +51,35 @@
qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_213_en>;
qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
};
+
+&pm660a_oledb {
+ status = "okay";
+ qcom,oledb-default-voltage-mv = <6400>;
+};
+
+&mdss_mdp {
+ qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+ hw-config = "single_dsi";
+};
+
+&mdss_dsi0 {
+ qcom,dsi-pref-prim-pan = <&dsi_rm67195_amoled_fhd_cmd>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ oledb-supply = <&pm660a_oledb>;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+ qcom,platform-reset-gpio = <&tlmm 53 0>;
+ qcom,platform-te-gpio = <&tlmm 59 0>;
+};
+
+&dsi_rm67195_amoled_fhd_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <255>;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply_labibb_amoled>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-bus.dtsi b/arch/arm/boot/dts/qcom/sdm660-bus.dtsi
index d555da4cbd08..6c956fc9b9d2 100644
--- a/arch/arm/boot/dts/qcom/sdm660-bus.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-bus.dtsi
@@ -324,9 +324,9 @@
qcom,qport = <4>;
qcom,qos-mode = "fixed";
qcom,connections = <&slv_hmss_l3 &slv_ebi>;
- qcom,prio-lvl = <0>;
- qcom,prio-rd = <0>;
- qcom,prio-wr = <0>;
+ qcom,prio-lvl = <1>;
+ qcom,prio-rd = <1>;
+ qcom,prio-wr = <1>;
qcom,bus-dev = <&fab_bimc>;
qcom,mas-rpm-id = <ICBID_MASTER_PIMEM>;
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi
index e31a863ae22d..64ca4676ccd5 100644
--- a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-cdp.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -229,7 +229,7 @@
reg = <0x0>;
qcom,csiphy-sd-index = <0>;
qcom,csid-sd-index = <0>;
- qcom,mount-angle = <270>;
+ qcom,mount-angle = <90>;
qcom,led-flash-src = <&led_flash0>;
qcom,actuator-src = <&actuator0>;
qcom,ois-src = <&ois0>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi
index 416cd99a81cb..191beaa4d53b 100644
--- a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-mtp.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -229,7 +229,7 @@
reg = <0x0>;
qcom,csiphy-sd-index = <0>;
qcom,csid-sd-index = <0>;
- qcom,mount-angle = <270>;
+ qcom,mount-angle = <90>;
qcom,led-flash-src = <&led_flash0>;
qcom,actuator-src = <&actuator0>;
qcom,ois-src = <&ois0>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi b/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
index 4794e648752b..b263d2a68792 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
@@ -593,6 +593,19 @@
qcom,mdss-default-ot-rd-limit = <32>;
qcom,mdss-default-ot-wr-limit = <32>;
+
+ qcom,sde-reg-bus {
+ /* Reg Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_rot_reg";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>,
+ <1 590 0 160000>,
+ <1 590 0 320000>;
+ };
};
};
diff --git a/drivers/char/diag/diag_debugfs.c b/drivers/char/diag/diag_debugfs.c
index b861d5f32d03..ca7dd88048ac 100644
--- a/drivers/char/diag/diag_debugfs.c
+++ b/drivers/char/diag/diag_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -72,6 +72,7 @@ static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
"Uses Device Tree: %d\n"
"Apps Supports Separate CMDRSP: %d\n"
"Apps Supports HDLC Encoding: %d\n"
+ "Apps Supports Header Untagging: %d\n"
"Apps Supports Sockets: %d\n"
"Logging Mode: %d\n"
"RSP Buffer is Busy: %d\n"
@@ -86,6 +87,7 @@ static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
driver->use_device_tree,
driver->supports_separate_cmdrsp,
driver->supports_apps_hdlc_encoding,
+ driver->supports_apps_header_untagging,
driver->supports_sockets,
driver->logging_mode,
driver->rsp_buf_busy,
@@ -97,18 +99,19 @@ static ssize_t diag_dbgfs_read_status(struct file *file, char __user *ubuf,
for (i = 0; i < NUM_PERIPHERALS; i++) {
ret += scnprintf(buf+ret, buf_size-ret,
- "p: %s Feature: %02x %02x |%c%c%c%c%c%c%c%c|\n",
+ "p: %s Feature: %02x %02x |%c%c%c%c%c%c%c%c%c|\n",
PERIPHERAL_STRING(i),
driver->feature[i].feature_mask[0],
driver->feature[i].feature_mask[1],
driver->feature[i].rcvd_feature_mask ? 'F':'f',
+ driver->feature[i].peripheral_buffering ? 'B':'b',
driver->feature[i].separate_cmd_rsp ? 'C':'c',
driver->feature[i].encode_hdlc ? 'H':'h',
- driver->feature[i].peripheral_buffering ? 'B':'b',
driver->feature[i].mask_centralization ? 'M':'m',
driver->feature[i].stm_support ? 'Q':'q',
driver->feature[i].sockets_enabled ? 'S':'s',
- driver->feature[i].sent_feature_mask ? 'T':'t');
+ driver->feature[i].sent_feature_mask ? 'T':'t',
+ driver->feature[i].untag_header ? 'U':'u');
}
#ifdef CONFIG_DIAG_OVER_USB
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 44e71a704e6a..0c958d855f94 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -456,6 +456,8 @@ static void diag_send_feature_mask_update(uint8_t peripheral)
DIAG_SET_FEATURE_MASK(F_DIAG_REQ_RSP_SUPPORT);
if (driver->supports_apps_hdlc_encoding)
DIAG_SET_FEATURE_MASK(F_DIAG_APPS_HDLC_ENCODE);
+ if (driver->supports_apps_header_untagging)
+ DIAG_SET_FEATURE_MASK(F_DIAG_PKT_HEADER_UNTAG);
DIAG_SET_FEATURE_MASK(F_DIAG_MASK_CENTRALIZATION);
if (driver->supports_sockets)
DIAG_SET_FEATURE_MASK(F_DIAG_SOCKETS_ENABLED);
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index c552f263d7e5..dc3029cc459d 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,7 @@
#include "diagmem.h"
#include "diagfwd.h"
#include "diagfwd_peripheral.h"
+#include "diag_ipc_logging.h"
struct diag_md_info diag_md[NUM_DIAG_MD_DEV] = {
{
@@ -143,9 +144,24 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
if (!buf || len < 0)
return -EINVAL;
- peripheral = GET_BUF_PERIPHERAL(ctx);
- if (peripheral > NUM_PERIPHERALS)
- return -EINVAL;
+ if (driver->pd_logging_mode) {
+ peripheral = GET_PD_CTXT(ctx);
+ switch (peripheral) {
+ case UPD_WLAN:
+ break;
+ case DIAG_ID_MPSS:
+ default:
+ peripheral = GET_BUF_PERIPHERAL(ctx);
+ if (peripheral > NUM_PERIPHERALS)
+ return -EINVAL;
+ break;
+ }
+ } else {
+ /* Account for Apps data as well */
+ peripheral = GET_BUF_PERIPHERAL(ctx);
+ if (peripheral > NUM_PERIPHERALS)
+ return -EINVAL;
+ }
session_info = diag_md_session_get_peripheral(peripheral);
if (!session_info)
@@ -219,18 +235,41 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
uint8_t peripheral = 0;
struct diag_md_session_t *session_info = NULL;
+ mutex_lock(&driver->diagfwd_untag_mutex);
+
for (i = 0; i < NUM_DIAG_MD_DEV && !err; i++) {
ch = &diag_md[i];
for (j = 0; j < ch->num_tbl_entries && !err; j++) {
entry = &ch->tbl[j];
if (entry->len <= 0)
continue;
- peripheral = GET_BUF_PERIPHERAL(entry->ctx);
- /* Account for Apps data as well */
- if (peripheral > NUM_PERIPHERALS)
- goto drop_data;
+ if (driver->pd_logging_mode) {
+ peripheral = GET_PD_CTXT(entry->ctx);
+ switch (peripheral) {
+ case UPD_WLAN:
+ break;
+ case DIAG_ID_MPSS:
+ default:
+ peripheral =
+ GET_BUF_PERIPHERAL(entry->ctx);
+ if (peripheral > NUM_PERIPHERALS)
+ goto drop_data;
+ break;
+ }
+ } else {
+ /* Account for Apps data as well */
+ peripheral = GET_BUF_PERIPHERAL(entry->ctx);
+ if (peripheral > NUM_PERIPHERALS)
+ goto drop_data;
+ }
+
session_info =
diag_md_session_get_peripheral(peripheral);
+ if (!session_info) {
+ mutex_unlock(&driver->diagfwd_untag_mutex);
+ return -EIO;
+ }
+
if (session_info && info &&
(session_info->pid != info->pid))
continue;
@@ -303,6 +342,8 @@ drop_data:
if (drain_again)
chk_logging_wakeup();
+ mutex_unlock(&driver->diagfwd_untag_mutex);
+
return err;
}
@@ -322,7 +363,8 @@ int diag_md_close_peripheral(int id, uint8_t peripheral)
spin_lock_irqsave(&ch->lock, flags);
for (i = 0; i < ch->num_tbl_entries && !found; i++) {
entry = &ch->tbl[i];
- if (GET_BUF_PERIPHERAL(entry->ctx) != peripheral)
+ if ((GET_BUF_PERIPHERAL(entry->ctx) != peripheral) ||
+ (GET_PD_CTXT(entry->ctx) != peripheral))
continue;
found = 1;
if (ch->ops && ch->ops->write_done) {
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index 6586f5e0cf86..55c5de1ea9fc 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -133,21 +133,43 @@ int diag_mux_queue_read(int proc)
int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
{
struct diag_logger_t *logger = NULL;
- int peripheral;
+ int peripheral, upd;
if (proc < 0 || proc >= NUM_MUX_PROC)
return -EINVAL;
if (!diag_mux)
return -EIO;
- peripheral = GET_BUF_PERIPHERAL(ctx);
- if (peripheral > NUM_PERIPHERALS)
- return -EINVAL;
-
- if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
- logger = diag_mux->md_ptr;
- else
- logger = diag_mux->usb_ptr;
+ upd = GET_PD_CTXT(ctx);
+ if (upd) {
+ switch (upd) {
+ case DIAG_ID_MPSS:
+ upd = PERIPHERAL_MODEM;
+ break;
+ case UPD_WLAN:
+ break;
+ default:
+ pr_err("diag: invalid pd ctxt= %d\n", upd);
+ return -EINVAL;
+ }
+ if (((MD_PERIPHERAL_MASK(upd)) &
+ (diag_mux->mux_mask)) &&
+ driver->md_session_map[upd])
+ logger = diag_mux->md_ptr;
+ else
+ logger = diag_mux->usb_ptr;
+ } else {
+
+ peripheral = GET_BUF_PERIPHERAL(ctx);
+ if (peripheral > NUM_PERIPHERALS)
+ return -EINVAL;
+
+ if (MD_PERIPHERAL_MASK(peripheral) &
+ diag_mux->mux_mask)
+ logger = diag_mux->md_ptr;
+ else
+ logger = diag_mux->usb_ptr;
+ }
if (logger && logger->log_ops && logger->log_ops->write)
return logger->log_ops->write(proc, buf, len, ctx);
@@ -159,9 +181,17 @@ int diag_mux_close_peripheral(int proc, uint8_t peripheral)
struct diag_logger_t *logger = NULL;
if (proc < 0 || proc >= NUM_MUX_PROC)
return -EINVAL;
+
/* Peripheral should account for Apps data as well */
- if (peripheral > NUM_PERIPHERALS)
- return -EINVAL;
+ if (peripheral > NUM_PERIPHERALS) {
+ if (driver->num_pd_session) {
+ if (peripheral > NUM_MD_SESSIONS)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+ }
+
if (!diag_mux)
return -EIO;
@@ -182,7 +212,8 @@ int diag_mux_switch_logging(int *req_mode, int *peripheral_mask)
if (!req_mode)
return -EINVAL;
- if (*peripheral_mask <= 0 || *peripheral_mask > DIAG_CON_ALL) {
+ if (*peripheral_mask <= 0 ||
+ (*peripheral_mask > (DIAG_CON_ALL | DIAG_CON_UPD_ALL))) {
pr_err("diag: mask %d in %s\n", *peripheral_mask, __func__);
return -EINVAL;
}
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 9d235b7abc58..511b019e33ec 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -64,14 +64,19 @@
#define DIAG_CON_LPASS (0x0004) /* Bit mask for LPASS */
#define DIAG_CON_WCNSS (0x0008) /* Bit mask for WCNSS */
#define DIAG_CON_SENSORS (0x0010) /* Bit mask for Sensors */
-#define DIAG_CON_WDSP (0x0020) /* Bit mask for WDSP */
-#define DIAG_CON_CDSP (0x0040)
+#define DIAG_CON_WDSP (0x0020) /* Bit mask for WDSP */
+#define DIAG_CON_CDSP (0x0040) /* Bit mask for CDSP */
+
+#define DIAG_CON_UPD_WLAN (0x1000) /*Bit mask for WLAN PD*/
+#define DIAG_CON_UPD_AUDIO (0x2000) /*Bit mask for AUDIO PD*/
+#define DIAG_CON_UPD_SENSORS (0x4000) /*Bit mask for SENSORS PD*/
#define DIAG_CON_NONE (0x0000) /* Bit mask for No SS*/
#define DIAG_CON_ALL (DIAG_CON_APSS | DIAG_CON_MPSS \
| DIAG_CON_LPASS | DIAG_CON_WCNSS \
| DIAG_CON_SENSORS | DIAG_CON_WDSP \
| DIAG_CON_CDSP)
+#define DIAG_CON_UPD_ALL (DIAG_CON_UPD_WLAN)
#define DIAG_STM_MODEM 0x01
#define DIAG_STM_LPASS 0x02
@@ -165,7 +170,7 @@
#define PKT_ALLOC 1
#define PKT_RESET 2
-#define FEATURE_MASK_LEN 2
+#define FEATURE_MASK_LEN 4
#define DIAG_MD_NONE 0
#define DIAG_MD_PERIPHERAL 1
@@ -209,8 +214,18 @@
#define NUM_PERIPHERALS 6
#define APPS_DATA (NUM_PERIPHERALS)
+#define UPD_WLAN 7
+#define UPD_AUDIO 8
+#define UPD_SENSORS 9
+#define NUM_UPD 3
+
+#define DIAG_ID_APPS 1
+#define DIAG_ID_MPSS 2
+#define DIAG_ID_WLAN 3
+
/* Number of sessions possible in Memory Device Mode. +1 for Apps data */
-#define NUM_MD_SESSIONS (NUM_PERIPHERALS + 1)
+#define NUM_MD_SESSIONS (NUM_PERIPHERALS \
+ + NUM_UPD + 1)
#define MD_PERIPHERAL_MASK(x) (1 << x)
@@ -407,6 +422,7 @@ struct diag_partial_pkt_t {
struct diag_logging_mode_param_t {
uint32_t req_mode;
uint32_t peripheral_mask;
+ uint32_t pd_mask;
uint8_t mode_param;
} __packed;
@@ -418,6 +434,7 @@ struct diag_md_session_t {
struct diag_mask_info *msg_mask;
struct diag_mask_info *log_mask;
struct diag_mask_info *event_mask;
+ struct thread_info *md_client_thread_info;
struct task_struct *task;
};
@@ -453,6 +470,7 @@ struct diag_feature_t {
uint8_t log_on_demand;
uint8_t separate_cmd_rsp;
uint8_t encode_hdlc;
+ uint8_t untag_header;
uint8_t peripheral_buffering;
uint8_t mask_centralization;
uint8_t stm_support;
@@ -484,6 +502,7 @@ struct diagchar_dev {
int use_device_tree;
int supports_separate_cmdrsp;
int supports_apps_hdlc_encoding;
+ int supports_apps_header_untagging;
int supports_sockets;
/* The state requested in the STM command */
int stm_state_requested[NUM_STM_PROCESSORS];
@@ -515,6 +534,7 @@ struct diagchar_dev {
struct mutex cmd_reg_mutex;
uint32_t cmd_reg_count;
struct mutex diagfwd_channel_mutex[NUM_PERIPHERALS];
+ struct mutex diagfwd_untag_mutex;
/* Sizes that reflect memory pool sizes */
unsigned int poolsize;
unsigned int poolsize_hdlc;
@@ -577,6 +597,10 @@ struct diagchar_dev {
int in_busy_dcipktdata;
int logging_mode;
int logging_mask;
+ int pd_logging_mode;
+ int num_pd_session;
+ int cpd_len_1;
+ int cpd_len_2;
int mask_check;
uint32_t md_session_mask;
uint8_t md_session_mode;
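
A quick sketch of how the new user-space DIAG_CON_UPD_* bits in this hunk relate
to the kernel-side UPD_* session indices, mirroring the diag_translate_mask()
changes in diagchar_core.c below; the helper name is illustrative, the constants
are the ones introduced above.

/* Sketch: user-space connection bit -> kernel MD-session mask bit. */
#define DIAG_CON_UPD_WLAN	0x1000		/* user-space bit for WLAN PD */
#define UPD_WLAN		7		/* kernel MD session index    */
#define MD_PERIPHERAL_MASK(x)	(1 << (x))

static unsigned int upd_user_to_kernel_mask(unsigned int user_mask)
{
	unsigned int kmask = 0;

	if (user_mask & DIAG_CON_UPD_WLAN)
		kmask |= MD_PERIPHERAL_MASK(UPD_WLAN);	/* == 0x80 */
	return kmask;
}
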
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 335064352789..4f56696f52e9 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -395,7 +395,8 @@ static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
ret |= DIAG_CON_WDSP;
if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_CDSP))
ret |= DIAG_CON_CDSP;
-
+ if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN))
+ ret |= DIAG_CON_UPD_WLAN;
return ret;
}
int diag_mask_param(void)
@@ -453,6 +454,14 @@ static void diag_close_logging_process(const int pid)
params.mode_param = 0;
params.peripheral_mask =
diag_translate_kernel_to_user_mask(session_peripheral_mask);
+ if (driver->pd_logging_mode)
+ params.pd_mask =
+ diag_translate_kernel_to_user_mask(session_peripheral_mask);
+
+ if (session_peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN)) {
+ driver->pd_logging_mode--;
+ driver->num_pd_session--;
+ }
mutex_lock(&driver->diagchar_mutex);
diag_switch_logging(&params);
mutex_unlock(&driver->diagchar_mutex);
@@ -1237,11 +1246,10 @@ int diag_md_session_create(int mode, int peripheral_mask, int proc)
mutex_unlock(&driver->md_session_lock);
return -ENOMEM;
}
-
new_session->peripheral_mask = 0;
new_session->pid = current->tgid;
new_session->task = current;
-
+ new_session->md_client_thread_info = current_thread_info();
new_session->log_mask = kzalloc(sizeof(struct diag_mask_info),
GFP_KERNEL);
if (!new_session->log_mask) {
@@ -1359,7 +1367,6 @@ static void diag_md_session_close(struct diag_md_session_t *session_info)
struct diag_md_session_t *diag_md_session_get_pid(int pid)
{
int i;
-
for (i = 0; i < NUM_MD_SESSIONS; i++) {
if (driver->md_session_map[i] &&
driver->md_session_map[i]->pid == pid)
@@ -1475,7 +1482,10 @@ static int diag_md_session_check(int curr_mode, int req_mode,
* If this session owns all the requested peripherals, then
* call function to switch the modes/masks for the md_session
*/
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
+ mutex_unlock(&driver->md_session_lock);
+
if (!session_info) {
*change_mode = 1;
return 0;
@@ -1504,7 +1514,9 @@ static int diag_md_session_check(int curr_mode, int req_mode,
* owned by this md session
*/
change_mask = driver->md_session_mask & param->peripheral_mask;
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
+ mutex_unlock(&driver->md_session_lock);
if (session_info) {
if ((session_info->peripheral_mask & change_mask)
@@ -1548,6 +1560,8 @@ static uint32_t diag_translate_mask(uint32_t peripheral_mask)
ret |= (1 << PERIPHERAL_WDSP);
if (peripheral_mask & DIAG_CON_CDSP)
ret |= (1 << PERIPHERAL_CDSP);
+ if (peripheral_mask & DIAG_CON_UPD_WLAN)
+ ret |= (1 << UPD_WLAN);
return ret;
}
@@ -1569,8 +1583,28 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param)
return -EINVAL;
}
- peripheral_mask = diag_translate_mask(param->peripheral_mask);
- param->peripheral_mask = peripheral_mask;
+ switch (param->pd_mask) {
+ case DIAG_CON_UPD_WLAN:
+ if (driver->md_session_map[PERIPHERAL_MODEM] &&
+ (MD_PERIPHERAL_MASK(PERIPHERAL_MODEM) &
+ diag_mux->mux_mask)) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "diag_fr: User PD is already logging onto active peripheral logging\n");
+ return -EINVAL;
+ }
+ peripheral_mask =
+ diag_translate_mask(param->pd_mask);
+ param->peripheral_mask = peripheral_mask;
+ driver->pd_logging_mode++;
+ driver->num_pd_session++;
+ break;
+
+ default:
+ peripheral_mask =
+ diag_translate_mask(param->peripheral_mask);
+ param->peripheral_mask = peripheral_mask;
+ break;
+ }
switch (param->req_mode) {
case CALLBACK_MODE:
@@ -1590,7 +1624,7 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param)
curr_mode = driver->logging_mode;
DIAG_LOG(DIAG_DEBUG_USERSPACE,
- "request to switch logging from %d mask:%0x to %d mask:%0x\n",
+ "request to switch logging from %d mask:%0x to new_mode %d mask:%0x\n",
curr_mode, driver->md_session_mask, new_mode, peripheral_mask);
err = diag_md_session_check(curr_mode, new_mode, param, &do_switch);
@@ -1892,8 +1926,9 @@ static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
{
uint8_t hdlc_support;
struct diag_md_session_t *session_info = NULL;
-
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
+ mutex_unlock(&driver->md_session_lock);
if (copy_from_user(&hdlc_support, (void __user *)ioarg,
sizeof(uint8_t)))
return -EFAULT;
@@ -1910,6 +1945,27 @@ static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
return 0;
}
+static int diag_ioctl_query_pd_logging(unsigned long ioarg)
+{
+ int ret = -EINVAL;
+
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "diag: %s: Untagging support on APPS is %s\n", __func__,
+ ((driver->supports_apps_header_untagging) ?
+ "present" : "absent"));
+
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "diag: %s: Tagging support on MODEM is %s\n", __func__,
+ (driver->feature[PERIPHERAL_MODEM].untag_header ?
+ "present" : "absent"));
+
+ if (driver->supports_apps_header_untagging &&
+ driver->feature[PERIPHERAL_MODEM].untag_header)
+ ret = 0;
+
+ return ret;
+}
+
static int diag_ioctl_register_callback(unsigned long ioarg)
{
int err = 0;
@@ -2149,6 +2205,9 @@ long diagchar_compat_ioctl(struct file *filp,
case DIAG_IOCTL_HDLC_TOGGLE:
result = diag_ioctl_hdlc_toggle(ioarg);
break;
+ case DIAG_IOCTL_QUERY_PD_LOGGING:
+ result = diag_ioctl_query_pd_logging(ioarg);
+ break;
}
return result;
}
@@ -2272,6 +2331,9 @@ long diagchar_ioctl(struct file *filp,
case DIAG_IOCTL_HDLC_TOGGLE:
result = diag_ioctl_hdlc_toggle(ioarg);
break;
+ case DIAG_IOCTL_QUERY_PD_LOGGING:
+ result = diag_ioctl_query_pd_logging(ioarg);
+ break;
}
return result;
}
@@ -2603,7 +2665,9 @@ static int diag_user_process_raw_data(const char __user *buf, int len)
} else {
wait_event_interruptible(driver->wait_q,
(driver->in_busy_pktdata == 0));
+ mutex_lock(&driver->md_session_lock);
info = diag_md_session_get_pid(current->tgid);
+ mutex_unlock(&driver->md_session_lock);
ret = diag_process_apps_pkt(user_space_data, len, info);
if (ret == 1)
diag_send_error_rsp((void *)(user_space_data), len,
@@ -2671,7 +2735,9 @@ static int diag_user_process_userspace_data(const char __user *buf, int len)
/* send masks to local processor now */
if (!remote_proc) {
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
+ mutex_unlock(&driver->md_session_lock);
if (!session_info) {
pr_err("diag:In %s request came from invalid md session pid:%d",
__func__, current->tgid);
@@ -2832,7 +2898,9 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
/* place holder for number of data field */
ret += sizeof(int);
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
+ mutex_unlock(&driver->md_session_lock);
exit_stat = diag_md_copy_to_user(buf, &ret, count,
session_info);
goto exit;
@@ -2846,7 +2914,9 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
+ mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
+ mutex_unlock(&driver->md_session_lock);
if (session_info)
COPY_USER_SPACE_OR_EXIT(buf+4,
session_info->hdlc_disabled,
@@ -3275,7 +3345,7 @@ static void diag_debug_init(void)
* to be logged to IPC
*/
diag_debug_mask = DIAG_DEBUG_PERIPHERALS | DIAG_DEBUG_DCI |
- DIAG_DEBUG_BRIDGE;
+ DIAG_DEBUG_USERSPACE | DIAG_DEBUG_BRIDGE;
}
#else
static void diag_debug_init(void)
@@ -3404,6 +3474,8 @@ static int __init diagchar_init(void)
poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
driver->num_clients = max_clients;
driver->logging_mode = DIAG_USB_MODE;
+ driver->pd_logging_mode = 0;
+ driver->num_pd_session = 0;
driver->mask_check = 0;
driver->in_busy_pktdata = 0;
driver->in_busy_dcipktdata = 0;
@@ -3421,6 +3493,7 @@ static int __init diagchar_init(void)
mutex_init(&apps_data_mutex);
for (i = 0; i < NUM_PERIPHERALS; i++)
mutex_init(&driver->diagfwd_channel_mutex[i]);
+ mutex_init(&driver->diagfwd_untag_mutex);
init_waitqueue_head(&driver->wait_q);
INIT_WORK(&(driver->diag_drain_work), diag_drain_work_fn);
INIT_WORK(&(driver->update_user_clients),
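
A hypothetical user-space sketch of the new WLAN user-PD logging flow added
above: a client first checks support with DIAG_IOCTL_QUERY_PD_LOGGING (which
returns 0 only when both APPS untagging and MODEM tagging support are present)
and then issues its usual mode-switch request with pd_mask set to
DIAG_CON_UPD_WLAN in struct diag_logging_mode_param_t. The /dev/diag node name
and the header providing the ioctl code are assumptions, not shown in this hunk.

/* Assumed user-space usage; the device node and the header that defines
 * DIAG_IOCTL_QUERY_PD_LOGGING are assumptions, not taken from this hunk. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/diagchar.h>	/* assumed: provides DIAG_IOCTL_QUERY_PD_LOGGING */

int main(void)
{
	int fd = open("/dev/diag", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;
	/* 0 means both APPS untagging and MODEM header tagging are supported */
	if (ioctl(fd, DIAG_IOCTL_QUERY_PD_LOGGING, 0) == 0)
		printf("WLAN user-PD logging is supported\n");
	close(fd);
	return 0;
}
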
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 99a16dd47cd4..532d2b149317 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1587,6 +1587,7 @@ int diagfwd_init(void)
driver->real_time_mode[i] = 1;
driver->supports_separate_cmdrsp = 1;
driver->supports_apps_hdlc_encoding = 1;
+ driver->supports_apps_header_untagging = 1;
mutex_init(&driver->diag_hdlc_mutex);
mutex_init(&driver->diag_cntl_mutex);
mutex_init(&driver->mode_lock);
@@ -1616,6 +1617,8 @@ int diagfwd_init(void)
driver->feature[i].rcvd_feature_mask = 0;
driver->feature[i].peripheral_buffering = 0;
driver->feature[i].encode_hdlc = 0;
+ driver->feature[i].untag_header =
+ DISABLE_PKT_HEADER_UNTAGGING;
driver->feature[i].mask_centralization = 0;
driver->feature[i].log_on_demand = 0;
driver->feature[i].sent_feature_mask = 0;
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
index 4c6d86fc36ae..97ad3f60ba5e 100644
--- a/drivers/char/diag/diagfwd.h
+++ b/drivers/char/diag/diagfwd.h
@@ -19,9 +19,11 @@
*/
#define SET_BUF_CTXT(p, d, n) \
(((p & 0xFF) << 16) | ((d & 0xFF) << 8) | (n & 0xFF))
+#define SET_PD_CTXT(u) ((u & 0xFF) << 24)
#define GET_BUF_PERIPHERAL(p) ((p & 0xFF0000) >> 16)
#define GET_BUF_TYPE(d) ((d & 0x00FF00) >> 8)
#define GET_BUF_NUM(n) ((n & 0x0000FF))
+#define GET_PD_CTXT(u) ((u & 0xFF000000) >> 24)
#define CHK_OVERFLOW(bufStart, start, end, length) \
((((bufStart) <= (start)) && ((end) - (start) >= (length))) ? 1 : 0)
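
A worked example of the buffer context word these new macros define (a sketch
using only the macros shown above; the values assume PERIPHERAL_MODEM == 0 and
TYPE_DATA == 0 as in this driver, and the PD id stored in bits 31:24 is UPD_WLAN
or DIAG_ID_MPSS as used by diagfwd_peripheral.c later in this patch).

/* Context word layout per the macros above:
 *   [31:24] PD id (SET_PD_CTXT/GET_PD_CTXT)  [23:16] peripheral
 *   [15:8]  buffer type                      [7:0]   buffer number
 */
#define SET_BUF_CTXT(p, d, n) \
	(((p & 0xFF) << 16) | ((d & 0xFF) << 8) | (n & 0xFF))
#define SET_PD_CTXT(u)		((u & 0xFF) << 24)
#define GET_PD_CTXT(u)		((u & 0xFF000000) >> 24)
#define GET_BUF_PERIPHERAL(p)	((p & 0xFF0000) >> 16)
#define GET_BUF_NUM(n)		((n & 0x0000FF))

/* Modem (peripheral 0), data type 0, buffer 3, tagged with UPD_WLAN (7). */
static const unsigned int example_ctxt =
	SET_BUF_CTXT(0, 0, 3) | SET_PD_CTXT(7);	/* == 0x07000003 */
/* GET_PD_CTXT() == 7, GET_BUF_PERIPHERAL() == 0, GET_BUF_NUM() == 3 */
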
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index 62c8d0028af9..ae749725f6db 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -124,7 +124,9 @@ void diag_notify_md_client(uint8_t peripheral, int data)
info.si_signo = SIGCONT;
if (driver->md_session_map[peripheral] &&
driver->md_session_map[peripheral]->task) {
- if (driver->md_session_map[peripheral]->pid ==
+ if (driver->md_session_map[peripheral]->
+ md_client_thread_info->task != NULL
+ && driver->md_session_map[peripheral]->pid ==
driver->md_session_map[peripheral]->task->tgid) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"md_session %d pid = %d, md_session %d task tgid = %d\n",
@@ -198,6 +200,20 @@ static void process_hdlc_encoding_feature(uint8_t peripheral)
}
}
+static void process_upd_header_untagging_feature(uint8_t peripheral)
+{
+ if (peripheral >= NUM_PERIPHERALS)
+ return;
+
+ if (driver->supports_apps_header_untagging) {
+ driver->feature[peripheral].untag_header =
+ ENABLE_PKT_HEADER_UNTAGGING;
+ } else {
+ driver->feature[peripheral].untag_header =
+ DISABLE_PKT_HEADER_UNTAGGING;
+ }
+}
+
static void process_command_deregistration(uint8_t *buf, uint32_t len,
uint8_t peripheral)
{
@@ -374,6 +390,8 @@ static void process_incoming_feature_mask(uint8_t *buf, uint32_t len,
driver->feature[peripheral].separate_cmd_rsp = 1;
if (FEATURE_SUPPORTED(F_DIAG_APPS_HDLC_ENCODE))
process_hdlc_encoding_feature(peripheral);
+ if (FEATURE_SUPPORTED(F_DIAG_PKT_HEADER_UNTAG))
+ process_upd_header_untagging_feature(peripheral);
if (FEATURE_SUPPORTED(F_DIAG_STM))
enable_stm_feature(peripheral);
if (FEATURE_SUPPORTED(F_DIAG_MASK_CENTRALIZATION))
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index 7eed8ef8779e..e8608f47ff14 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -67,6 +67,7 @@
#define F_DIAG_MASK_CENTRALIZATION 11
#define F_DIAG_SOCKETS_ENABLED 13
#define F_DIAG_DCI_EXTENDED_HEADER_SUPPORT 14
+#define F_DIAG_PKT_HEADER_UNTAG 16
#define ENABLE_SEPARATE_CMDRSP 1
#define DISABLE_SEPARATE_CMDRSP 0
@@ -81,6 +82,9 @@
#define ENABLE_APPS_HDLC_ENCODING 1
#define DISABLE_APPS_HDLC_ENCODING 0
+#define ENABLE_PKT_HEADER_UNTAGGING 1
+#define DISABLE_PKT_HEADER_UNTAGGING 0
+
#define DIAG_MODE_PKT_LEN 36
struct diag_ctrl_pkt_header_t {
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index b04008c8fec3..55d36abe4679 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -46,6 +46,8 @@ static void diagfwd_cntl_open(struct diagfwd_info *fwd_info);
static void diagfwd_cntl_close(struct diagfwd_info *fwd_info);
static void diagfwd_dci_open(struct diagfwd_info *fwd_info);
static void diagfwd_dci_close(struct diagfwd_info *fwd_info);
+static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len);
static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
unsigned char *buf, int len);
static void diagfwd_cntl_read_done(struct diagfwd_info *fwd_info,
@@ -59,7 +61,7 @@ struct diagfwd_info peripheral_info[NUM_TYPES][NUM_PERIPHERALS];
static struct diag_channel_ops data_ch_ops = {
.open = NULL,
.close = NULL,
- .read_done = diagfwd_data_read_done
+ .read_done = diagfwd_data_read_untag_done
};
static struct diag_channel_ops cntl_ch_ops = {
@@ -214,6 +216,221 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
return buf->len;
}
+static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
+ struct diagfwd_buf_t *buf, int len)
+{
+ int err = 0;
+ int write_len = 0, peripheral = 0;
+ unsigned char *write_buf = NULL;
+ struct diag_md_session_t *session_info = NULL;
+ uint8_t hdlc_disabled = 0;
+
+ if (!fwd_info || !buf || len <= 0) {
+ diag_ws_release();
+ return;
+ }
+
+ switch (fwd_info->type) {
+ case TYPE_DATA:
+ case TYPE_CMD:
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
+ __func__, fwd_info->type,
+ fwd_info->peripheral);
+ diag_ws_release();
+ return;
+ }
+
+ mutex_lock(&driver->hdlc_disable_mutex);
+ mutex_lock(&fwd_info->data_mutex);
+ peripheral = GET_PD_CTXT(buf->ctxt);
+ if (peripheral == DIAG_ID_MPSS)
+ peripheral = PERIPHERAL_MODEM;
+
+ session_info =
+ diag_md_session_get_peripheral(peripheral);
+ if (session_info)
+ hdlc_disabled = session_info->hdlc_disabled;
+ else
+ hdlc_disabled = driver->hdlc_disabled;
+
+ if (hdlc_disabled) {
+ /* The data is raw and HDLC is disabled on the APPS side */
+ if (!buf) {
+ pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ if (len > PERIPHERAL_BUF_SZ) {
+ pr_err("diag: In %s, Incoming buffer too large %d, peripheral %d, type: %d\n",
+ __func__, len, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+ write_len = len;
+ if (write_len <= 0)
+ goto end;
+ write_buf = buf->data_raw;
+ } else {
+ if (!buf) {
+ pr_err("diag: In %s, no match for non encode buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, fwd_info->peripheral,
+ fwd_info->type);
+ goto end;
+ }
+
+ write_len = check_bufsize_for_encoding(buf, len);
+ if (write_len <= 0) {
+ pr_err("diag: error in checking buf for encoding\n");
+ goto end;
+ }
+ write_buf = buf->data;
+ err = diag_add_hdlc_encoding(write_buf, &write_len,
+ buf->data_raw, len);
+ if (err) {
+ pr_err("diag: error in adding hdlc encoding\n");
+ goto end;
+ }
+ }
+
+ if (write_len > 0) {
+ err = diag_mux_write(DIAG_LOCAL_PROC, write_buf, write_len,
+ buf->ctxt);
+ if (err) {
+ pr_err_ratelimited("diag: In %s, unable to write to mux error: %d\n",
+ __func__, err);
+ goto end;
+ }
+ }
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ diagfwd_queue_read(fwd_info);
+ return;
+
+end:
+ diag_ws_release();
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ if (buf) {
+ diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(buf->ctxt));
+ }
+ diagfwd_queue_read(fwd_info);
+}
+
+static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
+ unsigned char *buf, int len)
+{
+ int len_cpd = 0, len_upd_1 = 0;
+ int ctxt_cpd = 0, ctxt_upd_1 = 0;
+ int buf_len = 0, processed = 0;
+ unsigned char *temp_buf_main = NULL;
+ unsigned char *temp_buf_cpd = NULL;
+ unsigned char *temp_buf_upd_1 = NULL;
+ struct diagfwd_buf_t *temp_ptr_upd = NULL;
+ struct diagfwd_buf_t *temp_ptr_cpd = NULL;
+ int flag_buf_1 = 0, flag_buf_2 = 0;
+
+ if (!fwd_info || !buf || len <= 0) {
+ diag_ws_release();
+ return;
+ }
+
+ switch (fwd_info->type) {
+ case TYPE_DATA:
+ case TYPE_CMD:
+ break;
+ default:
+ pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
+ __func__, fwd_info->type,
+ fwd_info->peripheral);
+ diag_ws_release();
+ return;
+ }
+
+ if (driver->feature[fwd_info->peripheral].encode_hdlc &&
+ driver->feature[fwd_info->peripheral].untag_header) {
+ mutex_lock(&driver->diagfwd_untag_mutex);
+ temp_buf_cpd = buf;
+ temp_buf_main = buf;
+ if (fwd_info->buf_1 &&
+ fwd_info->buf_1->data_raw == buf) {
+ flag_buf_1 = 1;
+ if (fwd_info->type == TYPE_DATA)
+ temp_buf_upd_1 =
+ fwd_info->buf_upd_1_a->data_raw;
+ } else {
+ flag_buf_2 = 1;
+ if (fwd_info->type == TYPE_DATA)
+ temp_buf_upd_1 =
+ fwd_info->buf_upd_1_b->data_raw;
+ }
+ while (processed < len) {
+ buf_len =
+ *(uint16_t *) (temp_buf_main + 2);
+ switch ((*temp_buf_main)) {
+ case DIAG_ID_MPSS:
+ ctxt_cpd = DIAG_ID_MPSS;
+ len_cpd += buf_len;
+ if (temp_buf_cpd) {
+ memcpy(temp_buf_cpd,
+ (temp_buf_main + 4), buf_len);
+ temp_buf_cpd += buf_len;
+ }
+ break;
+ case DIAG_ID_WLAN:
+ ctxt_upd_1 = UPD_WLAN;
+ len_upd_1 += buf_len;
+ if (temp_buf_upd_1) {
+ memcpy(temp_buf_upd_1,
+ (temp_buf_main + 4), buf_len);
+ temp_buf_upd_1 += buf_len;
+ }
+ break;
+ }
+ len = len - 4;
+ temp_buf_main += (buf_len + 4);
+ processed += buf_len;
+ }
+ if (fwd_info->type == TYPE_DATA && len_upd_1) {
+ if (flag_buf_1)
+ temp_ptr_upd = fwd_info->buf_upd_1_a;
+ else
+ temp_ptr_upd = fwd_info->buf_upd_1_b;
+ temp_ptr_upd->ctxt &= 0x00FFFFFF;
+ temp_ptr_upd->ctxt |=
+ (SET_PD_CTXT(ctxt_upd_1));
+ atomic_set(&temp_ptr_upd->in_busy, 1);
+ diagfwd_data_process_done(fwd_info,
+ temp_ptr_upd, len_upd_1);
+ }
+ if (len_cpd) {
+ if (flag_buf_1) {
+ driver->cpd_len_1 = len_cpd;
+ temp_ptr_cpd = fwd_info->buf_1;
+ } else {
+ driver->cpd_len_2 = len_cpd;
+ temp_ptr_cpd = fwd_info->buf_2;
+ }
+ temp_ptr_cpd->ctxt &= 0x00FFFFFF;
+ temp_ptr_cpd->ctxt |=
+ (SET_PD_CTXT(ctxt_cpd));
+ diagfwd_data_process_done(fwd_info,
+ temp_ptr_cpd, len_cpd);
+ } else {
+ if (flag_buf_1)
+ driver->cpd_len_1 = 0;
+ if (flag_buf_2)
+ driver->cpd_len_2 = 0;
+ }
+ mutex_unlock(&driver->diagfwd_untag_mutex);
+ } else {
+ diagfwd_data_read_done(fwd_info, buf, len);
+ }
+}
+
static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
unsigned char *buf, int len)
{
@@ -223,6 +440,7 @@ static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
struct diagfwd_buf_t *temp_buf = NULL;
struct diag_md_session_t *session_info = NULL;
uint8_t hdlc_disabled = 0;
+
if (!fwd_info || !buf || len <= 0) {
diag_ws_release();
return;
@@ -234,8 +452,8 @@ static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
break;
default:
pr_err_ratelimited("diag: In %s, invalid type %d for peripheral %d\n",
- __func__, fwd_info->type,
- fwd_info->peripheral);
+ __func__, fwd_info->type,
+ fwd_info->peripheral);
diag_ws_release();
return;
}
@@ -941,7 +1159,15 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
atomic_set(&fwd_info->buf_1->in_busy, 0);
else if (ctxt == 2 && fwd_info->buf_2)
atomic_set(&fwd_info->buf_2->in_busy, 0);
- else
+ else if (ctxt == 3 && fwd_info->buf_upd_1_a) {
+ atomic_set(&fwd_info->buf_upd_1_a->in_busy, 0);
+ if (driver->cpd_len_1 == 0)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ } else if (ctxt == 4 && fwd_info->buf_upd_1_b) {
+ atomic_set(&fwd_info->buf_upd_1_b->in_busy, 0);
+ if (driver->cpd_len_2 == 0)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ } else
pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt);
diagfwd_queue_read(fwd_info);
@@ -1073,6 +1299,7 @@ static void diagfwd_queue_read(struct diagfwd_info *fwd_info)
void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
{
+ unsigned char *temp_buf;
if (!fwd_info)
return;
@@ -1125,6 +1352,54 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
fwd_info->type, 2);
}
+ if (driver->feature[fwd_info->peripheral].untag_header) {
+ if (!fwd_info->buf_upd_1_a) {
+ fwd_info->buf_upd_1_a =
+ kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_KERNEL);
+ if (!fwd_info->buf_upd_1_a)
+ goto err;
+ kmemleak_not_leak(fwd_info->buf_upd_1_a);
+ }
+
+ if (!fwd_info->buf_upd_1_a->data) {
+ fwd_info->buf_upd_1_a->data =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ if (!fwd_info->buf_upd_1_a->data)
+ goto err;
+ fwd_info->buf_upd_1_a->len = PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(fwd_info->buf_upd_1_a->data);
+ fwd_info->buf_upd_1_a->ctxt = SET_BUF_CTXT(
+ fwd_info->peripheral,
+ fwd_info->type, 3);
+ }
+ if (!fwd_info->buf_upd_1_b) {
+ fwd_info->buf_upd_1_b =
+ kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_KERNEL);
+ if (!fwd_info->buf_upd_1_b)
+ goto err;
+ kmemleak_not_leak(fwd_info->buf_upd_1_b);
+ }
+
+ if (!fwd_info->buf_upd_1_b->data) {
+ fwd_info->buf_upd_1_b->data =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ if (!fwd_info->buf_upd_1_b->data)
+ goto err;
+ fwd_info->buf_upd_1_b->len =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(fwd_info->buf_upd_1_b->data);
+ fwd_info->buf_upd_1_b->ctxt = SET_BUF_CTXT(
+ fwd_info->peripheral,
+ fwd_info->type, 4);
+ }
+ }
+
if (driver->supports_apps_hdlc_encoding) {
/* In support of hdlc encoding */
if (!fwd_info->buf_1->data_raw) {
@@ -1134,7 +1409,8 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
GFP_KERNEL);
if (!fwd_info->buf_1->data_raw)
goto err;
- fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
+ fwd_info->buf_1->len_raw =
+ PERIPHERAL_BUF_SZ;
kmemleak_not_leak(fwd_info->buf_1->data_raw);
}
if (!fwd_info->buf_2->data_raw) {
@@ -1144,13 +1420,45 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
GFP_KERNEL);
if (!fwd_info->buf_2->data_raw)
goto err;
- fwd_info->buf_2->len_raw = PERIPHERAL_BUF_SZ;
+ fwd_info->buf_2->len_raw =
+ PERIPHERAL_BUF_SZ;
kmemleak_not_leak(fwd_info->buf_2->data_raw);
}
+
+ if (driver->feature[fwd_info->peripheral].
+ untag_header) {
+ if (!fwd_info->buf_upd_1_a->data_raw) {
+ fwd_info->buf_upd_1_a->data_raw =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ if (!fwd_info->buf_upd_1_a->data_raw)
+ goto err;
+ fwd_info->buf_upd_1_a->len_raw =
+ PERIPHERAL_BUF_SZ;
+ temp_buf =
+ fwd_info->buf_upd_1_a->data_raw;
+ kmemleak_not_leak(temp_buf);
+ }
+ if (!fwd_info->buf_upd_1_b->data_raw) {
+ fwd_info->buf_upd_1_b->data_raw =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ if (!fwd_info->buf_upd_1_b->data_raw)
+ goto err;
+ fwd_info->buf_upd_1_b->len_raw =
+ PERIPHERAL_BUF_SZ;
+ temp_buf =
+ fwd_info->buf_upd_1_b->data_raw;
+ kmemleak_not_leak(temp_buf);
+ }
+ }
}
}
- if (fwd_info->type == TYPE_CMD && driver->supports_apps_hdlc_encoding) {
+ if (fwd_info->type == TYPE_CMD &&
+ driver->supports_apps_hdlc_encoding) {
/* In support of hdlc encoding */
if (!fwd_info->buf_1->data_raw) {
fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ +
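
The parsing loop in diagfwd_data_read_untag_done() above implies a simple 4-byte
tag in front of each chunk arriving from the peripheral; the sketch below
restates that framing and the walk in isolation. Byte 1 is never read by the
loop, so it is assumed reserved, and the function name is illustrative.

/* Per-chunk framing implied by the untag loop above, repeated back to back:
 *   byte 0    : diag_id (DIAG_ID_MPSS = 2, DIAG_ID_WLAN = 3)
 *   byte 1    : not read by the loop (assumed reserved)
 *   bytes 2-3 : payload length, read with a native-endian uint16_t load
 *   bytes 4.. : payload of that many bytes
 */
#include <stdint.h>

static void untag_walk(unsigned char *buf, int len)
{
	int processed = 0;

	while (processed < len) {
		uint16_t plen = *(uint16_t *)(buf + 2);

		/* dispatch buf + 4 .. buf + 4 + plen based on *buf (diag_id) */
		len -= 4;		/* drop the tag bytes, as in the original loop */
		buf += plen + 4;
		processed += plen;
	}
}
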
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index 23aa526b2c09..f483da81cc96 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -78,6 +78,8 @@ struct diagfwd_info {
void *ctxt;
struct diagfwd_buf_t *buf_1;
struct diagfwd_buf_t *buf_2;
+ struct diagfwd_buf_t *buf_upd_1_a;
+ struct diagfwd_buf_t *buf_upd_1_b;
struct diagfwd_buf_t *buf_ptr[NUM_WRITE_BUFFERS];
struct diag_peripheral_ops *p_ops;
struct diag_channel_ops *c_ops;
diff --git a/drivers/esoc/esoc_dev.c b/drivers/esoc/esoc_dev.c
index 26b8d0fe512b..ffb2237da5fa 100644
--- a/drivers/esoc/esoc_dev.c
+++ b/drivers/esoc/esoc_dev.c
@@ -214,7 +214,7 @@ static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
esoc_clink->name);
return -EIO;
}
- put_user(req, (unsigned long __user *)uarg);
+ put_user(req, (unsigned int __user *)uarg);
}
return err;
@@ -227,7 +227,7 @@ static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
err = clink_ops->get_status(&status, esoc_clink);
if (err)
return err;
- put_user(status, (unsigned long __user *)uarg);
+ put_user(status, (unsigned int __user *)uarg);
break;
case ESOC_WAIT_FOR_CRASH:
err = wait_event_interruptible(esoc_udev->evt_wait,
@@ -241,7 +241,7 @@ static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
esoc_clink->name);
return -EIO;
}
- put_user(evt, (unsigned long __user *)uarg);
+ put_user(evt, (unsigned int __user *)uarg);
}
return err;
break;
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 79ea5a9f90ea..ebf8be80a3d9 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -59,7 +59,8 @@ msm_drm-y += adreno/adreno_device.o \
adreno/a5xx_gpu.o \
adreno/a5xx_power.o \
adreno/a5xx_preempt.o \
- adreno/a5xx_snapshot.o
+ adreno/a5xx_snapshot.o \
+ adreno/a5xx_counters.o
endif
msm_drm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_crtc.o \
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index 56dad2217289..b73f4efb1b9d 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -8,17 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /local3/projects/drm/envytools/rnndb//adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
-- /local3/projects/drm/envytools/rnndb//freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
-- /local3/projects/drm/envytools/rnndb//adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
-- /local3/projects/drm/envytools/rnndb//adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
-- /local3/projects/drm/envytools/rnndb//adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
-- /local3/projects/drm/envytools/rnndb//adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
-- /local3/projects/drm/envytools/rnndb//adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
-- /local3/projects/drm/envytools/rnndb//adreno/a5xx.xml ( 81546 bytes, from 2016-10-31 16:38:41)
-- /local3/projects/drm/envytools/rnndb//adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
-
-Copyright (C) 2013-2016 by the following authors:
+- ./rnndb/adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/a5xx.xml ( 86963 bytes, from 2017-03-03 16:01:09)
+- ./rnndb/adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2017 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -1759,13 +1759,11 @@ static inline uint32_t A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(uint32_t val
#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
-#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
+static inline uint32_t REG_A5XX_VBIF_PERF_CNT_EN(uint32_t i0) { return 0x000030c0 + 0x1*i0; }
-#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
+static inline uint32_t REG_A5XX_VBIF_PERF_CNT_CLR(uint32_t i0) { return 0x000030c8 + 0x1*i0; }
-#define REG_A5XX_VBIF_PERF_CNT_SEL2 0x000030d2
-
-#define REG_A5XX_VBIF_PERF_CNT_SEL3 0x000030d3
+static inline uint32_t REG_A5XX_VBIF_PERF_CNT_SEL(uint32_t i0) { return 0x000030d0 + 0x1*i0; }
#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8
@@ -1783,11 +1781,9 @@ static inline uint32_t A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(uint32_t val
#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3
-#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
-
-#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+static inline uint32_t REG_A5XX_VBIF_PERF_PWR_CNT_EN(uint32_t i0) { return 0x00003100 + 0x1*i0; }
-#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+static inline uint32_t REG_A5XX_VBIF_PERF_PWR_CNT_CLR(uint32_t i0) { return 0x00003108 + 0x1*i0; }
#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_counters.c b/drivers/gpu/drm/msm/adreno/a5xx_counters.c
new file mode 100644
index 000000000000..f1fac5535359
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_counters.c
@@ -0,0 +1,689 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "a5xx_gpu.h"
+
+/*
+ * Fixed counters are not selectable; they always count the same thing.
+ * The countable is an index into the group (countable 0 = register 0,
+ * etc.) and they have no select register.
+ */
+static int a5xx_counter_get_fixed(struct msm_gpu *gpu,
+ struct adreno_counter_group *group,
+ u32 countable, u32 *lo, u32 *hi)
+{
+ if (countable >= group->nr_counters)
+ return -EINVAL;
+
+ if (lo)
+ *lo = group->counters[countable].lo;
+ if (hi)
+ *hi = group->counters[countable].hi;
+
+ return countable;
+}
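+
+/*
+ * Illustrative example (not part of the original change): for the always-on
+ * group defined below, a5xx_counter_get_fixed(gpu, group, 0, &lo, &hi)
+ * returns 0 and fills lo/hi with REG_A5XX_RBBM_ALWAYSON_COUNTER_LO/_HI;
+ * any countable >= 1 fails with -EINVAL because that group has one counter.
+ */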
+
+/*
+ * Most counters are selectable in that they can be programmed to count
+ * different events; in most cases there are many more countables than
+ * counters. When a new counter is requested, first walk the list to see if any
+ * other counters in that group are counting the same countable and, if so,
+ * reuse that counter. If not, find the first empty counter in the list and
+ * register it for the desired countable. If no counters are free, return -EBUSY.
+ */
+static int a5xx_counter_get(struct msm_gpu *gpu,
+ struct adreno_counter_group *group,
+ u32 countable, u32 *lo, u32 *hi)
+{
+ struct adreno_counter *counter;
+ int i, empty = -1;
+
+ spin_lock(&group->lock);
+
+ for (i = 0; i < group->nr_counters; i++) {
+ counter = &group->counters[i];
+
+ if (counter->refcount) {
+ if (counter->countable == countable) {
+ counter->refcount++;
+
+ if (lo)
+ *lo = counter->lo;
+ if (hi)
+ *hi = counter->hi;
+
+ spin_unlock(&group->lock);
+ return i;
+ }
+ } else
+ empty = (empty == -1) ? i : empty;
+ }
+
+ if (empty == -1) {
+ spin_unlock(&group->lock);
+ return -EBUSY;
+ }
+
+ counter = &group->counters[empty];
+
+ counter->refcount = 1;
+ counter->countable = countable;
+
+ if (lo)
+ *lo = counter->lo;
+ if (hi)
+ *hi = counter->hi;
+
+ spin_unlock(&group->lock);
+
+ if (group->funcs.enable)
+ group->funcs.enable(gpu, group, empty);
+
+ return empty;
+}
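+
+/*
+ * Illustrative sketch (not part of the original change): two requests for
+ * the same countable share one hardware counter,
+ *
+ *	u32 lo, hi;
+ *	int a = a5xx_counter_get(gpu, group, 0x10, &lo, &hi);
+ *	int b = a5xx_counter_get(gpu, group, 0x10, NULL, NULL);
+ *
+ * so a == b with a refcount of 2, and a5xx_counter_put() must be called once
+ * per a5xx_counter_get() before the counter can be claimed for a different
+ * countable. The countable value 0x10 here is arbitrary.
+ */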
+
+/* The majority of the non-fixed counter selects can be programmed by the CPU */
+static void a5xx_counter_enable_cpu(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid)
+{
+ struct adreno_counter *counter = &group->counters[counterid];
+
+ gpu_write(gpu, counter->sel, counter->countable);
+}
+
+static void a5xx_counter_enable_pm4(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[MSM_GPU_MAX_RINGS - 1];
+ struct adreno_counter *counter = &group->counters[counterid];
+
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ /* Turn off preemption for the duration of this command */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ /* Idle the GPU */
+ OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);
+
+ /* Enable the counter */
+ OUT_PKT4(ring, counter->sel, 1);
+ OUT_RING(ring, counter->countable);
+
+ /* Re-enable preemption */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x00);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x01);
+
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Yield */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x01);
+ OUT_RING(ring, 0x01);
+
+ gpu->funcs->flush(gpu, ring);
+
+ /* Preempt into our ring if we need to */
+ a5xx_preempt_trigger(gpu);
+
+ /* wait for the operation to complete */
+ a5xx_idle(gpu, ring);
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+}
+
+/*
+ * GPMU counters are selectable but the selects are muxed together in two
+ * registers
+ */
+static void a5xx_counter_enable_gpmu(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid)
+{
+ struct adreno_counter *counter = &group->counters[counterid];
+ u32 reg;
+ int shift;
+
+ /*
+ * The selects for the GPMU counters are grouped together in two
+ * registers, one byte for each counter. Counters 0-3 are located in
+ * GPMU_POWER_COUNTER_SELECT0 and 4-5 are in GPMU_POWER_COUNTER_SELECT1
+ */
+ if (counterid <= 3) {
+ shift = counterid << 3;
+ reg = REG_A5XX_GPMU_POWER_COUNTER_SELECT_0;
+ } else {
+ shift = (counterid - 4) << 3;
+ reg = REG_A5XX_GPMU_POWER_COUNTER_SELECT_1;
+ }
+
+ gpu_rmw(gpu, reg, 0xFF << shift, (counter->countable & 0xff) << shift);
+}
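+
+/*
+ * Worked example (illustrative): for counterid 5 the select byte lives in
+ * GPMU_POWER_COUNTER_SELECT_1 at shift (5 - 4) << 3 = 8, so the gpu_rmw()
+ * above clears bits [15:8] and writes the low byte of the countable there;
+ * counterid 2 would use SELECT_0 with a shift of 16.
+ */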
+
+/* VBIF counters are selectable but have their own programming process */
+static void a5xx_counter_enable_vbif(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid)
+{
+ struct adreno_counter *counter = &group->counters[counterid];
+
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_CNT_CLR(counterid), 1);
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_CNT_CLR(counterid), 0);
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_CNT_SEL(counterid),
+ counter->countable);
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_CNT_EN(counterid), 1);
+}
+
+/*
+ * VBIF power counters are not selectable but need to be cleared/enabled before
+ * use
+ */
+static void a5xx_counter_enable_vbif_power(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid)
+{
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_PWR_CNT_CLR(counterid), 1);
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_PWR_CNT_CLR(counterid), 0);
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_PWR_CNT_EN(counterid), 1);
+}
+
+/* GPMU always on counter needs to be enabled before use */
+static void a5xx_counter_enable_alwayson_power(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid)
+{
+ gpu_write(gpu, REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET, 1);
+}
+
+static u64 a5xx_counter_read(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid)
+{
+ if (counterid >= group->nr_counters)
+ return 0;
+
+ return gpu_read64(gpu, group->counters[counterid].lo,
+ group->counters[counterid].hi);
+}
+
+/*
+ * Selectable counters that are no longer used drop their refcount; once it
+ * reaches zero the counter is free to be claimed for a different countable
+ */
+static void a5xx_counter_put(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid)
+{
+ struct adreno_counter *counter;
+
+ if (counterid >= group->nr_counters)
+ return;
+
+ counter = &group->counters[counterid];
+
+ spin_lock(&group->lock);
+ if (counter->refcount > 0)
+ counter->refcount--;
+ spin_unlock(&group->lock);
+}
+
+static struct adreno_counter a5xx_counters_alwayson[1] = {
+ { REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+ REG_A5XX_RBBM_ALWAYSON_COUNTER_HI },
+};
+
+static struct adreno_counter a5xx_counters_ccu[] = {
+ { REG_A5XX_RBBM_PERFCTR_CCU_0_LO, REG_A5XX_RBBM_PERFCTR_CCU_0_HI,
+ REG_A5XX_RB_PERFCTR_CCU_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_CCU_1_LO, REG_A5XX_RBBM_PERFCTR_CCU_1_HI,
+ REG_A5XX_RB_PERFCTR_CCU_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_CCU_2_LO, REG_A5XX_RBBM_PERFCTR_CCU_2_HI,
+ REG_A5XX_RB_PERFCTR_CCU_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_CCU_3_LO, REG_A5XX_RBBM_PERFCTR_CCU_3_HI,
+ REG_A5XX_RB_PERFCTR_CCU_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_cmp[] = {
+ { REG_A5XX_RBBM_PERFCTR_CMP_0_LO, REG_A5XX_RBBM_PERFCTR_CMP_0_HI,
+ REG_A5XX_RB_PERFCTR_CMP_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_CMP_1_LO, REG_A5XX_RBBM_PERFCTR_CMP_1_HI,
+ REG_A5XX_RB_PERFCTR_CMP_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_CMP_2_LO, REG_A5XX_RBBM_PERFCTR_CMP_2_HI,
+ REG_A5XX_RB_PERFCTR_CMP_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_CMP_3_LO, REG_A5XX_RBBM_PERFCTR_CMP_3_HI,
+ REG_A5XX_RB_PERFCTR_CMP_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_cp[] = {
+ { REG_A5XX_RBBM_PERFCTR_CP_0_LO, REG_A5XX_RBBM_PERFCTR_CP_0_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_CP_1_LO, REG_A5XX_RBBM_PERFCTR_CP_1_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_CP_2_LO, REG_A5XX_RBBM_PERFCTR_CP_2_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_CP_3_LO, REG_A5XX_RBBM_PERFCTR_CP_3_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_3 },
+ { REG_A5XX_RBBM_PERFCTR_CP_4_LO, REG_A5XX_RBBM_PERFCTR_CP_4_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_4 },
+ { REG_A5XX_RBBM_PERFCTR_CP_5_LO, REG_A5XX_RBBM_PERFCTR_CP_5_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_5 },
+ { REG_A5XX_RBBM_PERFCTR_CP_6_LO, REG_A5XX_RBBM_PERFCTR_CP_6_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_6 },
+ { REG_A5XX_RBBM_PERFCTR_CP_7_LO, REG_A5XX_RBBM_PERFCTR_CP_7_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_7 },
+};
+
+static struct adreno_counter a5xx_counters_hlsq[] = {
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 },
+};
+
+static struct adreno_counter a5xx_counters_lrz[] = {
+ { REG_A5XX_RBBM_PERFCTR_LRZ_0_LO, REG_A5XX_RBBM_PERFCTR_LRZ_0_HI,
+ REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_LRZ_1_LO, REG_A5XX_RBBM_PERFCTR_LRZ_1_HI,
+ REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_LRZ_2_LO, REG_A5XX_RBBM_PERFCTR_LRZ_2_HI,
+ REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_LRZ_3_LO, REG_A5XX_RBBM_PERFCTR_LRZ_3_HI,
+ REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_pc[] = {
+ { REG_A5XX_RBBM_PERFCTR_PC_0_LO, REG_A5XX_RBBM_PERFCTR_PC_0_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_PC_1_LO, REG_A5XX_RBBM_PERFCTR_PC_1_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_PC_2_LO, REG_A5XX_RBBM_PERFCTR_PC_2_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_PC_3_LO, REG_A5XX_RBBM_PERFCTR_PC_3_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_3 },
+ { REG_A5XX_RBBM_PERFCTR_PC_4_LO, REG_A5XX_RBBM_PERFCTR_PC_4_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_4 },
+ { REG_A5XX_RBBM_PERFCTR_PC_5_LO, REG_A5XX_RBBM_PERFCTR_PC_5_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_5 },
+ { REG_A5XX_RBBM_PERFCTR_PC_6_LO, REG_A5XX_RBBM_PERFCTR_PC_6_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_6 },
+ { REG_A5XX_RBBM_PERFCTR_PC_7_LO, REG_A5XX_RBBM_PERFCTR_PC_7_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_7 },
+};
+
+static struct adreno_counter a5xx_counters_ras[] = {
+ { REG_A5XX_RBBM_PERFCTR_RAS_0_LO, REG_A5XX_RBBM_PERFCTR_RAS_0_HI,
+ REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_RAS_1_LO, REG_A5XX_RBBM_PERFCTR_RAS_1_HI,
+ REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_RAS_2_LO, REG_A5XX_RBBM_PERFCTR_RAS_2_HI,
+ REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_RAS_3_LO, REG_A5XX_RBBM_PERFCTR_RAS_3_HI,
+ REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_rb[] = {
+ { REG_A5XX_RBBM_PERFCTR_RB_0_LO, REG_A5XX_RBBM_PERFCTR_RB_0_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_RB_1_LO, REG_A5XX_RBBM_PERFCTR_RB_1_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_RB_2_LO, REG_A5XX_RBBM_PERFCTR_RB_2_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_RB_3_LO, REG_A5XX_RBBM_PERFCTR_RB_3_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_3 },
+ { REG_A5XX_RBBM_PERFCTR_RB_4_LO, REG_A5XX_RBBM_PERFCTR_RB_4_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_4 },
+ { REG_A5XX_RBBM_PERFCTR_RB_5_LO, REG_A5XX_RBBM_PERFCTR_RB_5_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_5 },
+ { REG_A5XX_RBBM_PERFCTR_RB_6_LO, REG_A5XX_RBBM_PERFCTR_RB_6_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_6 },
+ { REG_A5XX_RBBM_PERFCTR_RB_7_LO, REG_A5XX_RBBM_PERFCTR_RB_7_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_7 },
+};
+
+static struct adreno_counter a5xx_counters_rbbm[] = {
+ { REG_A5XX_RBBM_PERFCTR_RBBM_0_LO, REG_A5XX_RBBM_PERFCTR_RBBM_0_HI,
+ REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_RBBM_1_LO, REG_A5XX_RBBM_PERFCTR_RBBM_1_HI,
+ REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_RBBM_2_LO, REG_A5XX_RBBM_PERFCTR_RBBM_2_HI,
+ REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_RBBM_3_LO, REG_A5XX_RBBM_PERFCTR_RBBM_3_HI,
+ REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_sp[] = {
+ { REG_A5XX_RBBM_PERFCTR_SP_0_LO, REG_A5XX_RBBM_PERFCTR_SP_0_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_SP_1_LO, REG_A5XX_RBBM_PERFCTR_SP_1_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_SP_2_LO, REG_A5XX_RBBM_PERFCTR_SP_2_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_SP_3_LO, REG_A5XX_RBBM_PERFCTR_SP_3_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_3 },
+ { REG_A5XX_RBBM_PERFCTR_SP_4_LO, REG_A5XX_RBBM_PERFCTR_SP_4_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_4 },
+ { REG_A5XX_RBBM_PERFCTR_SP_5_LO, REG_A5XX_RBBM_PERFCTR_SP_5_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_5 },
+ { REG_A5XX_RBBM_PERFCTR_SP_6_LO, REG_A5XX_RBBM_PERFCTR_SP_6_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_6 },
+ { REG_A5XX_RBBM_PERFCTR_SP_7_LO, REG_A5XX_RBBM_PERFCTR_SP_7_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_7 },
+ { REG_A5XX_RBBM_PERFCTR_SP_8_LO, REG_A5XX_RBBM_PERFCTR_SP_8_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_8 },
+ { REG_A5XX_RBBM_PERFCTR_SP_9_LO, REG_A5XX_RBBM_PERFCTR_SP_9_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_9 },
+ { REG_A5XX_RBBM_PERFCTR_SP_10_LO, REG_A5XX_RBBM_PERFCTR_SP_10_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_10 },
+ { REG_A5XX_RBBM_PERFCTR_SP_11_LO, REG_A5XX_RBBM_PERFCTR_SP_11_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_11 },
+};
+
+static struct adreno_counter a5xx_counters_tp[] = {
+ { REG_A5XX_RBBM_PERFCTR_TP_0_LO, REG_A5XX_RBBM_PERFCTR_TP_0_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_TP_1_LO, REG_A5XX_RBBM_PERFCTR_TP_1_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_TP_2_LO, REG_A5XX_RBBM_PERFCTR_TP_2_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_TP_3_LO, REG_A5XX_RBBM_PERFCTR_TP_3_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_3 },
+ { REG_A5XX_RBBM_PERFCTR_TP_4_LO, REG_A5XX_RBBM_PERFCTR_TP_4_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_4 },
+ { REG_A5XX_RBBM_PERFCTR_TP_5_LO, REG_A5XX_RBBM_PERFCTR_TP_5_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_5 },
+ { REG_A5XX_RBBM_PERFCTR_TP_6_LO, REG_A5XX_RBBM_PERFCTR_TP_6_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_6 },
+ { REG_A5XX_RBBM_PERFCTR_TP_7_LO, REG_A5XX_RBBM_PERFCTR_TP_7_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_7 },
+};
+
+static struct adreno_counter a5xx_counters_tse[] = {
+ { REG_A5XX_RBBM_PERFCTR_TSE_0_LO, REG_A5XX_RBBM_PERFCTR_TSE_0_HI,
+ REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_TSE_1_LO, REG_A5XX_RBBM_PERFCTR_TSE_1_HI,
+ REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_TSE_2_LO, REG_A5XX_RBBM_PERFCTR_TSE_2_HI,
+ REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_TSE_3_LO, REG_A5XX_RBBM_PERFCTR_TSE_3_HI,
+ REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_uche[] = {
+ { REG_A5XX_RBBM_PERFCTR_UCHE_0_LO, REG_A5XX_RBBM_PERFCTR_UCHE_0_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_1_LO, REG_A5XX_RBBM_PERFCTR_UCHE_1_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_2_LO, REG_A5XX_RBBM_PERFCTR_UCHE_2_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_3_LO, REG_A5XX_RBBM_PERFCTR_UCHE_3_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_4_LO, REG_A5XX_RBBM_PERFCTR_UCHE_4_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_5_LO, REG_A5XX_RBBM_PERFCTR_UCHE_5_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_6_LO, REG_A5XX_RBBM_PERFCTR_UCHE_6_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_7_LO, REG_A5XX_RBBM_PERFCTR_UCHE_7_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 },
+};
+
+static struct adreno_counter a5xx_counters_vfd[] = {
+ { REG_A5XX_RBBM_PERFCTR_VFD_0_LO, REG_A5XX_RBBM_PERFCTR_VFD_0_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_1_LO, REG_A5XX_RBBM_PERFCTR_VFD_1_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_2_LO, REG_A5XX_RBBM_PERFCTR_VFD_2_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_3_LO, REG_A5XX_RBBM_PERFCTR_VFD_3_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_3 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_4_LO, REG_A5XX_RBBM_PERFCTR_VFD_4_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_4 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_5_LO, REG_A5XX_RBBM_PERFCTR_VFD_5_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_5 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_6_LO, REG_A5XX_RBBM_PERFCTR_VFD_6_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_6 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_7_LO, REG_A5XX_RBBM_PERFCTR_VFD_7_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_7 },
+};
+
+static struct adreno_counter a5xx_counters_vpc[] = {
+ { REG_A5XX_RBBM_PERFCTR_VPC_0_LO, REG_A5XX_RBBM_PERFCTR_VPC_0_HI,
+ REG_A5XX_VPC_PERFCTR_VPC_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_VPC_1_LO, REG_A5XX_RBBM_PERFCTR_VPC_1_HI,
+ REG_A5XX_VPC_PERFCTR_VPC_SEL_1 },
+ { REG_A5XX_RBBM_PERFCTR_VPC_2_LO, REG_A5XX_RBBM_PERFCTR_VPC_2_HI,
+ REG_A5XX_VPC_PERFCTR_VPC_SEL_2 },
+ { REG_A5XX_RBBM_PERFCTR_VPC_3_LO, REG_A5XX_RBBM_PERFCTR_VPC_3_HI,
+ REG_A5XX_VPC_PERFCTR_VPC_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_vsc[] = {
+ { REG_A5XX_RBBM_PERFCTR_VSC_0_LO, REG_A5XX_RBBM_PERFCTR_VSC_0_HI,
+ REG_A5XX_VSC_PERFCTR_VSC_SEL_0 },
+ { REG_A5XX_RBBM_PERFCTR_VSC_1_LO, REG_A5XX_RBBM_PERFCTR_VSC_1_HI,
+ REG_A5XX_VSC_PERFCTR_VSC_SEL_1 },
+};
+
+static struct adreno_counter a5xx_counters_power_ccu[] = {
+ { REG_A5XX_CCU_POWER_COUNTER_0_LO, REG_A5XX_CCU_POWER_COUNTER_0_HI,
+ REG_A5XX_RB_POWERCTR_CCU_SEL_0 },
+ { REG_A5XX_CCU_POWER_COUNTER_1_LO, REG_A5XX_CCU_POWER_COUNTER_1_HI,
+ REG_A5XX_RB_POWERCTR_CCU_SEL_1 },
+};
+
+static struct adreno_counter a5xx_counters_power_cp[] = {
+ { REG_A5XX_CP_POWER_COUNTER_0_LO, REG_A5XX_CP_POWER_COUNTER_0_HI,
+ REG_A5XX_CP_POWERCTR_CP_SEL_0 },
+ { REG_A5XX_CP_POWER_COUNTER_1_LO, REG_A5XX_CP_POWER_COUNTER_1_HI,
+ REG_A5XX_CP_POWERCTR_CP_SEL_1 },
+ { REG_A5XX_CP_POWER_COUNTER_2_LO, REG_A5XX_CP_POWER_COUNTER_2_HI,
+ REG_A5XX_CP_POWERCTR_CP_SEL_2 },
+ { REG_A5XX_CP_POWER_COUNTER_3_LO, REG_A5XX_CP_POWER_COUNTER_3_HI,
+ REG_A5XX_CP_POWERCTR_CP_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_power_rb[] = {
+ { REG_A5XX_RB_POWER_COUNTER_0_LO, REG_A5XX_RB_POWER_COUNTER_0_HI,
+ REG_A5XX_RB_POWERCTR_RB_SEL_0 },
+ { REG_A5XX_RB_POWER_COUNTER_1_LO, REG_A5XX_RB_POWER_COUNTER_1_HI,
+ REG_A5XX_RB_POWERCTR_RB_SEL_1 },
+ { REG_A5XX_RB_POWER_COUNTER_2_LO, REG_A5XX_RB_POWER_COUNTER_2_HI,
+ REG_A5XX_RB_POWERCTR_RB_SEL_2 },
+ { REG_A5XX_RB_POWER_COUNTER_3_LO, REG_A5XX_RB_POWER_COUNTER_3_HI,
+ REG_A5XX_RB_POWERCTR_RB_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_power_sp[] = {
+ { REG_A5XX_SP_POWER_COUNTER_0_LO, REG_A5XX_SP_POWER_COUNTER_0_HI,
+ REG_A5XX_SP_POWERCTR_SP_SEL_0 },
+ { REG_A5XX_SP_POWER_COUNTER_1_LO, REG_A5XX_SP_POWER_COUNTER_1_HI,
+ REG_A5XX_SP_POWERCTR_SP_SEL_1 },
+ { REG_A5XX_SP_POWER_COUNTER_2_LO, REG_A5XX_SP_POWER_COUNTER_2_HI,
+ REG_A5XX_SP_POWERCTR_SP_SEL_2 },
+ { REG_A5XX_SP_POWER_COUNTER_3_LO, REG_A5XX_SP_POWER_COUNTER_3_HI,
+ REG_A5XX_SP_POWERCTR_SP_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_power_tp[] = {
+ { REG_A5XX_TP_POWER_COUNTER_0_LO, REG_A5XX_TP_POWER_COUNTER_0_HI,
+ REG_A5XX_TPL1_POWERCTR_TP_SEL_0 },
+ { REG_A5XX_TP_POWER_COUNTER_1_LO, REG_A5XX_TP_POWER_COUNTER_1_HI,
+ REG_A5XX_TPL1_POWERCTR_TP_SEL_1 },
+ { REG_A5XX_TP_POWER_COUNTER_2_LO, REG_A5XX_TP_POWER_COUNTER_2_HI,
+ REG_A5XX_TPL1_POWERCTR_TP_SEL_2 },
+ { REG_A5XX_TP_POWER_COUNTER_3_LO, REG_A5XX_TP_POWER_COUNTER_3_HI,
+ REG_A5XX_TPL1_POWERCTR_TP_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_power_uche[] = {
+ { REG_A5XX_UCHE_POWER_COUNTER_0_LO, REG_A5XX_UCHE_POWER_COUNTER_0_HI,
+ REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 },
+ { REG_A5XX_UCHE_POWER_COUNTER_1_LO, REG_A5XX_UCHE_POWER_COUNTER_1_HI,
+ REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 },
+ { REG_A5XX_UCHE_POWER_COUNTER_2_LO, REG_A5XX_UCHE_POWER_COUNTER_2_HI,
+ REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 },
+ { REG_A5XX_UCHE_POWER_COUNTER_3_LO, REG_A5XX_UCHE_POWER_COUNTER_3_HI,
+ REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_vbif[] = {
+ { REG_A5XX_VBIF_PERF_CNT_LOW0, REG_A5XX_VBIF_PERF_CNT_HIGH0 },
+ { REG_A5XX_VBIF_PERF_CNT_LOW1, REG_A5XX_VBIF_PERF_CNT_HIGH1 },
+ { REG_A5XX_VBIF_PERF_CNT_LOW2, REG_A5XX_VBIF_PERF_CNT_HIGH2 },
+ { REG_A5XX_VBIF_PERF_CNT_LOW3, REG_A5XX_VBIF_PERF_CNT_HIGH3 },
+};
+
+static struct adreno_counter a5xx_counters_gpmu[] = {
+ { REG_A5XX_GPMU_POWER_COUNTER_0_LO, REG_A5XX_GPMU_POWER_COUNTER_0_HI },
+ { REG_A5XX_GPMU_POWER_COUNTER_1_LO, REG_A5XX_GPMU_POWER_COUNTER_1_HI },
+ { REG_A5XX_GPMU_POWER_COUNTER_2_LO, REG_A5XX_GPMU_POWER_COUNTER_2_HI },
+ { REG_A5XX_GPMU_POWER_COUNTER_3_LO, REG_A5XX_GPMU_POWER_COUNTER_3_HI },
+ { REG_A5XX_GPMU_POWER_COUNTER_4_LO, REG_A5XX_GPMU_POWER_COUNTER_4_HI },
+ { REG_A5XX_GPMU_POWER_COUNTER_5_LO, REG_A5XX_GPMU_POWER_COUNTER_5_HI },
+};
+
+static struct adreno_counter a5xx_counters_vbif_power[] = {
+ { REG_A5XX_VBIF_PERF_PWR_CNT_LOW0, REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 },
+ { REG_A5XX_VBIF_PERF_PWR_CNT_LOW1, REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 },
+ { REG_A5XX_VBIF_PERF_PWR_CNT_LOW2, REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 },
+};
+
+static struct adreno_counter a5xx_counters_alwayson_power[] = {
+ { REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO,
+ REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI },
+};
+
+#define DEFINE_COUNTER_GROUP(_name, _array, _get, _enable, _put) \
+static struct adreno_counter_group _name = { \
+ .counters = _array, \
+ .nr_counters = ARRAY_SIZE(_array), \
+ .lock = __SPIN_LOCK_UNLOCKED(_name.lock), \
+ .funcs = { \
+ .get = _get, \
+ .enable = _enable, \
+ .read = a5xx_counter_read, \
+ .put = _put, \
+ }, \
+}
+
+#define DEFAULT_COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \
+ _array, a5xx_counter_get, a5xx_counter_enable_cpu, a5xx_counter_put)
+
+#define SPTP_COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \
+ _array, a5xx_counter_get, a5xx_counter_enable_pm4, a5xx_counter_put)
+
+/* "standard" counters */
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_cp, a5xx_counters_cp);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_rbbm, a5xx_counters_rbbm);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_pc, a5xx_counters_pc);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_vfd, a5xx_counters_vfd);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_vpc, a5xx_counters_vpc);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_ccu, a5xx_counters_ccu);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_cmp, a5xx_counters_cmp);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_tse, a5xx_counters_tse);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_ras, a5xx_counters_ras);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_uche, a5xx_counters_uche);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_rb, a5xx_counters_rb);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_vsc, a5xx_counters_vsc);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_lrz, a5xx_counters_lrz);
+
+/* SP/TP counters */
+SPTP_COUNTER_GROUP(a5xx_counter_group_hlsq, a5xx_counters_hlsq);
+SPTP_COUNTER_GROUP(a5xx_counter_group_tp, a5xx_counters_tp);
+SPTP_COUNTER_GROUP(a5xx_counter_group_sp, a5xx_counters_sp);
+
+/* Power counters */
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_ccu, a5xx_counters_power_ccu);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_cp, a5xx_counters_power_cp);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_rb, a5xx_counters_power_rb);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_sp, a5xx_counters_power_sp);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_tp, a5xx_counters_power_tp);
+DEFAULT_COUNTER_GROUP(a5xx_counter_group_power_uche, a5xx_counters_power_uche);
+
+DEFINE_COUNTER_GROUP(a5xx_counter_group_alwayson, a5xx_counters_alwayson,
+ a5xx_counter_get_fixed, NULL, NULL);
+DEFINE_COUNTER_GROUP(a5xx_counter_group_vbif, a5xx_counters_vbif,
+ a5xx_counter_get, a5xx_counter_enable_vbif, a5xx_counter_put);
+DEFINE_COUNTER_GROUP(a5xx_counter_group_gpmu, a5xx_counters_gpmu,
+ a5xx_counter_get, a5xx_counter_enable_gpmu, a5xx_counter_put);
+DEFINE_COUNTER_GROUP(a5xx_counter_group_vbif_power, a5xx_counters_vbif_power,
+ a5xx_counter_get_fixed, a5xx_counter_enable_vbif_power, NULL);
+DEFINE_COUNTER_GROUP(a5xx_counter_group_alwayson_power,
+ a5xx_counters_alwayson_power, a5xx_counter_get_fixed,
+ a5xx_counter_enable_alwayson_power, NULL);
+
+static const struct adreno_counter_group *a5xx_counter_groups[] = {
+ [MSM_COUNTER_GROUP_ALWAYSON] = &a5xx_counter_group_alwayson,
+ [MSM_COUNTER_GROUP_CCU] = &a5xx_counter_group_ccu,
+ [MSM_COUNTER_GROUP_CMP] = &a5xx_counter_group_cmp,
+ [MSM_COUNTER_GROUP_CP] = &a5xx_counter_group_cp,
+ [MSM_COUNTER_GROUP_HLSQ] = &a5xx_counter_group_hlsq,
+ [MSM_COUNTER_GROUP_LRZ] = &a5xx_counter_group_lrz,
+ [MSM_COUNTER_GROUP_PC] = &a5xx_counter_group_pc,
+ [MSM_COUNTER_GROUP_RAS] = &a5xx_counter_group_ras,
+ [MSM_COUNTER_GROUP_RB] = &a5xx_counter_group_rb,
+ [MSM_COUNTER_GROUP_RBBM] = &a5xx_counter_group_rbbm,
+ [MSM_COUNTER_GROUP_SP] = &a5xx_counter_group_sp,
+ [MSM_COUNTER_GROUP_TP] = &a5xx_counter_group_tp,
+ [MSM_COUNTER_GROUP_TSE] = &a5xx_counter_group_tse,
+ [MSM_COUNTER_GROUP_UCHE] = &a5xx_counter_group_uche,
+ [MSM_COUNTER_GROUP_VFD] = &a5xx_counter_group_vfd,
+ [MSM_COUNTER_GROUP_VPC] = &a5xx_counter_group_vpc,
+ [MSM_COUNTER_GROUP_VSC] = &a5xx_counter_group_vsc,
+ [MSM_COUNTER_GROUP_VBIF] = &a5xx_counter_group_vbif,
+ [MSM_COUNTER_GROUP_GPMU_PWR] = &a5xx_counter_group_gpmu,
+ [MSM_COUNTER_GROUP_CCU_PWR] = &a5xx_counter_group_power_ccu,
+ [MSM_COUNTER_GROUP_CP_PWR] = &a5xx_counter_group_power_cp,
+ [MSM_COUNTER_GROUP_RB_PWR] = &a5xx_counter_group_power_rb,
+ [MSM_COUNTER_GROUP_SP_PWR] = &a5xx_counter_group_power_sp,
+ [MSM_COUNTER_GROUP_TP_PWR] = &a5xx_counter_group_power_tp,
+ [MSM_COUNTER_GROUP_UCHE_PWR] = &a5xx_counter_group_power_uche,
+ [MSM_COUNTER_GROUP_VBIF_PWR] = &a5xx_counter_group_vbif_power,
+ [MSM_COUNTER_GROUP_ALWAYSON_PWR] =
+ &a5xx_counter_group_alwayson_power,
+};
+
+int a5xx_counters_init(struct adreno_gpu *adreno_gpu)
+{
+ adreno_gpu->counter_groups = a5xx_counter_groups;
+ adreno_gpu->nr_counter_groups = ARRAY_SIZE(a5xx_counter_groups);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index f5847bc60c49..02c4f2e3155d 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -856,14 +856,6 @@ static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
-
- if (ring != a5xx_gpu->cur_ring) {
- WARN(1, "Tried to idle a non-current ringbuffer\n");
- return false;
- }
-
/* wait for CP to drain ringbuffer: */
if (!adreno_idle(gpu, ring))
return false;
@@ -1218,6 +1210,9 @@ static const struct adreno_gpu_funcs funcs = {
.show = a5xx_show,
#endif
.snapshot = a5xx_snapshot,
+ .get_counter = adreno_get_counter,
+ .read_counter = adreno_read_counter,
+ .put_counter = adreno_put_counter,
},
.get_timestamp = a5xx_get_timestamp,
};
@@ -1341,5 +1336,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
/* Set up the preemption specific bits and pieces for each ringbuffer */
a5xx_preempt_init(gpu);
+ a5xx_counters_init(adreno_gpu);
+
return gpu;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 3de14fe42a1b..8eb3838ffe90 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -184,4 +184,6 @@ static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
return !(atomic_read(&a5xx_gpu->preempt_state) == PREEMPT_NONE);
}
+int a5xx_counters_init(struct adreno_gpu *adreno_gpu);
+
#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f1883825354e..969ed810ce9d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -709,3 +709,52 @@ void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
adreno_snapshot_os(gpu, snapshot);
adreno_snapshot_ringbuffers(gpu, snapshot);
}
+
+/* Return the counter group associated with the given group id */
+static struct adreno_counter_group *get_counter_group(struct msm_gpu *gpu,
+ u32 groupid)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ if (!adreno_gpu->counter_groups)
+ return ERR_PTR(-ENODEV);
+
+ if (groupid >= adreno_gpu->nr_counter_groups)
+ return ERR_PTR(-EINVAL);
+
+ return (struct adreno_counter_group *)
+ adreno_gpu->counter_groups[groupid];
+}
+
+int adreno_get_counter(struct msm_gpu *gpu, u32 groupid, u32 countable,
+ u32 *lo, u32 *hi)
+{
+ struct adreno_counter_group *group =
+ get_counter_group(gpu, groupid);
+
+ if (!IS_ERR_OR_NULL(group) && group->funcs.get)
+ return group->funcs.get(gpu, group, countable, lo, hi);
+
+ return -ENODEV;
+}
+
+u64 adreno_read_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
+{
+ struct adreno_counter_group *group =
+ get_counter_group(gpu, groupid);
+
+ if (!IS_ERR(group) && group->funcs.read)
+ return group->funcs.read(gpu, group, counterid);
+
+ return 0;
+}
+
+void adreno_put_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
+{
+ struct adreno_counter_group *group =
+ get_counter_group(gpu, groupid);
+
+ if (!IS_ERR(group) && group->funcs.put)
+ group->funcs.put(gpu, group, counterid);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 30461115281c..8e8f3e5182d6 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -99,6 +99,30 @@ struct adreno_rbmemptrs {
volatile unsigned int contextidr[MSM_GPU_MAX_RINGS];
};
+struct adreno_counter {
+ u32 lo;
+ u32 hi;
+ u32 sel;
+ u32 countable;
+ u32 refcount;
+};
+
+struct adreno_counter_group {
+ struct adreno_counter *counters;
+ size_t nr_counters;
+ spinlock_t lock;
+ struct {
+ int (*get)(struct msm_gpu *,
+ struct adreno_counter_group *, u32, u32 *, u32 *);
+ void (*enable)(struct msm_gpu *,
+ struct adreno_counter_group *, int);
+ u64 (*read)(struct msm_gpu *,
+ struct adreno_counter_group *, int);
+ void (*put)(struct msm_gpu *,
+ struct adreno_counter_group *, int);
+ } funcs;
+};
+
struct adreno_gpu {
struct msm_gpu base;
struct adreno_rev rev;
@@ -129,6 +153,9 @@ struct adreno_gpu {
uint32_t quirks;
uint32_t speed_bin;
+
+ const struct adreno_counter_group **counter_groups;
+ int nr_counter_groups;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@@ -235,6 +262,11 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu);
void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+int adreno_get_counter(struct msm_gpu *gpu, u32 groupid, u32 countable,
+ u32 *lo, u32 *hi);
+u64 adreno_read_counter(struct msm_gpu *gpu, u32 groupid, int counterid);
+void adreno_put_counter(struct msm_gpu *gpu, u32 groupid, int counterid);
+
/* ringbuffer helpers (the parts that are adreno specific) */
static inline void
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 532ff8677259..276329b7b10c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -606,6 +606,8 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ INIT_LIST_HEAD(&ctx->counters);
+
file->driver_priv = ctx;
kms = priv->kms;
@@ -634,6 +636,9 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
if (kms && kms->funcs && kms->funcs->postclose)
kms->funcs->postclose(kms, file);
+ if (priv->gpu)
+ msm_gpu_cleanup_counters(priv->gpu, ctx);
+
mutex_lock(&dev->struct_mutex);
if (ctx && ctx->aspace && ctx->aspace != priv->gpu->aspace) {
ctx->aspace->mmu->funcs->detach(ctx->aspace->mmu);
@@ -1584,6 +1589,41 @@ void msm_send_crtc_notification(struct drm_crtc *crtc,
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+static int msm_ioctl_counter_get(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (priv->gpu)
+ return msm_gpu_counter_get(priv->gpu, data, ctx);
+
+ return -ENODEV;
+}
+
+static int msm_ioctl_counter_put(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (priv->gpu)
+ return msm_gpu_counter_put(priv->gpu, data, ctx);
+
+ return -ENODEV;
+}
+
+static int msm_ioctl_counter_read(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (priv->gpu)
+ return msm_gpu_counter_read(priv->gpu, data);
+
+ return -ENODEV;
+}
+
int msm_release(struct inode *inode, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
@@ -1619,6 +1659,12 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_UNLOCKED|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_DEREGISTER_EVENT, msm_ioctl_deregister_event,
DRM_UNLOCKED|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_COUNTER_GET, msm_ioctl_counter_get,
+ DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_COUNTER_PUT, msm_ioctl_counter_put,
+ DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_COUNTER_READ, msm_ioctl_counter_read,
+ DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d8a4c34e9be0..d2d118cf7e07 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -76,6 +76,7 @@ struct msm_gem_vma;
struct msm_file_private {
struct msm_gem_address_space *aspace;
+ struct list_head counters;
};
enum msm_mdp_plane_property {
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 14c0cfc58270..5a505a8bf328 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -587,6 +587,118 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
return ret;
}
+struct msm_context_counter {
+ u32 groupid;
+ int counterid;
+ struct list_head node;
+};
+
+int msm_gpu_counter_get(struct msm_gpu *gpu, struct drm_msm_counter *data,
+ struct msm_file_private *ctx)
+{
+ struct msm_context_counter *entry;
+ int counterid;
+ u32 lo = 0, hi = 0;
+
+ if (!ctx || !gpu->funcs->get_counter)
+ return -ENODEV;
+
+ counterid = gpu->funcs->get_counter(gpu, data->groupid, data->countable,
+ &lo, &hi);
+
+ if (counterid < 0)
+ return counterid;
+
+ /*
+ * Check to see if the counter in question is already held by this
+ * process. If it is, put the reference back and return an error.
+ */
+ list_for_each_entry(entry, &ctx->counters, node) {
+ if (entry->groupid == data->groupid &&
+ entry->counterid == counterid) {
+ gpu->funcs->put_counter(gpu, data->groupid, counterid);
+ return -EBUSY;
+ }
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ gpu->funcs->put_counter(gpu, data->groupid, counterid);
+ return -ENOMEM;
+ }
+
+ entry->groupid = data->groupid;
+ entry->counterid = counterid;
+ list_add_tail(&entry->node, &ctx->counters);
+
+ data->counterid = counterid;
+ data->counter_lo = lo;
+ data->counter_hi = hi;
+
+ return 0;
+}
+
+int msm_gpu_counter_put(struct msm_gpu *gpu, struct drm_msm_counter *data,
+ struct msm_file_private *ctx)
+{
+ struct msm_context_counter *entry;
+
+ list_for_each_entry(entry, &ctx->counters, node) {
+ if (entry->groupid == data->groupid &&
+ entry->counterid == data->counterid) {
+ gpu->funcs->put_counter(gpu, data->groupid,
+ data->counterid);
+
+ list_del(&entry->node);
+ kfree(entry);
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+void msm_gpu_cleanup_counters(struct msm_gpu *gpu,
+ struct msm_file_private *ctx)
+{
+ struct msm_context_counter *entry, *tmp;
+
+ if (!ctx)
+ return;
+
+ list_for_each_entry_safe(entry, tmp, &ctx->counters, node) {
+ gpu->funcs->put_counter(gpu, entry->groupid, entry->counterid);
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+u64 msm_gpu_counter_read(struct msm_gpu *gpu, struct drm_msm_counter_read *data)
+{
+ int i;
+
+ if (!gpu->funcs->read_counter)
+ return 0;
+
+ for (i = 0; i < data->nr_ops; i++) {
+ struct drm_msm_counter_read_op op;
+ void __user *ptr = (void __user *)(uintptr_t)
+ (data->ops + (i * sizeof(op)));
+
+ if (copy_from_user(&op, ptr, sizeof(op)))
+ return -EFAULT;
+
+ op.value = gpu->funcs->read_counter(gpu, op.groupid,
+ op.counterid);
+
+ if (copy_to_user(ptr, &op, sizeof(op)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
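+
+/*
+ * Expected flow (illustrative, assuming the usual DRM_IOCTL_MSM_COUNTER_*
+ * uapi wrappers for the ioctls registered in msm_drv.c): userspace fills
+ * drm_msm_counter.groupid/.countable and issues the GET ioctl, which lands
+ * in msm_gpu_counter_get() and hands back a counterid plus the LO/HI
+ * register offsets; READ ioctls batch drm_msm_counter_read_op entries
+ * through msm_gpu_counter_read(); PUT, or closing the file (see
+ * msm_gpu_cleanup_counters()), releases the counter.
+ */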
+
/*
* Init/Cleanup:
*/
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 06dfaabbfcfe..3fac423929c5 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -71,6 +71,10 @@ struct msm_gpu_funcs {
void (*show)(struct msm_gpu *gpu, struct seq_file *m);
#endif
int (*snapshot)(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+ int (*get_counter)(struct msm_gpu *gpu, u32 groupid, u32 countable,
+ u32 *lo, u32 *hi);
+ void (*put_counter)(struct msm_gpu *gpu, u32 groupid, int counterid);
+ u64 (*read_counter)(struct msm_gpu *gpu, u32 groupid, int counterid);
};
struct msm_gpu {
@@ -258,4 +262,16 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);
+int msm_gpu_counter_get(struct msm_gpu *gpu, struct drm_msm_counter *data,
+ struct msm_file_private *ctx);
+
+int msm_gpu_counter_put(struct msm_gpu *gpu, struct drm_msm_counter *data,
+ struct msm_file_private *ctx);
+
+void msm_gpu_cleanup_counters(struct msm_gpu *gpu,
+ struct msm_file_private *ctx);
+
+u64 msm_gpu_counter_read(struct msm_gpu *gpu,
+ struct drm_msm_counter_read *data);
+
#endif /* __MSM_GPU_H__ */
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index 202fee4711c1..537cca877f66 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -164,11 +164,6 @@
#define FG_ADC_RR_DIE_TEMP_SLOPE 2
#define FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC 25000
-#define FAB_ID_GF 0x30
-#define FAB_ID_SMIC 0x11
-#define FAB_ID_660_GF 0x0
-#define FAB_ID_660_TSMC 0x2
-#define FAB_ID_660_MX 0x3
#define FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV 1303168
#define FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C 3784
#define FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV 1338433
@@ -402,11 +397,11 @@ static int rradc_get_660_fab_coeff(struct rradc_chip *chip,
int64_t *offset, int64_t *slope)
{
switch (chip->pmic_fab_id->fab_id) {
- case FAB_ID_660_GF:
+ case PM660_FAB_ID_GF:
*offset = FG_ADC_RR_CHG_TEMP_660_GF_OFFSET_UV;
*slope = FG_RR_CHG_TEMP_660_GF_SLOPE_UV_PER_C;
break;
- case FAB_ID_660_TSMC:
+ case PM660_FAB_ID_TSMC:
*offset = FG_ADC_RR_CHG_TEMP_660_SMIC_OFFSET_UV;
*slope = FG_RR_CHG_TEMP_660_SMIC_SLOPE_UV_PER_C;
break;
@@ -422,11 +417,11 @@ static int rradc_get_8998_fab_coeff(struct rradc_chip *chip,
int64_t *offset, int64_t *slope)
{
switch (chip->pmic_fab_id->fab_id) {
- case FAB_ID_GF:
+ case PMI8998_FAB_ID_GF:
*offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
*slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
break;
- case FAB_ID_SMIC:
+ case PMI8998_FAB_ID_SMIC:
*offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
*slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
break;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 075c18e0e4ae..1d4e8a4ce206 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1128,6 +1128,16 @@ config TOUCHSCREEN_FT5X06_GESTURE
If unsure, say N.
+config TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
+ bool "Synaptics DSX firmware update extra sysfs attributes"
+ depends on TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE
+ help
+ Say Y here to expose the extra sysfs attributes used for firmware
+ update in a development environment.
+ This does not affect the core or other subsystem attributes.
+
+ If unsure, say N.
+
config TOUCHSCREEN_ROHM_BU21023
tristate "ROHM BU21023/24 Dual touch support resistive touchscreens"
depends on I2C
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
index 0ec16e606545..4787f2bcd768 100644
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_fw_update.c
@@ -102,6 +102,7 @@
(fwu->config_data[2] == config_id[2]) && \
(fwu->config_data[3] == config_id[3]))
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
static ssize_t fwu_sysfs_show_image(struct file *data_file,
struct kobject *kobj, struct bin_attribute *attributes,
char *buf, loff_t pos, size_t count);
@@ -157,6 +158,7 @@ static ssize_t fwu_sysfs_config_id_show(struct device *dev,
static ssize_t fwu_sysfs_package_id_show(struct device *dev,
struct device_attribute *attr, char *buf);
+#endif
enum bl_version {
V5 = 5,
@@ -296,6 +298,7 @@ struct synaptics_rmi4_fwu_handle {
struct synaptics_rmi4_data *rmi4_data;
};
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
static struct bin_attribute dev_attr_data = {
.attr = {
.name = "data",
@@ -305,9 +308,11 @@ static struct bin_attribute dev_attr_data = {
.read = fwu_sysfs_show_image,
.write = fwu_sysfs_store_image,
};
+#endif
static struct device_attribute attrs[] = {
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
__ATTR(force_update_fw, S_IWUSR | S_IWGRP,
NULL,
fwu_sysfs_force_reflash_store),
@@ -353,6 +358,7 @@ static struct device_attribute attrs[] = {
__ATTR(package_id, S_IRUGO,
fwu_sysfs_package_id_show,
synaptics_rmi4_store_error),
+#endif
};
static struct synaptics_rmi4_fwu_handle *fwu;
@@ -1220,6 +1226,7 @@ write_config:
return retval;
}
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
static int fwu_start_write_config(void)
{
int retval;
@@ -1395,6 +1402,7 @@ exit:
return retval;
}
+#endif
static int fwu_do_lockdown(void)
{
@@ -1585,6 +1593,7 @@ int synaptics_dsx_fw_updater(unsigned char *fw_data)
}
EXPORT_SYMBOL(synaptics_dsx_fw_updater);
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
static ssize_t fwu_sysfs_show_image(struct file *data_file,
struct kobject *kobj, struct bin_attribute *attributes,
char *buf, loff_t pos, size_t count)
@@ -1972,6 +1981,7 @@ static ssize_t fwu_sysfs_package_id_show(struct device *dev,
(package_id[1] << 8) | package_id[0],
(package_id[3] << 8) | package_id[2]);
}
+#endif
static void synaptics_rmi4_fwu_attn(struct synaptics_rmi4_data *rmi4_data,
unsigned char intr_mask)
@@ -2045,6 +2055,7 @@ static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
fwu->do_lockdown = DO_LOCKDOWN;
fwu->initialized = true;
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
retval = sysfs_create_bin_file(&rmi4_data->input_dev->dev.kobj,
&dev_attr_data);
if (retval < 0) {
@@ -2053,6 +2064,7 @@ static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)
__func__);
goto exit_free_fwu;
}
+#endif
for (attr_count = 0; attr_count < ARRAY_SIZE(attrs); attr_count++) {
retval = sysfs_create_file(&rmi4_data->input_dev->dev.kobj,
@@ -2074,7 +2086,9 @@ exit_remove_attrs:
&attrs[attr_count].attr);
}
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+#endif
exit_free_fwu:
kfree(fwu);
@@ -2096,7 +2110,9 @@ static void synaptics_rmi4_fwu_remove(struct synaptics_rmi4_data *rmi4_data)
&attrs[attr_count].attr);
}
+#ifdef CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_EXTRA_SYSFS
sysfs_remove_bin_file(&rmi4_data->input_dev->dev.kobj, &dev_attr_data);
+#endif
kfree(fwu->read_config_buf);
kfree(fwu);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 54b237a9fc05..b44b7573e0e6 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -2441,7 +2441,7 @@ int msm_isp_ab_ib_update_lpm_mode(struct vfe_device *vfe_dev, void *arg)
total_bandwidth +=
stream_info->bandwidth[
vfe_idx];
- stream_info->state = PAUSING;
+ stream_info->state = PAUSED;
}
spin_unlock_irqrestore(&stream_info->lock, flags);
}
@@ -2455,7 +2455,7 @@ int msm_isp_ab_ib_update_lpm_mode(struct vfe_device *vfe_dev, void *arg)
msm_isp_get_stream_common_data(vfe_dev,
ab_ib_vote->stream_src[i]);
spin_lock_irqsave(&stream_info->lock, flags);
- if (stream_info->state == PAUSING) {
+ if (stream_info->state == PAUSED) {
vfe_idx =
msm_isp_get_vfe_idx_for_stream(vfe_dev,
stream_info);
@@ -2813,6 +2813,7 @@ static int __msm_isp_check_stream_state(struct msm_vfe_axi_stream *stream_info,
case RESUMING:
case RESUME_PENDING:
case ACTIVE:
+ case PAUSED:
if (cmd != 0)
return -EALREADY;
break;
@@ -2879,9 +2880,11 @@ static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev,
* those state transitions instead of directly forcing stream to
* be INACTIVE
*/
- while (stream_info->state != ACTIVE)
- __msm_isp_axi_stream_update(stream_info,
+ if (stream_info->state != PAUSED) {
+ while (stream_info->state != ACTIVE)
+ __msm_isp_axi_stream_update(stream_info,
&timestamp);
+ }
msm_isp_cfg_stream_scratch(stream_info, VFE_PING_FLAG);
msm_isp_cfg_stream_scratch(stream_info, VFE_PONG_FLAG);
stream_info->undelivered_request_cnt = 0;
@@ -2894,8 +2897,15 @@ static void __msm_isp_stop_axi_streams(struct vfe_device *vfe_dev,
vfe_dev->hw_info->vfe_ops.axi_ops.
clear_wm_irq_mask(vfe_dev, stream_info);
}
- init_completion(&stream_info->inactive_comp);
- stream_info->state = STOP_PENDING;
+ if (stream_info->state == ACTIVE) {
+ init_completion(&stream_info->inactive_comp);
+ stream_info->state = STOP_PENDING;
+ } else if (stream_info->state == PAUSED) {
+ /* don't wait for reg update */
+ stream_info->state = STOP_PENDING;
+ msm_isp_axi_stream_enable_cfg(stream_info);
+ stream_info->state = INACTIVE;
+ }
spin_unlock_irqrestore(&stream_info->lock, flags);
}
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index 9d52107c9993..41d8ef577a27 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -73,7 +73,7 @@ static void msm_ispif_io_dump_reg(struct ispif_device *ispif)
static inline int msm_ispif_is_intf_valid(uint32_t csid_version,
- uint8_t intf_type)
+ enum msm_ispif_vfe_intf intf_type)
{
return ((csid_version <= CSID_VERSION_V22 && intf_type != VFE0) ||
(intf_type >= VFE_MAX)) ? false : true;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
index 92b6e8ffa92e..76a4f1e39837 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
@@ -477,6 +477,7 @@ static int sde_mdp_parse_dt_misc(struct platform_device *pdev,
{
int rc;
u32 data;
+ struct device_node *node;
rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size",
&data);
@@ -505,6 +506,19 @@ static int sde_mdp_parse_dt_misc(struct platform_device *pdev,
mdata->mdp_base = mdata->sde_io.base + SDE_MDP_OFFSET;
+ node = of_get_child_by_name(pdev->dev.of_node,
+ "qcom,sde-reg-bus");
+ if (node) {
+ mdata->reg_bus_pdata = msm_bus_pdata_from_node(pdev, node);
+ if (IS_ERR_OR_NULL(mdata->reg_bus_pdata)) {
+ SDEROT_DBG("bus_pdata reg_bus failed\n");
+ mdata->reg_bus_pdata = NULL;
+ }
+ } else {
+ SDEROT_DBG("sde-reg-bus not found\n");
+ mdata->reg_bus_pdata = NULL;
+ }
+
return 0;
}
@@ -553,9 +567,10 @@ static int sde_mdp_bus_scale_register(struct sde_rot_data_type *mdata)
if (!mdata->reg_bus_hdl) {
/* Continue without reg_bus scaling */
SDEROT_WARN("reg_bus_client register failed\n");
- } else
+ } else {
SDEROT_DBG("register reg_bus_hdl=%x\n",
mdata->reg_bus_hdl);
+ }
}
return 0;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index 9ba0b7d93616..d68ff4fde306 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -195,6 +195,7 @@ struct sde_rot_data_type {
struct ion_client *iclient;
bool handoff_done;
+ struct msm_bus_scale_pdata *reg_bus_pdata;
};
int sde_rotator_base_init(struct sde_rot_data_type **pmdata,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 1e85923c20b1..442e80e7100e 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -2419,6 +2419,7 @@ static int sde_rotator_parse_dt_bus(struct sde_rot_mgr *mgr,
{
int ret = 0, i;
int usecases;
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
mgr->data_bus.bus_scale_pdata = msm_bus_cl_get_pdata(dev);
if (IS_ERR_OR_NULL(mgr->data_bus.bus_scale_pdata)) {
@@ -2431,12 +2432,16 @@ static int sde_rotator_parse_dt_bus(struct sde_rot_mgr *mgr,
}
}
- mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table;
- usecases = mgr->reg_bus.bus_scale_pdata->num_usecases;
- for (i = 0; i < usecases; i++) {
- rot_reg_bus_usecases[i].num_paths = 1;
- rot_reg_bus_usecases[i].vectors =
- &rot_reg_bus_vectors[i];
+ if (mdata && mdata->reg_bus_pdata) {
+ mgr->reg_bus.bus_scale_pdata = mdata->reg_bus_pdata;
+ } else {
+ mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table;
+ usecases = mgr->reg_bus.bus_scale_pdata->num_usecases;
+ for (i = 0; i < usecases; i++) {
+ rot_reg_bus_usecases[i].num_paths = 1;
+ rot_reg_bus_usecases[i].vectors =
+ &rot_reg_bus_vectors[i];
+ }
}
return ret;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index e170c9ffafc7..0cd8e613c224 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -1924,8 +1924,13 @@ static long sde_rotator_private_ioctl(struct file *file, void *fh,
static long sde_rotator_compat_ioctl32(struct file *file,
unsigned int cmd, unsigned long arg)
{
+ struct video_device *vdev = video_devdata(file);
+ struct sde_rotator_ctx *ctx =
+ sde_rotator_ctx_from_fh(file->private_data);
long ret;
+ mutex_lock(vdev->lock);
+
switch (cmd) {
case VIDIOC_S_SDE_ROTATOR_FENCE:
case VIDIOC_G_SDE_ROTATOR_FENCE:
@@ -1934,14 +1939,14 @@ static long sde_rotator_compat_ioctl32(struct file *file,
if (copy_from_user(&fence, (void __user *)arg,
sizeof(struct msm_sde_rotator_fence)))
- return -EFAULT;
+ goto ioctl32_error;
ret = sde_rotator_private_ioctl(file, file->private_data,
0, cmd, (void *)&fence);
if (copy_to_user((void __user *)arg, &fence,
sizeof(struct msm_sde_rotator_fence)))
- return -EFAULT;
+ goto ioctl32_error;
break;
}
@@ -1952,24 +1957,31 @@ static long sde_rotator_compat_ioctl32(struct file *file,
if (copy_from_user(&comp_ratio, (void __user *)arg,
sizeof(struct msm_sde_rotator_comp_ratio)))
- return -EFAULT;
+ goto ioctl32_error;
ret = sde_rotator_private_ioctl(file, file->private_data,
0, cmd, (void *)&comp_ratio);
if (copy_to_user((void __user *)arg, &comp_ratio,
sizeof(struct msm_sde_rotator_comp_ratio)))
- return -EFAULT;
+ goto ioctl32_error;
break;
}
default:
+ SDEDEV_ERR(ctx->rot_dev->dev, "invalid ioctl32 type:%x\n", cmd);
ret = -ENOIOCTLCMD;
break;
}
+ mutex_unlock(vdev->lock);
return ret;
+
+ioctl32_error:
+ mutex_unlock(vdev->lock);
+ SDEDEV_ERR(ctx->rot_dev->dev, "error handling ioctl32 cmd:%x\n", cmd);
+ return -EFAULT;
}
#endif
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 2c79ad7e45be..c3a0cfb390c4 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -1600,6 +1600,7 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
u32 safe_lut = 0; /* applicable for realtime client only */
u32 flags = 0;
u32 rststs = 0;
+ u32 reg = 0;
struct sde_rotation_item *item;
if (!hw || !entry) {
@@ -1836,10 +1837,10 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
/* Enable write gather for writeback to remove write gaps, which
* may hang AXI/BIMC/SDE.
*/
- if (!((mdata->mdss_version == MDSS_MDP_HW_REV_320) ||
- (mdata->mdss_version == MDSS_MDP_HW_REV_330)))
- SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
- BIT(mdata->vbif_xin_id[XIN_WRITEBACK]));
+
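+	/* Read-modify-write so that write-gather enable bits already set for
+	 * other XIN clients are preserved when enabling the writeback client.
+	 */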
+ reg = SDE_VBIF_READ(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN);
+ SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
+ reg | BIT(mdata->vbif_xin_id[XIN_WRITEBACK]));
if (mdata->vbif_reg_unlock)
mdata->vbif_reg_unlock();
diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c
index 460ffc79f566..33ec0c15efa6 100644
--- a/drivers/misc/hdcp.c
+++ b/drivers/misc/hdcp.c
@@ -97,11 +97,6 @@
*/
#define SLEEP_SET_HW_KEY_MS 220
-#define QSEECOM_ALIGN_SIZE 0x40
-#define QSEECOM_ALIGN_MASK (QSEECOM_ALIGN_SIZE - 1)
-#define QSEECOM_ALIGN(x)\
- ((x + QSEECOM_ALIGN_SIZE) & (~QSEECOM_ALIGN_MASK))
-
/* hdcp command status */
#define HDCP_SUCCESS 0
diff --git a/drivers/misc/qseecom_kernel.h b/drivers/misc/qseecom_kernel.h
index 8f981903c3a1..40426b749f60 100644
--- a/drivers/misc/qseecom_kernel.h
+++ b/drivers/misc/qseecom_kernel.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2013, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2013, 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,7 +19,7 @@
#define QSEECOM_ALIGN_SIZE 0x40
#define QSEECOM_ALIGN_MASK (QSEECOM_ALIGN_SIZE - 1)
#define QSEECOM_ALIGN(x) \
- ((x + QSEECOM_ALIGN_SIZE) & (~QSEECOM_ALIGN_MASK))
+ ((x + QSEECOM_ALIGN_MASK) & (~QSEECOM_ALIGN_MASK))
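+/* e.g., QSEECOM_ALIGN(0x41) == 0x80, while an already aligned length such as
+ * QSEECOM_ALIGN(0x40) now stays 0x40; the previous form rounded aligned
+ * lengths up by a full QSEECOM_ALIGN_SIZE.
+ */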
/*
* struct qseecom_handle -
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index b696f54286da..2eaac11ec8ba 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2736,14 +2736,15 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
msm_host->offset;
unsigned long flags;
bool done = false;
- u32 io_sig_sts;
+ u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
spin_lock_irqsave(&host->lock, flags);
pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
mmc_hostname(host->mmc), __func__, req_type,
msm_host->curr_pwr_state, msm_host->curr_io_level);
- io_sig_sts = sdhci_msm_readl_relaxed(host,
- msm_host_offset->CORE_GENERICS);
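+	/* io_sig_sts keeps its SWITCHABLE_SIGNALLING_VOL default and
+	 * CORE_GENERICS is only read while the MCI register space is present.
+	 */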
+ if (!msm_host->mci_removed)
+ io_sig_sts = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_GENERICS);
/*
* The IRQ for request type IO High/Low will be generated when -
@@ -3913,8 +3914,8 @@ void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
group->req.type = PM_QOS_REQ_AFFINE_CORES;
cpumask_copy(&group->req.cpus_affine,
&msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
- /* For initialization phase, set the performance mode latency */
- group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE];
+ /* We set default latency here for all pm_qos cpu groups. */
+ group->latency = PM_QOS_DEFAULT_VALUE;
pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
group->latency);
pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f184fb5bd110..fe75c7d4372d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -425,4 +425,6 @@ config FUJITSU_ES
source "drivers/net/hyperv/Kconfig"
+source "drivers/net/rmnet/Kconfig"
+
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 900b0c5320bb..3cb2c188ee3f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -70,3 +70,4 @@ obj-$(CONFIG_HYPERV_NET) += hyperv/
obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
obj-$(CONFIG_FUJITSU_ES) += fjes/
+obj-$(CONFIG_RMNET) += rmnet/
diff --git a/drivers/net/rmnet/Kconfig b/drivers/net/rmnet/Kconfig
new file mode 100644
index 000000000000..751893959b57
--- /dev/null
+++ b/drivers/net/rmnet/Kconfig
@@ -0,0 +1,21 @@
+#
+# RMNET MAP driver
+#
+
+menuconfig RMNET
+ depends on NETDEVICES
+ bool "RmNet MAP driver"
+ ---help---
+ If you say Y here, then the rmnet module will be statically
+ compiled into the kernel. The rmnet module provides MAP
+ functionality for embedded and bridged traffic.
+if RMNET
+
+config RMNET_DEBUG
+ bool "RmNet Debug Logging"
+ ---help---
+	  Say Y here if you want RmNet to be able to log packets in the main
+ system log. This should not be enabled on production builds as it can
+ impact system performance. Note that simply enabling it here will not
+ enable the logging; it must be enabled at run-time as well.
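+	  In this driver the run-time switch is provided by the dump_pkt_rx
+	  and dump_pkt_tx module parameters.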
+endif # RMNET
diff --git a/drivers/net/rmnet/Makefile b/drivers/net/rmnet/Makefile
new file mode 100644
index 000000000000..2b6c9cf3756b
--- /dev/null
+++ b/drivers/net/rmnet/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the RMNET module
+#
+
+rmnet-y := rmnet_main.o
+rmnet-y += rmnet_config.o
+rmnet-y += rmnet_vnd.o
+rmnet-y += rmnet_handlers.o
+rmnet-y += rmnet_map_data.o
+rmnet-y += rmnet_map_command.o
+rmnet-y += rmnet_stats.o
+obj-$(CONFIG_RMNET) += rmnet.o
+
+CFLAGS_rmnet_main.o := -I$(src)
diff --git a/drivers/net/rmnet/rmnet_config.c b/drivers/net/rmnet/rmnet_config.c
new file mode 100644
index 000000000000..a20f54adc0b3
--- /dev/null
+++ b/drivers/net/rmnet/rmnet_config.c
@@ -0,0 +1,1157 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET configuration engine
+ *
+ */
+
+#include <net/sock.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/rmnet.h>
+#include "rmnet_config.h"
+#include "rmnet_handlers.h"
+#include "rmnet_vnd.h"
+#include "rmnet_private.h"
+
+RMNET_LOG_MODULE(RMNET_LOGMASK_CONFIG);
+
+/* Local Definitions and Declarations */
+#define RMNET_LOCAL_LOGICAL_ENDPOINT -1
+#define RMNET_MAX_AGG_COUNT (128)
+
+static struct sock *nl_socket_handle;
+
+static struct netlink_kernel_cfg rmnet_netlink_cfg = {
+ .input = rmnet_config_netlink_msg_handler
+};
+
+static struct notifier_block rmnet_dev_notifier = {
+ .notifier_call = rmnet_config_notify_cb,
+ .next = 0,
+ .priority = 0
+};
+
+struct rmnet_free_vnd_work {
+ struct work_struct work;
+ int vnd_id[RMNET_MAX_VND];
+ int count;
+};
+
+/* Init and Cleanup */
+
+static struct sock *_rmnet_config_start_netlink(void)
+{
+ return netlink_kernel_create(&init_net,
+ RMNET_NETLINK_PROTO,
+ &rmnet_netlink_cfg);
+}
+
+/* rmnet_config_init() - Startup init
+ *
+ * Registers netlink protocol with kernel and opens socket. Netlink handler is
+ * registered with kernel.
+ */
+int rmnet_config_init(void)
+{
+ int rc;
+
+ nl_socket_handle = _rmnet_config_start_netlink();
+ if (!nl_socket_handle) {
+ LOGE("%s", "Failed to init netlink socket");
+ return RMNET_INIT_ERROR;
+ }
+
+ rc = register_netdevice_notifier(&rmnet_dev_notifier);
+ if (rc != 0) {
+ LOGE("Failed to register device notifier; rc=%d", rc);
+ netlink_kernel_release(nl_socket_handle);
+ return RMNET_INIT_ERROR;
+ }
+
+ return 0;
+}
+
+/* rmnet_config_exit() - Cleans up all netlink related resources
+ */
+void rmnet_config_exit(void)
+{
+ int rc;
+
+ netlink_kernel_release(nl_socket_handle);
+ rc = unregister_netdevice_notifier(&rmnet_dev_notifier);
+ if (rc != 0)
+ LOGE("Failed to unregister device notifier; rc=%d", rc);
+}
+
+/* Helper Functions */
+
+/* _rmnet_is_physical_endpoint_associated() - Determines if device is associated
+ * @dev: Device to check
+ *
+ * Compares device rx_handler callback pointer against known function
+ *
+ * Return:
+ * - 1 if associated
+ * - 0 if NOT associated
+ */
+static inline int _rmnet_is_physical_endpoint_associated(struct net_device *dev)
+{
+ rx_handler_func_t *rx_handler;
+
+ rx_handler = rcu_dereference(dev->rx_handler);
+
+ if (rx_handler == rmnet_rx_handler)
+ return 1;
+ else
+ return 0;
+}
+
+/* _rmnet_get_phys_ep_config() - Get physical ep config for an associated device
+ * @dev: Device to get endpoint configuration from
+ *
+ * Return:
+ * - pointer to configuration if successful
+ * - 0 (null) if device is not associated
+ */
+static inline struct rmnet_phys_ep_conf_s *_rmnet_get_phys_ep_config
+ (struct net_device *dev)
+{
+ if (_rmnet_is_physical_endpoint_associated(dev))
+ return (struct rmnet_phys_ep_conf_s *)
+ rcu_dereference(dev->rx_handler_data);
+ else
+ return NULL;
+}
+
+/* _rmnet_get_logical_ep() - Gets the logical end point configuration
+ * structure for a network device
+ * @dev: Device to get endpoint configuration from
+ * @config_id: Logical endpoint id on device
+ * Retrieves the logical_endpoint_config structure.
+ *
+ * Return:
+ * - End point configuration structure
+ * - NULL in case of an error
+ */
+struct rmnet_logical_ep_conf_s *_rmnet_get_logical_ep(struct net_device *dev,
+ int config_id)
+{
+ struct rmnet_phys_ep_conf_s *config;
+ struct rmnet_logical_ep_conf_s *epconfig_l;
+
+ if (rmnet_vnd_is_vnd(dev)) {
+ epconfig_l = rmnet_vnd_get_le_config(dev);
+ } else {
+ config = _rmnet_get_phys_ep_config(dev);
+
+ if (!config)
+ return NULL;
+
+ if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT)
+ epconfig_l = &config->local_ep;
+ else
+ epconfig_l = &config->muxed_ep[config_id];
+ }
+
+ return epconfig_l;
+}
+
+static void _rmnet_netlink_set_link_egress_data_format
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+
+ if (!rmnet_header || !resp_rmnet)
+ return;
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
+
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ resp_rmnet->return_code =
+ rmnet_set_egress_data_format(dev,
+ rmnet_header->data_format.flags,
+ rmnet_header->data_format.agg_size,
+ rmnet_header->data_format.agg_count
+ );
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_set_link_ingress_data_format
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+
+ if (!rmnet_header || !resp_rmnet)
+ return;
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+ dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ resp_rmnet->return_code = rmnet_set_ingress_data_format(
+ dev,
+ rmnet_header->data_format.flags);
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_set_logical_ep_config
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev, *dev2;
+
+ if (!rmnet_header || !resp_rmnet)
+ return;
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ if (rmnet_header->local_ep_config.ep_id < -1 ||
+ rmnet_header->local_ep_config.ep_id > 254) {
+ resp_rmnet->return_code = RMNET_CONFIG_BAD_ARGUMENTS;
+ return;
+ }
+
+ dev = dev_get_by_name(&init_net,
+ rmnet_header->local_ep_config.dev);
+
+ dev2 = dev_get_by_name(&init_net,
+ rmnet_header->local_ep_config.next_dev);
+
+ if (dev && dev2)
+ resp_rmnet->return_code =
+ rmnet_set_logical_endpoint_config(
+ dev,
+ rmnet_header->local_ep_config.ep_id,
+ rmnet_header->local_ep_config.operating_mode,
+ dev2);
+ else
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ if (dev)
+ dev_put(dev);
+ if (dev2)
+ dev_put(dev2);
+}
+
+static void _rmnet_netlink_unset_logical_ep_config
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+
+ if (!rmnet_header || !resp_rmnet)
+ return;
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ if (rmnet_header->local_ep_config.ep_id < -1 ||
+ rmnet_header->local_ep_config.ep_id > 254) {
+ resp_rmnet->return_code = RMNET_CONFIG_BAD_ARGUMENTS;
+ return;
+ }
+
+ dev = dev_get_by_name(&init_net, rmnet_header->local_ep_config.dev);
+
+ if (dev) {
+ resp_rmnet->return_code =
+ rmnet_unset_logical_endpoint_config(
+ dev,
+ rmnet_header->local_ep_config.ep_id);
+ dev_put(dev);
+ } else {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ }
+}
+
+static void _rmnet_netlink_get_logical_ep_config
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+
+ if (!rmnet_header || !resp_rmnet)
+ return;
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ if (rmnet_header->local_ep_config.ep_id < -1 ||
+ rmnet_header->local_ep_config.ep_id > 254) {
+ resp_rmnet->return_code = RMNET_CONFIG_BAD_ARGUMENTS;
+ return;
+ }
+
+ dev = dev_get_by_name(&init_net, rmnet_header->local_ep_config.dev);
+
+ if (dev) {
+ resp_rmnet->return_code =
+ rmnet_get_logical_endpoint_config(
+ dev,
+ rmnet_header->local_ep_config.ep_id,
+ &resp_rmnet->local_ep_config.operating_mode,
+ resp_rmnet->local_ep_config.next_dev,
+ sizeof(resp_rmnet->local_ep_config.next_dev));
+ } else {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ if (resp_rmnet->return_code == RMNET_CONFIG_OK) {
+ /* Begin Data */
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+ resp_rmnet->arg_length = sizeof(((struct rmnet_nl_msg_s *)0)
+ ->local_ep_config);
+ }
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_associate_network_device
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+
+ if (!rmnet_header || !resp_rmnet)
+ return;
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+ dev = dev_get_by_name(&init_net, rmnet_header->data);
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ resp_rmnet->return_code = rmnet_associate_network_device(dev);
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_unassociate_network_device
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+
+ if (!rmnet_header || !resp_rmnet)
+ return;
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+ dev = dev_get_by_name(&init_net, rmnet_header->data);
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ resp_rmnet->return_code = rmnet_unassociate_network_device(dev);
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_get_network_device_associated
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+
+ if (!rmnet_header || !resp_rmnet)
+ return;
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+ dev = dev_get_by_name(&init_net, rmnet_header->data);
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ resp_rmnet->return_code = _rmnet_is_physical_endpoint_associated(dev);
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_get_link_egress_data_format
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+ struct rmnet_phys_ep_conf_s *config;
+
+ if (!rmnet_header || !resp_rmnet)
+ return;
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+ dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ config = _rmnet_get_phys_ep_config(dev);
+ if (!config) {
+ resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
+ dev_put(dev);
+ return;
+ }
+
+ /* Begin Data */
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+ resp_rmnet->arg_length = sizeof(((struct rmnet_nl_msg_s *)0)
+ ->data_format);
+ resp_rmnet->data_format.flags = config->egress_data_format;
+ resp_rmnet->data_format.agg_count = 0;
+ resp_rmnet->data_format.agg_size = 0;
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_get_link_ingress_data_format
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+ struct rmnet_phys_ep_conf_s *config;
+
+ if (!rmnet_header || !resp_rmnet)
+ return;
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+ dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ config = _rmnet_get_phys_ep_config(dev);
+ if (!config) {
+ resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
+ dev_put(dev);
+ return;
+ }
+
+ /* Begin Data */
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+ resp_rmnet->arg_length = sizeof(((struct rmnet_nl_msg_s *)0)
+ ->data_format);
+ resp_rmnet->data_format.flags = config->ingress_data_format;
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_get_vnd_name
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ int r;
+
+ if (!rmnet_header || !resp_rmnet)
+ return;
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+ r = rmnet_vnd_get_name(rmnet_header->vnd.id, resp_rmnet->vnd.vnd_name,
+ RMNET_MAX_STR_LEN);
+
+ if (r != 0) {
+ resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
+ return;
+ }
+
+ /* Begin Data */
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+ resp_rmnet->arg_length = sizeof(((struct rmnet_nl_msg_s *)0)->vnd);
+}
+
+/* rmnet_config_netlink_msg_handler() - Netlink message handler callback
+ * @skb: Packet containing netlink messages
+ *
+ * Standard kernel-expected format for a netlink message handler. Processes SKBs
+ * which contain RmNet data specific netlink messages.
+ */
+void rmnet_config_netlink_msg_handler(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlmsg_header, *resp_nlmsg;
+ struct rmnet_nl_msg_s *rmnet_header, *resp_rmnet;
+ int return_pid, response_data_length;
+ struct sk_buff *skb_response;
+
+ response_data_length = 0;
+ nlmsg_header = (struct nlmsghdr *)skb->data;
+ rmnet_header = (struct rmnet_nl_msg_s *)nlmsg_data(nlmsg_header);
+
+ if (!nlmsg_header->nlmsg_pid ||
+ (nlmsg_header->nlmsg_len < sizeof(struct nlmsghdr) +
+ sizeof(struct rmnet_nl_msg_s)))
+ return;
+
+ LOGL("Netlink message pid=%d, seq=%d, length=%d, rmnet_type=%d",
+ nlmsg_header->nlmsg_pid,
+ nlmsg_header->nlmsg_seq,
+ nlmsg_header->nlmsg_len,
+ rmnet_header->message_type);
+
+ return_pid = nlmsg_header->nlmsg_pid;
+
+ skb_response = nlmsg_new(sizeof(struct nlmsghdr)
+ + sizeof(struct rmnet_nl_msg_s),
+ GFP_KERNEL);
+
+ if (!skb_response)
+ return;
+
+ resp_nlmsg = nlmsg_put(skb_response,
+ 0,
+ nlmsg_header->nlmsg_seq,
+ NLMSG_DONE,
+ sizeof(struct rmnet_nl_msg_s),
+ 0);
+
+ resp_rmnet = nlmsg_data(resp_nlmsg);
+
+ if (!resp_rmnet)
+ return;
+
+ resp_rmnet->message_type = rmnet_header->message_type;
+ rtnl_lock();
+ switch (rmnet_header->message_type) {
+ case RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE:
+ _rmnet_netlink_associate_network_device
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE:
+ _rmnet_netlink_unassociate_network_device
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_GET_NETWORK_DEVICE_ASSOCIATED:
+ _rmnet_netlink_get_network_device_associated
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT:
+ _rmnet_netlink_set_link_egress_data_format
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT:
+ _rmnet_netlink_get_link_egress_data_format
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT:
+ _rmnet_netlink_set_link_ingress_data_format
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT:
+ _rmnet_netlink_get_link_ingress_data_format
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_SET_LOGICAL_EP_CONFIG:
+ _rmnet_netlink_set_logical_ep_config(rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_UNSET_LOGICAL_EP_CONFIG:
+ _rmnet_netlink_unset_logical_ep_config(rmnet_header,
+ resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_GET_LOGICAL_EP_CONFIG:
+ _rmnet_netlink_get_logical_ep_config(rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_NEW_VND:
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ resp_rmnet->return_code =
+ rmnet_create_vnd(rmnet_header->vnd.id);
+ break;
+
+ case RMNET_NETLINK_NEW_VND_WITH_PREFIX:
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ resp_rmnet->return_code = rmnet_create_vnd_prefix(
+ rmnet_header->vnd.id,
+ rmnet_header->vnd.vnd_name);
+ break;
+
+ case RMNET_NETLINK_FREE_VND:
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+		/* See the rmnet_vnd_free_dev() documentation regarding
+		 * the locking sequence below
+ */
+ rtnl_unlock();
+ resp_rmnet->return_code = rmnet_free_vnd(rmnet_header->vnd.id);
+ rtnl_lock();
+ break;
+
+ case RMNET_NETLINK_GET_VND_NAME:
+ _rmnet_netlink_get_vnd_name(rmnet_header, resp_rmnet);
+ break;
+
+ default:
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ resp_rmnet->return_code = RMNET_CONFIG_UNKNOWN_MESSAGE;
+ break;
+ }
+ rtnl_unlock();
+ nlmsg_unicast(nl_socket_handle, skb_response, return_pid);
+ LOGD("%s", "Done processing command");
+}
+
+/* Configuration API */
+
+/* rmnet_unassociate_network_device() - Unassociate network device
+ * @dev: Device to unassociate
+ *
+ * Frees all structures generated for the device. Unregisters the rx_handler
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
+ * - RMNET_CONFIG_INVALID_REQUEST if device is not already associated
+ * - RMNET_CONFIG_DEVICE_IN_USE if device has logical ep that wasn't unset
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ */
+int rmnet_unassociate_network_device(struct net_device *dev)
+{
+ struct rmnet_phys_ep_conf_s *config;
+ int config_id = RMNET_LOCAL_LOGICAL_ENDPOINT;
+ struct rmnet_logical_ep_conf_s *epconfig_l;
+
+ ASSERT_RTNL();
+
+ LOGL("(%s);", dev->name);
+
+ if (!dev)
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ if (!_rmnet_is_physical_endpoint_associated(dev))
+ return RMNET_CONFIG_INVALID_REQUEST;
+
+ for (; config_id < RMNET_MAX_LOGICAL_EP; config_id++) {
+ epconfig_l = _rmnet_get_logical_ep(dev, config_id);
+ if (epconfig_l && epconfig_l->refcount)
+ return RMNET_CONFIG_DEVICE_IN_USE;
+ }
+
+ config = (struct rmnet_phys_ep_conf_s *)
+ rcu_dereference(dev->rx_handler_data);
+
+ if (!config)
+ return RMNET_CONFIG_UNKNOWN_ERROR;
+
+ kfree(config);
+
+ netdev_rx_handler_unregister(dev);
+
+ /* Explicitly release the reference from the device */
+ dev_put(dev);
+ return RMNET_CONFIG_OK;
+}
+
+/* rmnet_set_ingress_data_format() - Set ingress data format on network device
+ * @dev: Device to set ingress data format on
+ * @ingress_data_format: 32-bit unsigned bitmask of ingress format
+ *
+ * Network device must already have association with RmNet Data driver
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ */
+int rmnet_set_ingress_data_format(struct net_device *dev,
+ u32 ingress_data_format)
+{
+ struct rmnet_phys_ep_conf_s *config;
+
+ ASSERT_RTNL();
+
+ LOGL("(%s,0x%08X);", dev->name, ingress_data_format);
+
+ if (!dev)
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ config = _rmnet_get_phys_ep_config(dev);
+
+ if (!config)
+ return RMNET_CONFIG_INVALID_REQUEST;
+
+ config->ingress_data_format = ingress_data_format;
+
+ return RMNET_CONFIG_OK;
+}
+
+/* rmnet_set_egress_data_format() - Set egress data format on network device
+ * @dev: Device to set egress data format on
+ * @egress_data_format: 32-bit unsigned bitmask of egress format
+ * @agg_size: Maximum size (bytes) of aggregated egress frames
+ * @agg_count: Maximum number of packets per aggregated frame; values above
+ *             RMNET_MAX_AGG_COUNT are rejected
+ *
+ * Network device must already have association with RmNet Data driver
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ */
+int rmnet_set_egress_data_format(struct net_device *dev,
+ u32 egress_data_format,
+ u16 agg_size,
+ u16 agg_count)
+{
+ struct rmnet_phys_ep_conf_s *config;
+
+ ASSERT_RTNL();
+
+ LOGL("(%s,0x%08X, %d, %d);",
+ dev->name, egress_data_format, agg_size, agg_count);
+
+ if (!dev)
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ config = _rmnet_get_phys_ep_config(dev);
+
+ if (!config || (agg_count > RMNET_MAX_AGG_COUNT))
+ return RMNET_CONFIG_UNKNOWN_ERROR;
+
+ config->egress_data_format = egress_data_format;
+
+ return RMNET_CONFIG_OK;
+}
+
+/* rmnet_associate_network_device() - Associate network device
+ * @dev: Device to register with RmNet data
+ *
+ * Typically used on physical network devices. Registers RX handler and private
+ * metadata structures.
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
+ * - RMNET_CONFIG_INVALID_REQUEST if the device to be associated is a vnd
+ * - RMNET_CONFIG_DEVICE_IN_USE if dev rx_handler is already filled
+ * - RMNET_CONFIG_DEVICE_IN_USE if netdev_rx_handler_register() fails
+ */
+int rmnet_associate_network_device(struct net_device *dev)
+{
+ struct rmnet_phys_ep_conf_s *config;
+ int rc;
+
+ ASSERT_RTNL();
+
+ LOGL("(%s);\n", dev->name);
+
+ if (!dev)
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ if (_rmnet_is_physical_endpoint_associated(dev)) {
+ LOGM("%s is already regestered", dev->name);
+ return RMNET_CONFIG_DEVICE_IN_USE;
+ }
+
+ if (rmnet_vnd_is_vnd(dev)) {
+ LOGM("%s is a vnd", dev->name);
+ return RMNET_CONFIG_INVALID_REQUEST;
+ }
+
+ config = kmalloc(sizeof(*config), GFP_ATOMIC);
+
+ if (!config)
+ return RMNET_CONFIG_NOMEM;
+
+ memset(config, 0, sizeof(struct rmnet_phys_ep_conf_s));
+ config->dev = dev;
+
+ rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config);
+
+ if (rc) {
+ LOGM("netdev_rx_handler_register returns %d", rc);
+ kfree(config);
+ return RMNET_CONFIG_DEVICE_IN_USE;
+ }
+
+ /* Explicitly hold a reference to the device */
+ dev_hold(dev);
+ return RMNET_CONFIG_OK;
+}
+
+/* _rmnet_set_logical_endpoint_config() - Set logical endpoint config on device
+ * @dev: Device to set endpoint configuration on
+ * @config_id: logical endpoint id on device
+ * @epconfig: endpoint configuration structure to set
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ * - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null
+ * - RMNET_CONFIG_DEVICE_IN_USE if device already has a logical ep
+ * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
+ */
+int _rmnet_set_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ struct rmnet_logical_ep_conf_s *epconfig)
+{
+ struct rmnet_logical_ep_conf_s *epconfig_l;
+
+ ASSERT_RTNL();
+
+ if (!dev)
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT ||
+ config_id >= RMNET_MAX_LOGICAL_EP)
+ return RMNET_CONFIG_BAD_ARGUMENTS;
+
+ epconfig_l = _rmnet_get_logical_ep(dev, config_id);
+
+ if (!epconfig_l)
+ return RMNET_CONFIG_UNKNOWN_ERROR;
+
+ if (epconfig_l->refcount)
+ return RMNET_CONFIG_DEVICE_IN_USE;
+
+ memcpy(epconfig_l, epconfig, sizeof(struct rmnet_logical_ep_conf_s));
+ if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT)
+ epconfig_l->mux_id = 0;
+ else
+ epconfig_l->mux_id = config_id;
+
+ /* Explicitly hold a reference to the egress device */
+ dev_hold(epconfig_l->egress_dev);
+ return RMNET_CONFIG_OK;
+}
+
+/* _rmnet_unset_logical_endpoint_config() - Un-set the logical endpoint config
+ * on device
+ * @dev: Device to set endpoint configuration on
+ * @config_id: logical endpoint id on device
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ * - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null
+ * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
+ */
+int _rmnet_unset_logical_endpoint_config(struct net_device *dev,
+ int config_id)
+{
+ struct rmnet_logical_ep_conf_s *epconfig_l = 0;
+
+ ASSERT_RTNL();
+
+ if (!dev)
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT ||
+ config_id >= RMNET_MAX_LOGICAL_EP)
+ return RMNET_CONFIG_BAD_ARGUMENTS;
+
+ epconfig_l = _rmnet_get_logical_ep(dev, config_id);
+
+ if (!epconfig_l || !epconfig_l->refcount)
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ /* Explicitly release the reference from the egress device */
+ dev_put(epconfig_l->egress_dev);
+ memset(epconfig_l, 0, sizeof(struct rmnet_logical_ep_conf_s));
+
+ return RMNET_CONFIG_OK;
+}
+
+/* rmnet_set_logical_endpoint_config() - Set logical endpoint config on a device
+ * @dev: Device to set endpoint configuration on
+ * @config_id: logical endpoint id on device
+ * @rmnet_mode: endpoint mode. Values from: rmnet_config_endpoint_modes_e
+ * @egress_device: device node to forward packet to once done processing in
+ * ingress/egress handlers
+ *
+ * Creates a logical_endpoint_config structure and fills in the information from
+ * function arguments. Calls _rmnet_set_logical_endpoint_config() to finish
+ * configuration. Network device must already have association with RmNet Data
+ * driver
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_BAD_EGRESS_DEVICE if egress device is null
+ * - RMNET_CONFIG_BAD_EGRESS_DEVICE if egress device is not handled by
+ * RmNet data module
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ * - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null
+ * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
+ */
+int rmnet_set_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ u8 rmnet_mode,
+ struct net_device *egress_dev)
+{
+ struct rmnet_logical_ep_conf_s epconfig;
+
+ LOGL("(%s, %d, %d, %s);",
+ dev->name, config_id, rmnet_mode, egress_dev->name);
+
+ if (!egress_dev ||
+ ((!_rmnet_is_physical_endpoint_associated(egress_dev)) &&
+ (!rmnet_vnd_is_vnd(egress_dev)))) {
+ return RMNET_CONFIG_BAD_EGRESS_DEVICE;
+ }
+
+ memset(&epconfig, 0, sizeof(struct rmnet_logical_ep_conf_s));
+ epconfig.refcount = 1;
+ epconfig.rmnet_mode = rmnet_mode;
+ epconfig.egress_dev = egress_dev;
+
+ return _rmnet_set_logical_endpoint_config(dev, config_id, &epconfig);
+}
+
+/* rmnet_unset_logical_endpoint_config() - Un-set logical endpoint configuration
+ * on a device
+ * @dev: Device to set endpoint configuration on
+ * @config_id: logical endpoint id on device
+ *
+ * Retrieves the logical_endpoint_config structure and frees the egress device.
+ * Network device must already have association with RmNet Data driver
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ * - RMNET_CONFIG_NO_SUCH_DEVICE device is not associated
+ * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
+ */
+int rmnet_unset_logical_endpoint_config(struct net_device *dev,
+ int config_id)
+{
+ LOGL("(%s, %d);", dev->name, config_id);
+
+ if (!dev ||
+ ((!_rmnet_is_physical_endpoint_associated(dev)) &&
+ (!rmnet_vnd_is_vnd(dev)))) {
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+ }
+
+ return _rmnet_unset_logical_endpoint_config(dev, config_id);
+}
+
+/* rmnet_get_logical_endpoint_config() - Gets logical endpoint configuration
+ * for a device
+ * @dev: Device to get endpoint configuration on
+ * @config_id: logical endpoint id on device
+ * @rmnet_mode: (I/O) logical endpoint mode
+ * @egress_dev_name: (I/O) logical endpoint egress device name
+ * @egress_dev_name_size: The maximum size of the I/O egress_dev_name buffer
+ *
+ * Retrieves the logical_endpoint_config structure.
+ * Network device must already have association with RmNet Data driver
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ * - RMNET_CONFIG_NO_SUCH_DEVICE device is not associated
+ * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range or
+ * if the provided buffer size for egress dev name is too short
+ */
+int rmnet_get_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ u8 *rmnet_mode,
+ u8 *egress_dev_name,
+ size_t egress_dev_name_size)
+{
+ struct rmnet_logical_ep_conf_s *epconfig_l = 0;
+ size_t strlcpy_res = 0;
+
+ LOGL("(%s, %d);", dev->name, config_id);
+
+ if (!egress_dev_name || !rmnet_mode)
+ return RMNET_CONFIG_BAD_ARGUMENTS;
+ if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT ||
+ config_id >= RMNET_MAX_LOGICAL_EP)
+ return RMNET_CONFIG_BAD_ARGUMENTS;
+
+ epconfig_l = _rmnet_get_logical_ep(dev, config_id);
+
+ if (!epconfig_l || !epconfig_l->refcount)
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ *rmnet_mode = epconfig_l->rmnet_mode;
+
+ strlcpy_res = strlcpy(egress_dev_name, epconfig_l->egress_dev->name,
+ egress_dev_name_size);
+
+ if (strlcpy_res >= egress_dev_name_size)
+ return RMNET_CONFIG_BAD_ARGUMENTS;
+
+ return RMNET_CONFIG_OK;
+}
+
+/* rmnet_create_vnd() - Create virtual network device node
+ * @id: RmNet virtual device node id
+ *
+ * Return:
+ * - result of rmnet_vnd_create_dev()
+ */
+int rmnet_create_vnd(int id)
+{
+ struct net_device *dev;
+
+ ASSERT_RTNL();
+ LOGL("(%d);", id);
+ return rmnet_vnd_create_dev(id, &dev, NULL);
+}
+
+/* rmnet_create_vnd_prefix() - Create virtual network device node with prefix
+ * @id: RmNet virtual device node id
+ * @prefix: String prefix for device name
+ *
+ * Return:
+ * - result of rmnet_vnd_create_dev()
+ */
+int rmnet_create_vnd_prefix(int id, const char *prefix)
+{
+ struct net_device *dev;
+
+ ASSERT_RTNL();
+ LOGL("(%d, \"%s\");", id, prefix);
+ return rmnet_vnd_create_dev(id, &dev, prefix);
+}
+
+/* rmnet_free_vnd() - Free virtual network device node
+ * @id: RmNet virtual device node id
+ *
+ * Return:
+ * - result of rmnet_vnd_free_dev()
+ */
+int rmnet_free_vnd(int id)
+{
+ LOGL("(%d);", id);
+ return rmnet_vnd_free_dev(id);
+}
+
+static void _rmnet_free_vnd_later(struct work_struct *work)
+{
+ int i;
+ struct rmnet_free_vnd_work *fwork;
+
+ fwork = container_of(work, struct rmnet_free_vnd_work, work);
+
+ for (i = 0; i < fwork->count; i++)
+ rmnet_free_vnd(fwork->vnd_id[i]);
+ kfree(fwork);
+}
+
+/* rmnet_force_unassociate_device() - Force a device to unassociate
+ * @dev: Device to unassociate
+ *
+ * Return:
+ * - void
+ */
+static void rmnet_force_unassociate_device(struct net_device *dev)
+{
+ int i, j;
+ struct net_device *vndev;
+ struct rmnet_logical_ep_conf_s *cfg;
+ struct rmnet_free_vnd_work *vnd_work;
+
+ ASSERT_RTNL();
+ if (!dev)
+ return;
+
+ if (!_rmnet_is_physical_endpoint_associated(dev)) {
+ LOGM("%s", "Called on unassociated device, skipping");
+ return;
+ }
+
+ vnd_work = kmalloc(sizeof(*vnd_work), GFP_KERNEL);
+ if (!vnd_work) {
+ LOGH("%s", "Out of Memory");
+ return;
+ }
+ INIT_WORK(&vnd_work->work, _rmnet_free_vnd_later);
+ vnd_work->count = 0;
+
+ /* Check the VNDs for offending mappings */
+ for (i = 0, j = 0; i < RMNET_MAX_VND &&
+ j < RMNET_MAX_VND; i++) {
+ vndev = rmnet_vnd_get_by_id(i);
+ if (!vndev) {
+ LOGL("VND %d not in use; skipping", i);
+ continue;
+ }
+ cfg = rmnet_vnd_get_le_config(vndev);
+ if (!cfg) {
+ LOGH("Got NULL config from VND %d", i);
+ continue;
+ }
+ if (cfg->refcount && (cfg->egress_dev == dev)) {
+ /* Make sure the device is down before clearing any of
+ * the mappings. Otherwise we could see a potential
+ * race condition if packets are actively being
+ * transmitted.
+ */
+ dev_close(vndev);
+ rmnet_unset_logical_endpoint_config
+ (vndev, RMNET_LOCAL_LOGICAL_ENDPOINT);
+ vnd_work->vnd_id[j] = i;
+ j++;
+ }
+ }
+ if (j > 0) {
+ vnd_work->count = j;
+ schedule_work(&vnd_work->work);
+ } else {
+ kfree(vnd_work);
+ }
+
+ /* Clear the mappings on the phys ep */
+ rmnet_unset_logical_endpoint_config(dev, RMNET_LOCAL_LOGICAL_ENDPOINT);
+ for (i = 0; i < RMNET_MAX_LOGICAL_EP; i++)
+ rmnet_unset_logical_endpoint_config(dev, i);
+ rmnet_unassociate_network_device(dev);
+}
+
+/* rmnet_config_notify_cb() - Callback for netdevice notifier chain
+ * @nb: Notifier block data
+ * @event: Netdevice notifier event ID
+ * @data: Contains a net device for which we are getting notified
+ *
+ * Return:
+ * - result of NOTIFY_DONE()
+ */
+int rmnet_config_notify_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(data);
+
+ if (!dev)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UNREGISTER_FINAL:
+ case NETDEV_UNREGISTER:
+ LOGH("Kernel is trying to unregister %s", dev->name);
+ rmnet_force_unassociate_device(dev);
+ break;
+
+ default:
+ LOGD("Unhandled event [%lu]", event);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
diff --git a/drivers/net/rmnet/rmnet_config.h b/drivers/net/rmnet/rmnet_config.h
new file mode 100644
index 000000000000..be2fc8964dad
--- /dev/null
+++ b/drivers/net/rmnet/rmnet_config.h
@@ -0,0 +1,107 @@
+/* Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data configuration engine
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/skbuff.h>
+
+#ifndef _RMNET_CONFIG_H_
+#define _RMNET_CONFIG_H_
+
+#define RMNET_MAX_LOGICAL_EP 256
+
+/* struct rmnet_logical_ep_conf_s - Logical end-point configuration
+ *
+ * @refcount: Reference count for this endpoint. 0 signifies the endpoint is not
+ * configured for use
+ * @rmnet_mode: Specifies how the traffic should be finally delivered. Possible
+ * options are available in enum rmnet_config_endpoint_modes_e
+ * @mux_id: Virtual channel ID used by MAP protocol
+ * @egress_dev: Next device to deliver the packet to. Exact usage of this
+ * parameter depends on the rmnet_mode
+ */
+struct rmnet_logical_ep_conf_s {
+ u8 refcount;
+ u8 rmnet_mode;
+ u8 mux_id;
+ struct timespec flush_time;
+ struct net_device *egress_dev;
+};
+
+/* struct rmnet_phys_ep_conf_s - Physical endpoint configuration
+ * One instance of this structure is instantiated for each net_device associated
+ * with rmnet.
+ *
+ * @dev: The device which is associated with rmnet. Corresponds to this
+ * specific instance of rmnet_phys_ep_conf_s
+ * @local_ep: Default non-muxed endpoint. Used for non-MAP protocols/formats
+ * @muxed_ep: All multiplexed logical endpoints associated with this device
+ * @ingress_data_format: RMNET_INGRESS_FORMAT_* flags from rmnet.h
+ * @egress_data_format: RMNET_EGRESS_FORMAT_* flags from rmnet.h
+ */
+struct rmnet_phys_ep_conf_s {
+ struct net_device *dev;
+ struct rmnet_logical_ep_conf_s local_ep;
+ struct rmnet_logical_ep_conf_s muxed_ep[RMNET_MAX_LOGICAL_EP];
+ u32 ingress_data_format;
+ u32 egress_data_format;
+};
+
+int rmnet_config_init(void);
+void rmnet_config_exit(void);
+
+int rmnet_unassociate_network_device(struct net_device *dev);
+int rmnet_set_ingress_data_format(struct net_device *dev,
+ u32 ingress_data_format);
+int rmnet_set_egress_data_format(struct net_device *dev,
+ u32 egress_data_format,
+ u16 agg_size,
+ u16 agg_count);
+int rmnet_associate_network_device(struct net_device *dev);
+int _rmnet_set_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ struct rmnet_logical_ep_conf_s *epconfig);
+int rmnet_set_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ u8 rmnet_mode,
+ struct net_device *egress_dev);
+int _rmnet_unset_logical_endpoint_config(struct net_device *dev,
+ int config_id);
+int rmnet_unset_logical_endpoint_config(struct net_device *dev,
+ int config_id);
+int _rmnet_get_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ struct rmnet_logical_ep_conf_s *epconfig);
+int rmnet_get_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ u8 *rmnet_mode,
+ u8 *egress_dev_name,
+ size_t egress_dev_name_size);
+void rmnet_config_netlink_msg_handler(struct sk_buff *skb);
+int rmnet_config_notify_cb(struct notifier_block *nb,
+ unsigned long event, void *data);
+int rmnet_create_vnd(int id);
+int rmnet_create_vnd_prefix(int id, const char *name);
+int rmnet_free_vnd(int id);
+
+#endif /* _RMNET_CONFIG_H_ */
diff --git a/drivers/net/rmnet/rmnet_handlers.c b/drivers/net/rmnet/rmnet_handlers.c
new file mode 100644
index 000000000000..c2ade2d61e2f
--- /dev/null
+++ b/drivers/net/rmnet/rmnet_handlers.c
@@ -0,0 +1,550 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data ingress/egress handler
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/rmnet.h>
+#include <linux/netdev_features.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "rmnet_private.h"
+#include "rmnet_config.h"
+#include "rmnet_vnd.h"
+#include "rmnet_map.h"
+#include "rmnet_stats.h"
+#include "rmnet_handlers.h"
+
+RMNET_LOG_MODULE(RMNET_LOGMASK_HANDLER);
+
+#ifdef CONFIG_RMNET_DEBUG
+unsigned int dump_pkt_rx;
+module_param(dump_pkt_rx, uint, 0644);
+MODULE_PARM_DESC(dump_pkt_rx, "Dump packets entering ingress handler");
+
+unsigned int dump_pkt_tx;
+module_param(dump_pkt_tx, uint, 0644);
+MODULE_PARM_DESC(dump_pkt_tx, "Dump packets exiting egress handler");
+#endif /* CONFIG_RMNET_DEBUG */
+
+#define RMNET_IP_VERSION_4 0x40
+#define RMNET_IP_VERSION_6 0x60
+
+/* Helper Functions */
+
+/* __rmnet_set_skb_proto() - Set skb->protocol field
+ * @skb: packet being modified
+ *
+ * Peek at the first byte of the packet and set the protocol. There is no
+ * good way to determine if a packet has a MAP header. As of this writing,
+ * the reserved bit in the MAP frame will prevent it from overlapping with
+ * IPv4/IPv6 frames. This could change in the future!
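+ * For example, a first byte of 0x45 (IPv4) maps to ETH_P_IP, 0x60-0x6F to
+ * ETH_P_IPV6, and anything else is treated as ETH_P_MAP.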
+ */
+static inline void __rmnet_set_skb_proto(struct sk_buff *skb)
+{
+ switch (skb->data[0] & 0xF0) {
+ case RMNET_IP_VERSION_4:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case RMNET_IP_VERSION_6:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ skb->protocol = htons(ETH_P_MAP);
+ break;
+ }
+}
+
+#ifdef CONFIG_RMNET_DEBUG
+/* rmnet_print_packet() - Print packet / diagnostics
+ * @skb: Packet to print
+ * @printlen: Number of bytes to print
+ * @dev: Name of interface
+ * @dir: Character representing direction (e.g., 'r' for receive)
+ *
+ * This function prints out raw bytes in an SKB. Use of this will have major
+ * performance impacts and may even trigger watchdog resets if too much is being
+ * printed. Hence, this should always be compiled out unless absolutely needed.
+ */
+void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
+{
+ char buffer[200];
+ unsigned int len, printlen;
+ int i, buffloc = 0;
+
+ switch (dir) {
+ case 'r':
+ printlen = dump_pkt_rx;
+ break;
+
+ case 't':
+ printlen = dump_pkt_tx;
+ break;
+
+ default:
+ printlen = 0;
+ break;
+ }
+
+ if (!printlen)
+ return;
+
+ pr_err("[%s][%c] - PKT skb->len=%d skb->head=%pK skb->data=%pK\n",
+ dev, dir, skb->len, (void *)skb->head, (void *)skb->data);
+ pr_err("[%s][%c] - PKT skb->tail=%pK skb->end=%pK\n",
+ dev, dir, skb_tail_pointer(skb), skb_end_pointer(skb));
+
+ if (skb->len > 0)
+ len = skb->len;
+ else
+ len = ((unsigned int)(uintptr_t)skb->end) -
+ ((unsigned int)(uintptr_t)skb->data);
+
+ pr_err("[%s][%c] - PKT len: %d, printing first %d bytes\n",
+ dev, dir, len, printlen);
+
+ memset(buffer, 0, sizeof(buffer));
+ for (i = 0; (i < printlen) && (i < len); i++) {
+ if ((i % 16) == 0) {
+ pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
+ memset(buffer, 0, sizeof(buffer));
+ buffloc = 0;
+ buffloc += snprintf(&buffer[buffloc],
+ sizeof(buffer) - buffloc, "%04X:",
+ i);
+ }
+
+ buffloc += snprintf(&buffer[buffloc], sizeof(buffer) - buffloc,
+ " %02x", skb->data[i]);
+ }
+ pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
+}
+#else
+void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
+{
+}
+#endif /* CONFIG_RMNET_DEBUG */
+
+/* Generic handler */
+
+/* rmnet_bridge_handler() - Bridge related functionality
+ *
+ * Return:
+ * - RX_HANDLER_CONSUMED in all cases
+ */
+static rx_handler_result_t rmnet_bridge_handler
+ (struct sk_buff *skb, struct rmnet_logical_ep_conf_s *ep)
+{
+ if (!ep->egress_dev) {
+ LOGD("Missing egress device for packet arriving on %s",
+ skb->dev->name);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_BRDG_NO_EGRESS);
+ } else {
+ rmnet_egress_handler(skb, ep);
+ }
+
+ return RX_HANDLER_CONSUMED;
+}
+
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+static void rmnet_reset_mac_header(struct sk_buff *skb)
+{
+ skb->mac_header = 0;
+ skb->mac_len = 0;
+}
+#else
+static void rmnet_reset_mac_header(struct sk_buff *skb)
+{
+ skb->mac_header = skb->network_header;
+ skb->mac_len = 0;
+}
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+
+/* __rmnet_deliver_skb() - Deliver skb
+ *
+ * Determines where to deliver skb. Options are: consume by network stack,
+ * pass to bridge handler, or pass to virtual network device
+ *
+ * Return:
+ * - RX_HANDLER_CONSUMED if packet forwarded or dropped
+ * - RX_HANDLER_PASS if packet is to be consumed by network stack as-is
+ */
+static rx_handler_result_t __rmnet_deliver_skb
+ (struct sk_buff *skb, struct rmnet_logical_ep_conf_s *ep)
+{
+ switch (ep->rmnet_mode) {
+ case RMNET_EPMODE_NONE:
+ return RX_HANDLER_PASS;
+
+ case RMNET_EPMODE_BRIDGE:
+ return rmnet_bridge_handler(skb, ep);
+
+ case RMNET_EPMODE_VND:
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+ switch (rmnet_vnd_rx_fixup(skb, skb->dev)) {
+ case RX_HANDLER_CONSUMED:
+ return RX_HANDLER_CONSUMED;
+
+ case RX_HANDLER_PASS:
+ skb->pkt_type = PACKET_HOST;
+ rmnet_reset_mac_header(skb);
+ netif_receive_skb(skb);
+ return RX_HANDLER_CONSUMED;
+ }
+ return RX_HANDLER_PASS;
+
+ default:
+ LOGD("Unknown ep mode %d", ep->rmnet_mode);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_DELIVER_NO_EP);
+ return RX_HANDLER_CONSUMED;
+ }
+}
+
+/* rmnet_ingress_deliver_packet() - Ingress handler for raw IP and bridged
+ * MAP packets.
+ * @skb: Packet needing a destination.
+ * @config: Physical end point configuration that the packet arrived on.
+ *
+ * Return:
+ * - RX_HANDLER_CONSUMED if packet forwarded/dropped
+ * - RX_HANDLER_PASS if packet should be passed up the stack by caller
+ */
+static rx_handler_result_t rmnet_ingress_deliver_packet
+ (struct sk_buff *skb, struct rmnet_phys_ep_conf_s *config)
+{
+ if (!config) {
+ LOGD("%s", "NULL physical EP provided");
+ kfree_skb(skb);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ if (!(config->local_ep.refcount)) {
+ LOGD("Packet on %s has no local endpoint configuration",
+ skb->dev->name);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_IPINGRESS_NO_EP);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ skb->dev = config->local_ep.egress_dev;
+
+ return __rmnet_deliver_skb(skb, &config->local_ep);
+}
+
+/* MAP handler */
+
+/* _rmnet_map_ingress_handler() - Actual MAP ingress handler
+ * @skb: Packet being received
+ * @config: Physical endpoint configuration for the ingress device
+ *
+ * Most of the MAP ingress handling is done here. Packets are processed
+ * individually; aggregated packets should use rmnet_map_ingress_handler()
+ *
+ * Return:
+ * - RX_HANDLER_CONSUMED if packet is dropped
+ * - result of __rmnet_deliver_skb() for all other cases
+ */
+static rx_handler_result_t _rmnet_map_ingress_handler
+ (struct sk_buff *skb, struct rmnet_phys_ep_conf_s *config)
+{
+ struct rmnet_logical_ep_conf_s *ep;
+ u8 mux_id;
+ u16 len;
+
+ if (RMNET_MAP_GET_CD_BIT(skb)) {
+ if (config->ingress_data_format
+ & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
+ return rmnet_map_command(skb, config);
+
+ LOGM("MAP command packet on %s; %s", skb->dev->name,
+ "Not configured for MAP commands");
+ rmnet_kfree_skb(skb,
+ RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPC);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ mux_id = RMNET_MAP_GET_MUX_ID(skb);
+ len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);
+
+ if (mux_id >= RMNET_MAX_LOGICAL_EP) {
+ LOGD("Got packet on %s with bad mux id %d",
+ skb->dev->name, mux_id);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ ep = &config->muxed_ep[mux_id];
+
+ if (!ep->refcount) {
+ LOGD("Packet on %s:%d; has no logical endpoint config",
+ skb->dev->name, mux_id);
+
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
+ skb->dev = ep->egress_dev;
+
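+	/* Illustrative: a frame whose MAP length field reads 1402 with pad 2
+	 * gives len = 1400 above; the MAP header is pulled off the front and
+	 * the two trailing pad bytes are trimmed from the tail.
+	 */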
+ /* Subtract MAP header */
+ skb_pull(skb, sizeof(struct rmnet_map_header_s));
+ skb_trim(skb, len);
+ __rmnet_set_skb_proto(skb);
+ return __rmnet_deliver_skb(skb, ep);
+}
+
+/* rmnet_map_ingress_handler() - MAP ingress handler
+ * @skb: Packet being received
+ * @config: Physical endpoint configuration for the ingress device
+ *
+ * Called if and only if MAP is configured in the ingress device's ingress data
+ * format. Deaggregation is done here, actual MAP processing is done in
+ * _rmnet_map_ingress_handler().
+ *
+ * Return:
+ * - RX_HANDLER_CONSUMED for aggregated packets
+ * - RX_HANDLER_CONSUMED for dropped packets
+ * - result of _rmnet_map_ingress_handler() for all other cases
+ */
+static rx_handler_result_t rmnet_map_ingress_handler
+ (struct sk_buff *skb, struct rmnet_phys_ep_conf_s *config)
+{
+ struct sk_buff *skbn;
+ int rc, co = 0;
+
+ if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
+ while ((skbn = rmnet_map_deaggregate(skb, config)) != NULL) {
+ _rmnet_map_ingress_handler(skbn, config);
+ co++;
+ }
+ LOGD("De-aggregated %d packets", co);
+ rmnet_stats_deagg_pkts(co);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF);
+ rc = RX_HANDLER_CONSUMED;
+ } else {
+ rc = _rmnet_map_ingress_handler(skb, config);
+ }
+
+ return rc;
+}
+
+/* rmnet_map_egress_handler() - MAP egress handler
+ * @skb: Packet being sent
+ * @config: Physical endpoint configuration for the egress device
+ * @ep: logical endpoint configuration of the packet originator
+ *       (e.g., RmNet virtual network device)
+ * @orig_dev: The originator vnd device
+ *
+ * Called if and only if MAP is configured in the egress device's egress data
+ * format. Will expand skb if there is insufficient headroom for MAP protocol.
+ * Note: headroom expansion will incur a performance penalty.
+ *
+ * Return:
+ * - RMNET_MAP_SUCCESS if the MAP header was added and the packet may be
+ *   transmitted
+ * - RMNET_MAP_CONSUMED if the packet was consumed by MAP processing
+ */
+static int rmnet_map_egress_handler(struct sk_buff *skb,
+ struct rmnet_phys_ep_conf_s *config,
+ struct rmnet_logical_ep_conf_s *ep,
+ struct net_device *orig_dev)
+{
+ int required_headroom, additional_header_length;
+ struct rmnet_map_header_s *map_header;
+
+ additional_header_length = 0;
+ required_headroom = sizeof(struct rmnet_map_header_s);
+
+ LOGD("headroom of %d bytes", required_headroom);
+
+ if (skb_headroom(skb) < required_headroom) {
+ if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) {
+ LOGD("Failed to add headroom of %d bytes",
+ required_headroom);
+ return RMNET_MAP_CONSUMED;
+ }
+ }
+
+ map_header = rmnet_map_add_map_header
+ (skb, additional_header_length, RMNET_MAP_NO_PAD_BYTES);
+
+ if (!map_header) {
+ LOGD("%s", "Failed to add MAP header to egress packet");
+ return RMNET_MAP_CONSUMED;
+ }
+
+ if (config->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
+ if (ep->mux_id == 0xff)
+ map_header->mux_id = 0;
+ else
+ map_header->mux_id = ep->mux_id;
+ }
+
+ skb->protocol = htons(ETH_P_MAP);
+
+ return RMNET_MAP_SUCCESS;
+}
+
+/* Ingress / Egress Entry Points */
+
+/* rmnet_ingress_handler() - Ingress handler entry point
+ * @skb: Packet being received
+ *
+ * Processes packet as per ingress data format for receiving device. Logical
+ * endpoint is determined from packet inspection. Packet is then sent to the
+ * egress device listed in the logical endpoint configuration.
+ *
+ * Return:
+ * - RX_HANDLER_PASS if packet is not processed by handler (caller must
+ * deal with the packet)
+ * - RX_HANDLER_CONSUMED if packet is forwarded or processed by MAP
+ */
+rx_handler_result_t rmnet_ingress_handler(struct sk_buff *skb)
+{
+ struct rmnet_phys_ep_conf_s *config;
+ struct net_device *dev;
+ int rc;
+
+ if (!skb)
+ return RX_HANDLER_CONSUMED;
+
+ dev = skb->dev;
+ rmnet_print_packet(skb, dev->name, 'r');
+
+ config = (struct rmnet_phys_ep_conf_s *)
+ rcu_dereference(skb->dev->rx_handler_data);
+
+ if (!config) {
+ LOGD("%s is not associated with rmnet", skb->dev->name);
+ kfree_skb(skb);
+ return RX_HANDLER_CONSUMED;
+ }
+
+	/* Sometimes devices operate in ethernet mode even though there is no
+ * ethernet header. This causes the skb->protocol to contain a bogus
+ * value and the skb->data pointer to be off by 14 bytes. Fix it if
+ * configured to do so
+ */
+ if (config->ingress_data_format & RMNET_INGRESS_FIX_ETHERNET) {
+ skb_push(skb, RMNET_ETHERNET_HEADER_LENGTH);
+ __rmnet_set_skb_proto(skb);
+ }
+
+ if (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) {
+ rc = rmnet_map_ingress_handler(skb, config);
+ } else {
+ switch (ntohs(skb->protocol)) {
+ case ETH_P_MAP:
+ if (config->local_ep.rmnet_mode ==
+ RMNET_EPMODE_BRIDGE) {
+ rc = rmnet_ingress_deliver_packet(skb, config);
+ } else {
+ LOGD("MAP packet on %s; MAP not set",
+ dev->name);
+ rmnet_kfree_skb
+ (skb,
+ RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPD);
+ rc = RX_HANDLER_CONSUMED;
+ }
+ break;
+
+ case ETH_P_ARP:
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ rc = rmnet_ingress_deliver_packet(skb, config);
+ break;
+
+ default:
+ LOGD("Unknown skb->proto 0x%04X",
+ ntohs(skb->protocol) & 0xFFFF);
+ rc = RX_HANDLER_PASS;
+ }
+ }
+
+ return rc;
+}
+
+/* rmnet_rx_handler() - Rx handler callback registered with kernel
+ * @pskb: Packet to be processed by rx handler
+ *
+ * Standard kernel-expected signature for an rx handler. Calls
+ * rmnet_ingress_handler() with correctly formatted arguments
+ *
+ * Return:
+ * - Whatever rmnet_ingress_handler() returns
+ */
+rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
+{
+ return rmnet_ingress_handler(*pskb);
+}
+
+/* rmnet_egress_handler() - Egress handler entry point
+ * @skb: packet to transmit
+ * @ep: logical endpoint configuration of the packet originator
+ *      (e.g., RmNet virtual network device)
+ *
+ * Modifies packet as per logical endpoint configuration and egress data format
+ * for egress device configured in logical endpoint. Packet is then transmitted
+ * on the egress device.
+ */
+void rmnet_egress_handler(struct sk_buff *skb,
+ struct rmnet_logical_ep_conf_s *ep)
+{
+ struct rmnet_phys_ep_conf_s *config;
+ struct net_device *orig_dev;
+ int rc;
+
+ orig_dev = skb->dev;
+ skb->dev = ep->egress_dev;
+
+ config = (struct rmnet_phys_ep_conf_s *)
+ rcu_dereference(skb->dev->rx_handler_data);
+
+ if (!config) {
+ LOGD("%s is not associated with rmnet", skb->dev->name);
+ kfree_skb(skb);
+ return;
+ }
+
+ LOGD("Packet going out on %s with egress format 0x%08X",
+ skb->dev->name, config->egress_data_format);
+
+ if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
+ switch (rmnet_map_egress_handler(skb, config, ep, orig_dev)) {
+ case RMNET_MAP_CONSUMED:
+ LOGD("%s", "MAP process consumed packet");
+ return;
+
+ case RMNET_MAP_SUCCESS:
+ break;
+
+ default:
+ LOGD("MAP egress failed on packet on %s",
+ skb->dev->name);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_EGR_MAPFAIL);
+ return;
+ }
+ }
+
+ if (ep->rmnet_mode == RMNET_EPMODE_VND)
+ rmnet_vnd_tx_fixup(skb, orig_dev);
+
+ rmnet_print_packet(skb, skb->dev->name, 't');
+ rc = dev_queue_xmit(skb);
+ if (rc != 0) {
+ LOGD("Failed to queue packet for transmission on [%s]",
+ skb->dev->name);
+ }
+ rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_EGRESS);
+}
diff --git a/drivers/net/rmnet/rmnet_handlers.h b/drivers/net/rmnet/rmnet_handlers.h
new file mode 100644
index 000000000000..43c42c2130cd
--- /dev/null
+++ b/drivers/net/rmnet/rmnet_handlers.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2013, 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data ingress/egress handler
+ *
+ */
+
+#ifndef _RMNET_HANDLERS_H_
+#define _RMNET_HANDLERS_H_
+
+void rmnet_egress_handler(struct sk_buff *skb,
+ struct rmnet_logical_ep_conf_s *ep);
+
+rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
+
+#endif /* _RMNET_HANDLERS_H_ */
diff --git a/drivers/net/rmnet/rmnet_main.c b/drivers/net/rmnet/rmnet_main.c
new file mode 100644
index 000000000000..677791893ad4
--- /dev/null
+++ b/drivers/net/rmnet/rmnet_main.c
@@ -0,0 +1,60 @@
+/* Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data generic framework
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include "rmnet_private.h"
+#include "rmnet_config.h"
+#include "rmnet_vnd.h"
+
+/* Trace Points */
+#define CREATE_TRACE_POINTS
+#include "rmnet_trace.h"
+
+/* Module Parameters */
+unsigned int rmnet_log_level = RMNET_LOG_LVL_ERR | RMNET_LOG_LVL_HI;
+module_param(rmnet_log_level, uint, 0644);
+MODULE_PARM_DESC(rmnet_log_level, "Logging level");
+
+unsigned int rmnet_log_module_mask;
+module_param(rmnet_log_module_mask, uint, 0644);
+MODULE_PARM_DESC(rmnet_log_module_mask, "Logging module mask");
+
+/* Startup/Shutdown */
+
+/* rmnet_init() - Module initialization
+ *
+ * todo: check for (and handle) startup errors
+ */
+static int __init rmnet_init(void)
+{
+ rmnet_config_init();
+ rmnet_vnd_init();
+
+ LOGL("%s", "RMNET Data driver loaded successfully");
+ return 0;
+}
+
+static void __exit rmnet_exit(void)
+{
+ rmnet_config_exit();
+ rmnet_vnd_exit();
+}
+
+module_init(rmnet_init)
+module_exit(rmnet_exit)
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/rmnet/rmnet_map.h b/drivers/net/rmnet/rmnet_map.h
new file mode 100644
index 000000000000..7d533aa5fbca
--- /dev/null
+++ b/drivers/net/rmnet/rmnet_map.h
@@ -0,0 +1,100 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+#ifndef _RMNET_MAP_H_
+#define _RMNET_MAP_H_
+
+struct rmnet_map_control_command_s {
+ u8 command_name;
+ u8 cmd_type:2;
+ u8 reserved:6;
+ u16 reserved2;
+ u32 transaction_id;
+ union {
+ u8 data[65528];
+ struct {
+ u16 ip_family:2;
+ u16 reserved:14;
+ u16 flow_control_seq_num;
+ u32 qos_id;
+ } flow_control;
+ };
+} __aligned(1);
+
+enum rmnet_map_results_e {
+ RMNET_MAP_SUCCESS,
+ RMNET_MAP_CONSUMED,
+ RMNET_MAP_GENERAL_FAILURE,
+ RMNET_MAP_NOT_ENABLED,
+ RMNET_MAP_FAILED_AGGREGATION,
+ RMNET_MAP_FAILED_MUX
+};
+
+enum rmnet_map_mux_errors_e {
+ RMNET_MAP_MUX_SUCCESS,
+ RMNET_MAP_MUX_INVALID_MUX_ID,
+ RMNET_MAP_MUX_INVALID_PAD_LENGTH,
+ RMNET_MAP_MUX_INVALID_PKT_LENGTH,
+ /* This should always be the last element */
+ RMNET_MAP_MUX_ENUM_LENGTH
+};
+
+enum rmnet_map_commands_e {
+ RMNET_MAP_COMMAND_NONE,
+ RMNET_MAP_COMMAND_FLOW_DISABLE,
+ RMNET_MAP_COMMAND_FLOW_ENABLE,
+ /* These should always be the last 2 elements */
+ RMNET_MAP_COMMAND_UNKNOWN,
+ RMNET_MAP_COMMAND_ENUM_LENGTH
+};
+
+struct rmnet_map_header_s {
+ u8 pad_len:6;
+ u8 reserved_bit:1;
+ u8 cd_bit:1;
+ u8 mux_id;
+ u16 pkt_len;
+} __aligned(1);
+
+#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header_s *) \
+ (Y)->data)->mux_id)
+#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header_s *) \
+ (Y)->data)->cd_bit)
+#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header_s *) \
+ (Y)->data)->pad_len)
+#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command_s *) \
+ ((Y)->data + \
+ sizeof(struct rmnet_map_header_s)))
+#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header_s *) \
+ (Y)->data)->pkt_len))
+
+#define RMNET_MAP_COMMAND_REQUEST 0
+#define RMNET_MAP_COMMAND_ACK 1
+#define RMNET_MAP_COMMAND_UNSUPPORTED 2
+#define RMNET_MAP_COMMAND_INVALID 3
+
+#define RMNET_MAP_NO_PAD_BYTES 0
+#define RMNET_MAP_ADD_PAD_BYTES 1
+
+u8 rmnet_map_demultiplex(struct sk_buff *skb);
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_phys_ep_conf_s *config);
+
+struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
+ int hdrlen, int pad);
+rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
+ struct rmnet_phys_ep_conf_s *config);
+
+#endif /* _RMNET_MAP_H_ */
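
For illustration only (not part of this patch): a minimal sketch of how the accessor macros above might be used to inspect a single MAP frame at skb->data. The helper name and the sanity checks are hypothetical; a real user would include <linux/skbuff.h> and "rmnet_map.h".

/* Hypothetical sketch: classify one MAP frame using the accessors above.
 * Assumes the full MAP header is in the linear part of the skb.
 */
static int example_classify_map_frame(struct sk_buff *skb)
{
	u16 frame_len, payload_len;

	if (skb->len < sizeof(struct rmnet_map_header_s))
		return -EINVAL;

	if (RMNET_MAP_GET_CD_BIT(skb))
		return 1;	/* command frame; handled by rmnet_map_command() */

	frame_len = RMNET_MAP_GET_LENGTH(skb);	/* payload + padding, host order */
	payload_len = frame_len - RMNET_MAP_GET_PAD(skb);

	pr_debug("mux_id %u carries %u payload bytes\n",
		 RMNET_MAP_GET_MUX_ID(skb), payload_len);
	return 0;
}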
diff --git a/drivers/net/rmnet/rmnet_map_command.c b/drivers/net/rmnet/rmnet_map_command.c
new file mode 100644
index 000000000000..13bcee3cfdac
--- /dev/null
+++ b/drivers/net/rmnet/rmnet_map_command.c
@@ -0,0 +1,180 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rmnet.h>
+#include <net/pkt_sched.h>
+#include "rmnet_config.h"
+#include "rmnet_map.h"
+#include "rmnet_private.h"
+#include "rmnet_vnd.h"
+#include "rmnet_stats.h"
+
+RMNET_LOG_MODULE(RMNET_LOGMASK_MAPC);
+
+unsigned long int rmnet_map_command_stats[RMNET_MAP_COMMAND_ENUM_LENGTH];
+module_param_array(rmnet_map_command_stats, ulong, 0, 0444);
+MODULE_PARM_DESC(rmnet_map_command_stats, "MAP command statistics");
+
+/* rmnet_map_do_flow_control() - Process MAP flow control command
+ * @skb: Socket buffer containing the MAP flow control message
+ * @config: Physical end-point configuration of ingress device
+ * @enable: boolean for enable/disable
+ *
+ * Process in-band MAP flow control messages. Assumes the mux ID is mapped to
+ * an RmNet Data virtual network device.
+ *
+ * Return:
+ * - RMNET_MAP_COMMAND_UNSUPPORTED on any error
+ * - RMNET_MAP_COMMAND_ACK on success
+ */
+static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
+ struct rmnet_phys_ep_conf_s *config,
+ int enable)
+{
+ struct rmnet_map_control_command_s *cmd;
+ struct net_device *vnd;
+ struct rmnet_logical_ep_conf_s *ep;
+ u8 mux_id;
+ u16 ip_family;
+ u16 fc_seq;
+ u32 qos_id;
+ int r;
+
+ if (unlikely(!skb || !config))
+ return RX_HANDLER_CONSUMED;
+
+ mux_id = RMNET_MAP_GET_MUX_ID(skb);
+ cmd = RMNET_MAP_GET_CMD_START(skb);
+
+ if (mux_id >= RMNET_MAX_LOGICAL_EP) {
+ LOGD("Got packet on %s with bad mux id %d",
+ skb->dev->name, mux_id);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_BAD_MUX);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ ep = &config->muxed_ep[mux_id];
+
+ if (!ep->refcount) {
+ LOGD("Packet on %s:%d; has no logical endpoint config",
+ skb->dev->name, mux_id);
+
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_MUX_NO_EP);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ vnd = ep->egress_dev;
+
+ ip_family = cmd->flow_control.ip_family;
+ fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
+ qos_id = ntohl(cmd->flow_control.qos_id);
+
+ /* Ignore the ip family and pass the sequence number for both the v4 and
+ * v6 sequences. User space does not support creating dedicated flows for
+ * the two protocols.
+ */
+ r = rmnet_vnd_do_flow_control(vnd, enable);
+ LOGD("dev:%s, qos_id:0x%08X, ip_family:%hd, fc_seq %hd, en:%d",
+ skb->dev->name, qos_id, ip_family & 3, fc_seq, enable);
+
+ if (r) {
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED);
+ return RMNET_MAP_COMMAND_UNSUPPORTED;
+ } else {
+ return RMNET_MAP_COMMAND_ACK;
+ }
+}
+
+/* rmnet_map_send_ack() - Send N/ACK message for MAP commands
+ * @skb: Socket buffer containing the MAP command message
+ * @type: N/ACK message selector
+ * @config: Physical end-point configuration of ingress device
+ *
+ * skb is modified to contain the message type selector. The message is then
+ * transmitted on skb->dev. Note that this function grabs global Tx lock on
+ * skb->dev for latency reasons.
+ *
+ * Return:
+ * - void
+ */
+static void rmnet_map_send_ack(struct sk_buff *skb,
+ unsigned char type,
+ struct rmnet_phys_ep_conf_s *config)
+{
+ struct rmnet_map_control_command_s *cmd;
+ int xmit_status;
+
+ if (unlikely(!skb))
+ return;
+
+ skb->protocol = htons(ETH_P_MAP);
+
+ cmd = RMNET_MAP_GET_CMD_START(skb);
+ cmd->cmd_type = type & 0x03;
+
+ netif_tx_lock(skb->dev);
+ xmit_status = skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
+ netif_tx_unlock(skb->dev);
+
+ LOGD("MAP command ACK=%hhu sent with rc: %d", type & 0x03, xmit_status);
+}
+
+/* rmnet_map_command() - Entry point for handling MAP commands
+ * @skb: Socket buffer containing the MAP command message
+ * @config: Physical end-point configuration of ingress device
+ *
+ * Process MAP command frame and send N/ACK message as appropriate. Message cmd
+ * name is decoded here and appropriate handler is called.
+ *
+ * Return:
+ * - RX_HANDLER_CONSUMED. Command frames are always consumed.
+ */
+rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
+ struct rmnet_phys_ep_conf_s *config)
+{
+ struct rmnet_map_control_command_s *cmd;
+ unsigned char command_name;
+ unsigned char rc = 0;
+
+ if (unlikely(!skb))
+ return RX_HANDLER_CONSUMED;
+
+ cmd = RMNET_MAP_GET_CMD_START(skb);
+ command_name = cmd->command_name;
+
+ if (command_name < RMNET_MAP_COMMAND_ENUM_LENGTH)
+ rmnet_map_command_stats[command_name]++;
+
+ switch (command_name) {
+ case RMNET_MAP_COMMAND_FLOW_ENABLE:
+ rc = rmnet_map_do_flow_control(skb, config, 1);
+ break;
+
+ case RMNET_MAP_COMMAND_FLOW_DISABLE:
+ rc = rmnet_map_do_flow_control(skb, config, 0);
+ break;
+
+ default:
+ rmnet_map_command_stats[RMNET_MAP_COMMAND_UNKNOWN]++;
+ LOGM("Uknown MAP command: %d", command_name);
+ rc = RMNET_MAP_COMMAND_UNSUPPORTED;
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED);
+ break;
+ }
+ if (rc == RMNET_MAP_COMMAND_ACK)
+ rmnet_map_send_ack(skb, rc, config);
+ return RX_HANDLER_CONSUMED;
+}
diff --git a/drivers/net/rmnet/rmnet_map_data.c b/drivers/net/rmnet/rmnet_map_data.c
new file mode 100644
index 000000000000..1b4eda9f46a5
--- /dev/null
+++ b/drivers/net/rmnet/rmnet_map_data.c
@@ -0,0 +1,147 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data MAP protocol
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rmnet.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/time.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include "rmnet_config.h"
+#include "rmnet_map.h"
+#include "rmnet_private.h"
+#include "rmnet_stats.h"
+
+RMNET_LOG_MODULE(RMNET_LOGMASK_MAPD);
+
+#define RMNET_MAP_DEAGGR_SPACING 64
+#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
+
+/* rmnet_map_add_map_header() - Adds MAP header to front of skb->data
+ * @skb: Socket buffer ("packet") to modify
+ * @hdrlen: Number of bytes of header data which should not be included in
+ * MAP length field
+ * @pad: Specify if padding the MAP packet to make it 4 byte aligned is
+ * necessary
+ *
+ * Padding is calculated and set appropriately in MAP header. Mux ID is
+ * initialized to 0.
+ *
+ * Return:
+ * - Pointer to MAP structure
+ * - 0 (null) if insufficient headroom
+ * - 0 (null) if insufficient tailroom for padding bytes
+ *
+ * todo: Parameterize skb alignment
+ */
+struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
+ int hdrlen, int pad)
+{
+ u32 padding, map_datalen;
+ u8 *padbytes;
+ struct rmnet_map_header_s *map_header;
+
+ if (skb_headroom(skb) < sizeof(struct rmnet_map_header_s))
+ return 0;
+
+ map_datalen = skb->len - hdrlen;
+ map_header = (struct rmnet_map_header_s *)
+ skb_push(skb, sizeof(struct rmnet_map_header_s));
+ memset(map_header, 0, sizeof(struct rmnet_map_header_s));
+
+ if (pad == RMNET_MAP_NO_PAD_BYTES) {
+ map_header->pkt_len = htons(map_datalen);
+ return map_header;
+ }
+
+ padding = ALIGN(map_datalen, 4) - map_datalen;
+
+ if (padding == 0)
+ goto done;
+
+ if (skb_tailroom(skb) < padding)
+ return 0;
+
+ padbytes = (u8 *)skb_put(skb, padding);
+ LOGD("pad: %d", padding);
+ memset(padbytes, 0, padding);
+
+done:
+ map_header->pkt_len = htons(map_datalen + padding);
+ map_header->pad_len = padding & 0x3F;
+
+ return map_header;
+}
+
+/* rmnet_map_deaggregate() - Deaggregates a single packet
+ * @skb: Source socket buffer containing multiple MAP frames
+ * @config: Physical endpoint configuration of the ingress device
+ *
+ * A whole new buffer is allocated for each portion of an aggregated frame.
+ * Caller should keep calling deaggregate() on the source skb until 0 is
+ * returned, indicating that there are no more packets to deaggregate. Caller
+ * is responsible for freeing the original skb.
+ *
+ * Return:
+ * - Pointer to new skb
+ * - 0 (null) if no more aggregated packets
+ */
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_phys_ep_conf_s *config)
+{
+ struct sk_buff *skbn;
+ struct rmnet_map_header_s *maph;
+ u32 packet_len;
+
+ if (skb->len == 0)
+ return 0;
+
+ maph = (struct rmnet_map_header_s *)skb->data;
+ packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header_s);
+
+ if ((((int)skb->len) - ((int)packet_len)) < 0) {
+ LOGM("%s", "Got malformed packet. Dropping");
+ return 0;
+ }
+
+ skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
+ if (!skbn)
+ return 0;
+
+ skbn->dev = skb->dev;
+ skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
+ skb_put(skbn, packet_len);
+ memcpy(skbn->data, skb->data, packet_len);
+ skb_pull(skb, packet_len);
+
+ /* Some hardware can send us empty frames. Catch them */
+ if (ntohs(maph->pkt_len) == 0) {
+ LOGD("Dropping empty MAP frame");
+ rmnet_kfree_skb(skbn, RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0);
+ return 0;
+ }
+
+ return skbn;
+}
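
For illustration only (not part of this patch): a sketch of the caller pattern the comment above describes — peel frames off the aggregate until NULL is returned, then free the source skb. The helper name is hypothetical; the real caller lives in the ingress path.

static void example_deaggregate_all(struct sk_buff *skb,
				    struct rmnet_phys_ep_conf_s *config)
{
	struct sk_buff *skbn;
	int frames = 0;

	while ((skbn = rmnet_map_deaggregate(skb, config)) != NULL) {
		frames++;
		/* the real ingress path would deliver skbn here */
		consume_skb(skbn);
	}

	rmnet_stats_deagg_pkts(frames);
	consume_skb(skb);	/* caller owns and frees the source buffer */
}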
diff --git a/drivers/net/rmnet/rmnet_private.h b/drivers/net/rmnet/rmnet_private.h
new file mode 100644
index 000000000000..f27e0b3679cb
--- /dev/null
+++ b/drivers/net/rmnet/rmnet_private.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RMNET_PRIVATE_H_
+#define _RMNET_PRIVATE_H_
+
+#define RMNET_MAX_VND 32
+#define RMNET_MAX_PACKET_SIZE 16384
+#define RMNET_DFLT_PACKET_SIZE 1500
+#define RMNET_DEV_NAME_STR "rmnet"
+#define RMNET_NEEDED_HEADROOM 16
+#define RMNET_TX_QUEUE_LEN 1000
+#define RMNET_ETHERNET_HEADER_LENGTH 14
+
+extern unsigned int rmnet_log_level;
+extern unsigned int rmnet_log_module_mask;
+
+#define RMNET_INIT_OK 0
+#define RMNET_INIT_ERROR 1
+
+#define RMNET_LOG_LVL_DBG BIT(4)
+#define RMNET_LOG_LVL_LOW BIT(3)
+#define RMNET_LOG_LVL_MED BIT(2)
+#define RMNET_LOG_LVL_HI BIT(1)
+#define RMNET_LOG_LVL_ERR BIT(0)
+
+#define RMNET_LOG_MODULE(X) \
+ static u32 rmnet_mod_mask = X
+
+#define RMNET_LOGMASK_CONFIG BIT(0)
+#define RMNET_LOGMASK_HANDLER BIT(1)
+#define RMNET_LOGMASK_VND BIT(2)
+#define RMNET_LOGMASK_MAPD BIT(3)
+#define RMNET_LOGMASK_MAPC BIT(4)
+
+#define LOGE(fmt, ...) do { if (rmnet_log_level & RMNET_LOG_LVL_ERR) \
+ pr_err("[RMNET:ERR] %s(): " fmt "\n", __func__, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define LOGH(fmt, ...) do { if (rmnet_log_level & RMNET_LOG_LVL_HI) \
+ pr_err("[RMNET:HI] %s(): " fmt "\n", __func__, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define LOGM(fmt, ...) do { if (rmnet_log_level & RMNET_LOG_LVL_MED) \
+ pr_warn("[RMNET:MED] %s(): " fmt "\n", __func__, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define LOGL(fmt, ...) do { if (unlikely \
+ (rmnet_log_level & RMNET_LOG_LVL_LOW)) \
+ pr_notice("[RMNET:LOW] %s(): " fmt "\n", __func__, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+/* Don't use pr_debug as it may be compiled out of the kernel. We can be
+ * sure of minimal impact as LOGD is not enabled by default.
+ */
+#define LOGD(fmt, ...) do { if (unlikely( \
+ (rmnet_log_level & RMNET_LOG_LVL_DBG) &&\
+ (rmnet_log_module_mask & rmnet_mod_mask))) \
+ pr_notice("[RMNET:DBG] %s(): " fmt "\n", __func__, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#endif /* _RMNET_PRIVATE_H_ */
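
For illustration only (not part of this patch): how a source file is expected to use these macros — declare its module mask once, then log at the desired level. LOGD output additionally requires the matching bit in rmnet_log_module_mask to be set at runtime; the function below is hypothetical.

/* Hypothetical usage sketch for the logging macros above */
RMNET_LOG_MODULE(RMNET_LOGMASK_HANDLER);

static void example_log_paths(struct net_device *dev)
{
	LOGE("bad state on %s", dev->name);	/* gated by RMNET_LOG_LVL_ERR */
	LOGD("fast-path detail on %s", dev->name); /* needs LVL_DBG + module mask */
}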
diff --git a/drivers/net/rmnet/rmnet_stats.c b/drivers/net/rmnet/rmnet_stats.c
new file mode 100644
index 000000000000..d53ce38e96fe
--- /dev/null
+++ b/drivers/net/rmnet/rmnet_stats.c
@@ -0,0 +1,86 @@
+/* Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data statistics
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include "rmnet_private.h"
+#include "rmnet_stats.h"
+#include "rmnet_config.h"
+#include "rmnet_map.h"
+
+enum rmnet_deagg_e {
+ RMNET_STATS_AGG_BUFF,
+ RMNET_STATS_AGG_PKT,
+ RMNET_STATS_AGG_MAX
+};
+
+static DEFINE_SPINLOCK(rmnet_skb_free_lock);
+unsigned long int skb_free[RMNET_STATS_SKBFREE_MAX];
+module_param_array(skb_free, ulong, 0, 0444);
+MODULE_PARM_DESC(skb_free, "SKBs dropped or freed");
+
+static DEFINE_SPINLOCK(rmnet_queue_xmit_lock);
+unsigned long int queue_xmit[RMNET_STATS_QUEUE_XMIT_MAX * 2];
+module_param_array(queue_xmit, ulong, 0, 0444);
+MODULE_PARM_DESC(queue_xmit, "SKBs queued for transmit");
+
+static DEFINE_SPINLOCK(rmnet_deagg_count);
+unsigned long int deagg_count[RMNET_STATS_AGG_MAX];
+module_param_array(deagg_count, ulong, 0, 0444);
+MODULE_PARM_DESC(deagg_count, "SKBs De-aggregated");
+
+void rmnet_kfree_skb(struct sk_buff *skb, unsigned int reason)
+{
+ unsigned long flags;
+
+ if (reason >= RMNET_STATS_SKBFREE_MAX)
+ reason = RMNET_STATS_SKBFREE_UNKNOWN;
+
+ spin_lock_irqsave(&rmnet_skb_free_lock, flags);
+ skb_free[reason]++;
+ spin_unlock_irqrestore(&rmnet_skb_free_lock, flags);
+
+ if (skb)
+ kfree_skb(skb);
+}
+
+void rmnet_stats_queue_xmit(int rc, unsigned int reason)
+{
+ unsigned long flags;
+
+ if (rc != 0)
+ reason += RMNET_STATS_QUEUE_XMIT_MAX;
+ if (reason >= RMNET_STATS_QUEUE_XMIT_MAX * 2)
+ reason = RMNET_STATS_QUEUE_XMIT_UNKNOWN;
+
+ spin_lock_irqsave(&rmnet_queue_xmit_lock, flags);
+ queue_xmit[reason]++;
+ spin_unlock_irqrestore(&rmnet_queue_xmit_lock, flags);
+}
+
+void rmnet_stats_deagg_pkts(int aggcount)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rmnet_deagg_count, flags);
+ deagg_count[RMNET_STATS_AGG_BUFF]++;
+ deagg_count[RMNET_STATS_AGG_PKT] += aggcount;
+ spin_unlock_irqrestore(&rmnet_deagg_count, flags);
+}
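
For illustration only (not part of this patch): the flat queue_xmit[] array holds two counters per reason — successes at [reason] and failures at [reason + RMNET_STATS_QUEUE_XMIT_MAX]. A hypothetical dump helper, assumed to live alongside the code above so it can see the array:

static void example_dump_queue_xmit(void)
{
	unsigned int i;

	for (i = 0; i < RMNET_STATS_QUEUE_XMIT_MAX; i++)
		pr_info("xmit reason %u: ok %lu, failed %lu\n", i,
			queue_xmit[i],
			queue_xmit[i + RMNET_STATS_QUEUE_XMIT_MAX]);
}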
diff --git a/drivers/net/rmnet/rmnet_stats.h b/drivers/net/rmnet/rmnet_stats.h
new file mode 100644
index 000000000000..c8d0469bfe6a
--- /dev/null
+++ b/drivers/net/rmnet/rmnet_stats.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data statistics
+ *
+ */
+
+#ifndef _RMNET_STATS_H_
+#define _RMNET_STATS_H_
+
+enum rmnet_skb_free_e {
+ RMNET_STATS_SKBFREE_UNKNOWN,
+ RMNET_STATS_SKBFREE_BRDG_NO_EGRESS,
+ RMNET_STATS_SKBFREE_DELIVER_NO_EP,
+ RMNET_STATS_SKBFREE_IPINGRESS_NO_EP,
+ RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX,
+ RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP,
+ RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF,
+ RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPD,
+ RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPC,
+ RMNET_STATS_SKBFREE_EGR_MAPFAIL,
+ RMNET_STATS_SKBFREE_VND_NO_EGRESS,
+ RMNET_STATS_SKBFREE_MAPC_BAD_MUX,
+ RMNET_STATS_SKBFREE_MAPC_MUX_NO_EP,
+ RMNET_STATS_SKBFREE_AGG_CPY_EXPAND,
+ RMNET_STATS_SKBFREE_AGG_INTO_BUFF,
+ RMNET_STATS_SKBFREE_DEAGG_MALFORMED,
+ RMNET_STATS_SKBFREE_DEAGG_CLONE_FAIL,
+ RMNET_STATS_SKBFREE_DEAGG_UNKNOWN_IP_TYPE,
+ RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0,
+ RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM,
+ RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED,
+ RMNET_STATS_SKBFREE_MAX
+};
+
+enum rmnet_queue_xmit_e {
+ RMNET_STATS_QUEUE_XMIT_UNKNOWN,
+ RMNET_STATS_QUEUE_XMIT_EGRESS,
+ RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER,
+ RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT,
+ RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL,
+ RMNET_STATS_QUEUE_XMIT_AGG_SKIP,
+ RMNET_STATS_QUEUE_XMIT_MAX
+};
+
+void rmnet_kfree_skb(struct sk_buff *skb, unsigned int reason);
+void rmnet_stats_queue_xmit(int rc, unsigned int reason);
+void rmnet_stats_deagg_pkts(int aggcount);
+void rmnet_stats_agg_pkts(int aggcount);
+void rmnet_stats_dl_checksum(unsigned int rc);
+void rmnet_stats_ul_checksum(unsigned int rc);
+#endif /* _RMNET_STATS_H_ */
diff --git a/drivers/net/rmnet/rmnet_vnd.c b/drivers/net/rmnet/rmnet_vnd.c
new file mode 100644
index 000000000000..a5b1cb891798
--- /dev/null
+++ b/drivers/net/rmnet/rmnet_vnd.c
@@ -0,0 +1,457 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data virtual network driver
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/rmnet.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/spinlock.h>
+#include <net/pkt_sched.h>
+#include <linux/atomic.h>
+#include "rmnet_config.h"
+#include "rmnet_handlers.h"
+#include "rmnet_private.h"
+#include "rmnet_map.h"
+#include "rmnet_vnd.h"
+#include "rmnet_stats.h"
+
+RMNET_LOG_MODULE(RMNET_LOGMASK_VND);
+
+struct net_device *rmnet_devices[RMNET_MAX_VND];
+
+struct rmnet_vnd_private_s {
+ struct rmnet_logical_ep_conf_s local_ep;
+};
+
+/* RX/TX Fixup */
+
+/* rmnet_vnd_rx_fixup() - Virtual Network Device receive fixup hook
+ * @skb: Socket buffer ("packet") to modify
+ * @dev: Virtual network device
+ *
+ * Additional VND specific packet processing for ingress packets
+ *
+ * Return:
+ * - RX_HANDLER_PASS if packet should continue to process in stack
+ * - RX_HANDLER_CONSUMED if packet should not be processed in stack
+ *
+ */
+int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
+{
+ if (unlikely(!dev || !skb))
+ return RX_HANDLER_CONSUMED;
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+
+ return RX_HANDLER_PASS;
+}
+
+/* rmnet_vnd_tx_fixup() - Virtual Network Device transmit fixup hook
+ * @skb: Socket buffer ("packet") to modify
+ * @dev: Virtual network device
+ *
+ * Additional VND specific packet processing for egress packets
+ *
+ * Return:
+ * - RX_HANDLER_PASS if packet should continue to be transmitted
+ * - RX_HANDLER_CONSUMED if packet should not be transmitted by stack
+ */
+int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rmnet_vnd_private_s *dev_conf;
+
+ if (unlikely(!dev || !skb))
+ return RX_HANDLER_CONSUMED;
+
+ dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ return RX_HANDLER_PASS;
+}
+
+/* Network Device Operations */
+
+/* rmnet_vnd_start_xmit() - Transmit NDO callback
+ * @skb: Socket buffer ("packet") being sent from network stack
+ * @dev: Virtual Network Device
+ *
+ * Standard network driver operations hook to transmit packets on virtual
+ * network device. Called by network stack. Packet is not transmitted directly
+ * from here; instead it is given to the rmnet egress handler.
+ *
+ * Return:
+ * - NETDEV_TX_OK under all circumstances (cannot block/fail)
+ */
+static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct rmnet_vnd_private_s *dev_conf;
+
+ dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+ if (dev_conf->local_ep.egress_dev) {
+ rmnet_egress_handler(skb, &dev_conf->local_ep);
+ } else {
+ dev->stats.tx_dropped++;
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_VND_NO_EGRESS);
+ }
+ return NETDEV_TX_OK;
+}
+
+/* rmnet_vnd_change_mtu() - Change MTU NDO callback
+ * @dev: Virtual network device
+ * @new_mtu: New MTU value to set (in bytes)
+ *
+ * Standard network driver operations hook to set the MTU. Called by kernel to
+ * set the device MTU. Rejects values that are negative or greater than
+ * RMNET_MAX_PACKET_SIZE.
+ *
+ * Return:
+ * - 0 if successful
+ * - -EINVAL if new_mtu is out of range
+ */
+static int rmnet_vnd_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static const struct net_device_ops rmnet_vnd_ops = {
+ .ndo_init = 0,
+ .ndo_start_xmit = rmnet_vnd_start_xmit,
+ .ndo_change_mtu = rmnet_vnd_change_mtu,
+ .ndo_set_mac_address = 0,
+ .ndo_validate_addr = 0,
+};
+
+/* rmnet_vnd_setup() - net_device initialization callback
+ * @dev: Virtual network device
+ *
+ * Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
+ * flags, ARP type, needed headroom, etc...
+ */
+static void rmnet_vnd_setup(struct net_device *dev)
+{
+ struct rmnet_vnd_private_s *dev_conf;
+
+ LOGM("Setting up device %s", dev->name);
+
+ /* Clear out private data */
+ dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+ memset(dev_conf, 0, sizeof(struct rmnet_vnd_private_s));
+
+ dev->netdev_ops = &rmnet_vnd_ops;
+ dev->mtu = RMNET_DFLT_PACKET_SIZE;
+ dev->needed_headroom = RMNET_NEEDED_HEADROOM;
+ random_ether_addr(dev->dev_addr);
+ dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
+
+ /* Raw IP mode */
+ dev->header_ops = 0; /* No header */
+ dev->type = ARPHRD_RAWIP;
+ dev->hard_header_len = 0;
+ dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+}
+
+/* Exposed API */
+
+/* rmnet_vnd_exit() - Shutdown cleanup hook
+ *
+ * Called by RmNet main on module unload. Cleans up data structures and
+ * unregisters/frees net_devices.
+ */
+void rmnet_vnd_exit(void)
+{
+ int i;
+
+ for (i = 0; i < RMNET_MAX_VND; i++)
+ if (rmnet_devices[i]) {
+ unregister_netdev(rmnet_devices[i]);
+ free_netdev(rmnet_devices[i]);
+ }
+}
+
+/* rmnet_vnd_init() - Init hook
+ *
+ * Called by RmNet main on module load. Initializes data structures
+ */
+int rmnet_vnd_init(void)
+{
+ memset(rmnet_devices, 0,
+ sizeof(struct net_device *) * RMNET_MAX_VND);
+ return 0;
+}
+
+/* rmnet_vnd_create_dev() - Create a new virtual network device node.
+ * @id: Virtual device node id
+ * @new_device: Pointer to newly created device node
+ * @prefix: Device name prefix
+ *
+ * Allocates structures for new virtual network devices. Sets the name of the
+ * new device and registers it with the network stack. Device will appear in
+ * ifconfig list after this is called. If the prefix is null, then
+ * RMNET_DEV_NAME_STR will be assumed.
+ *
+ * Return:
+ * - 0 if successful
+ * - RMNET_CONFIG_BAD_ARGUMENTS if id is out of range or prefix is too long
+ * - RMNET_CONFIG_DEVICE_IN_USE if id already in use
+ * - RMNET_CONFIG_NOMEM if net_device allocation failed
+ * - RMNET_CONFIG_UNKNOWN_ERROR if register_netdevice() fails
+ */
+int rmnet_vnd_create_dev(int id, struct net_device **new_device,
+ const char *prefix)
+{
+ struct net_device *dev;
+ char dev_prefix[IFNAMSIZ];
+ int p, rc = 0;
+
+ if (id < 0 || id >= RMNET_MAX_VND) {
+ *new_device = 0;
+ return RMNET_CONFIG_BAD_ARGUMENTS;
+ }
+
+ if (rmnet_devices[id]) {
+ *new_device = 0;
+ return RMNET_CONFIG_DEVICE_IN_USE;
+ }
+
+ if (!prefix)
+ p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d",
+ RMNET_DEV_NAME_STR);
+ else
+ p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d", prefix);
+ if (p >= (IFNAMSIZ - 1)) {
+ LOGE("Specified prefix longer than IFNAMSIZ");
+ return RMNET_CONFIG_BAD_ARGUMENTS;
+ }
+
+ dev = alloc_netdev(sizeof(struct rmnet_vnd_private_s),
+ dev_prefix,
+ NET_NAME_ENUM,
+ rmnet_vnd_setup);
+ if (!dev) {
+ LOGE("Failed to to allocate netdev for id %d", id);
+ *new_device = 0;
+ return RMNET_CONFIG_NOMEM;
+ }
+
+ rc = register_netdevice(dev);
+ if (rc != 0) {
+ LOGE("Failed to to register netdev [%s]", dev->name);
+ free_netdev(dev);
+ *new_device = 0;
+ rc = RMNET_CONFIG_UNKNOWN_ERROR;
+ } else {
+ rmnet_devices[id] = dev;
+ *new_device = dev;
+ LOGM("Registered device %s", dev->name);
+ }
+
+ return rc;
+}
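
For illustration only (not part of this patch): a sketch of a configuration-path caller, assumed to sit in a file that includes rmnet_private.h and rmnet_vnd.h and to run with the RTNL held, since register_netdevice() above requires it. The function name is hypothetical.

static int example_create_default_vnd(void)
{
	struct net_device *dev;
	int rc;

	/* NULL prefix falls back to RMNET_DEV_NAME_STR ("rmnet%d") */
	rc = rmnet_vnd_create_dev(0, &dev, NULL);
	if (rc != 0) {
		LOGE("VND 0 creation failed, rc %d", rc);
		return rc;
	}

	LOGM("VND 0 registered as %s", dev->name);
	return 0;
}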
+
+/* rmnet_vnd_free_dev() - free a virtual network device node.
+ * @id: Virtual device node id
+ *
+ * Unregisters the virtual network device node and frees it.
+ * unregister_netdev locks the rtnl mutex, so the mutex must not be locked
+ * by the caller of the function. unregister_netdev enqueues the request to
+ * unregister the device into a TODO queue. The requests in the TODO queue
+ * are only done after the rtnl mutex is unlocked, therefore free_netdev has
+ * to be called after unlocking the rtnl mutex.
+ *
+ * Return:
+ * - 0 if successful
+ * - RMNET_CONFIG_NO_SUCH_DEVICE if id is invalid or not in range
+ * - RMNET_CONFIG_DEVICE_IN_USE if device has logical ep that wasn't unset
+ */
+int rmnet_vnd_free_dev(int id)
+{
+ struct rmnet_logical_ep_conf_s *epconfig_l;
+ struct net_device *dev;
+
+ rtnl_lock();
+ if ((id < 0) || (id >= RMNET_MAX_VND) || !rmnet_devices[id]) {
+ rtnl_unlock();
+ LOGM("Invalid id [%d]", id);
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+ }
+
+ epconfig_l = rmnet_vnd_get_le_config(rmnet_devices[id]);
+ if (epconfig_l && epconfig_l->refcount) {
+ rtnl_unlock();
+ return RMNET_CONFIG_DEVICE_IN_USE;
+ }
+
+ dev = rmnet_devices[id];
+ rmnet_devices[id] = 0;
+ rtnl_unlock();
+
+ if (dev) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ return 0;
+ } else {
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+ }
+}
+
+/* rmnet_vnd_get_name() - Gets the string name of a VND based on ID
+ * @id: Virtual device node id
+ * @name: Buffer to store name of virtual device node
+ * @name_len: Length of name buffer
+ *
+ * Copies the name of the virtual device node into the user's buffer. Returns
+ * an error if the buffer is null or too small to hold the device name.
+ *
+ * Return:
+ * - 0 if successful
+ * - -EINVAL if name is null
+ * - -EINVAL if id is invalid or not in range
+ * - -EINVAL if the name buffer is too small to hold the device name
+ */
+int rmnet_vnd_get_name(int id, char *name, int name_len)
+{
+ int p;
+
+ if (!name) {
+ LOGM("%s", "Bad arguments; name buffer null");
+ return -EINVAL;
+ }
+
+ if ((id < 0) || (id >= RMNET_MAX_VND) || !rmnet_devices[id]) {
+ LOGM("Invalid id [%d]", id);
+ return -EINVAL;
+ }
+
+ p = strlcpy(name, rmnet_devices[id]->name, name_len);
+ if (p >= name_len) {
+ LOGM("Buffer to small (%d) to fit device name", name_len);
+ return -EINVAL;
+ }
+ LOGL("Found mapping [%d]->\"%s\"", id, name);
+
+ return 0;
+}
+
+/* rmnet_vnd_is_vnd() - Determine if a net_device is an RmNet-owned virtual device
+ * @dev: Network device to test
+ *
+ * Searches through list of known RmNet virtual devices. This function is O(n)
+ * and should not be used in the data path.
+ *
+ * Return:
+ * - 0 if device is not an RmNet virtual device
+ * - 1-based index of the device if it is an RmNet virtual device
+ */
+int rmnet_vnd_is_vnd(struct net_device *dev)
+{
+ /* This is not an efficient search, but, this will only be called in
+ * a configuration context, and the list is small.
+ */
+ int i;
+
+ if (!dev)
+ return 0;
+
+ for (i = 0; i < RMNET_MAX_VND; i++)
+ if (dev == rmnet_devices[i])
+ return i + 1;
+
+ return 0;
+}
+
+/* rmnet_vnd_get_le_config() - Get the logical endpoint configuration
+ * @dev: Virtual device node
+ *
+ * Gets the logical endpoint configuration for a RmNet virtual network device
+ * node. Caller should confirm that the device is an RmNet VND before calling.
+ *
+ * Return:
+ * - Pointer to logical endpoint configuration structure
+ * - 0 (null) if dev is null
+ */
+struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev)
+{
+ struct rmnet_vnd_private_s *dev_conf;
+
+ if (!dev)
+ return 0;
+
+ dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+ if (!dev_conf)
+ return 0;
+
+ return &dev_conf->local_ep;
+}
+
+/* rmnet_vnd_do_flow_control() - Process flow control request
+ * @dev: Virtual network device node to do lookup on
+ * @enable: boolean to enable/disable flow.
+ *
+ * Return:
+ * - 0 if successful
+ * - -EINVAL if dev is not RmNet virtual network device node
+ */
+int rmnet_vnd_do_flow_control(struct net_device *dev, int enable)
+{
+ struct rmnet_vnd_private_s *dev_conf;
+
+ if (unlikely(!dev))
+ return -EINVAL;
+
+ if (!rmnet_vnd_is_vnd(dev))
+ return -EINVAL;
+
+ dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+
+ if (unlikely(!dev_conf))
+ return -EINVAL;
+
+ LOGD("Setting VND TX queue state to %d", enable);
+ /* Although we expect a similar number of enable/disable
+ * commands, optimize for the disable path, which is more
+ * latency sensitive than enable.
+ */
+ if (unlikely(enable))
+ netif_wake_queue(dev);
+ else
+ netif_stop_queue(dev);
+
+ return 0;
+}
+
+/* rmnet_vnd_get_by_id() - Get VND by array index ID
+ * @id: Virtual network device id [0:RMNET_MAX_VND]
+ *
+ * Return:
+ * - 0 if no device or ID out of range
+ * - otherwise return pointer to VND net_device struct
+ */
+struct net_device *rmnet_vnd_get_by_id(int id)
+{
+ if (id < 0 || id >= RMNET_MAX_VND) {
+ pr_err("Bug; VND ID out of bounds");
+ return 0;
+ }
+ return rmnet_devices[id];
+}
diff --git a/drivers/net/rmnet/rmnet_vnd.h b/drivers/net/rmnet/rmnet_vnd.h
new file mode 100644
index 000000000000..428240898ff0
--- /dev/null
+++ b/drivers/net/rmnet/rmnet_vnd.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Virtual Network Device APIs
+ *
+ */
+
+#include <linux/types.h>
+
+#ifndef _RMNET_VND_H_
+#define _RMNET_VND_H_
+
+int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
+struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev);
+int rmnet_vnd_get_name(int id, char *name, int name_len);
+int rmnet_vnd_create_dev(int id, struct net_device **new_device,
+ const char *prefix);
+int rmnet_vnd_free_dev(int id);
+int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
+int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
+int rmnet_vnd_is_vnd(struct net_device *dev);
+int rmnet_vnd_init(void);
+void rmnet_vnd_exit(void);
+struct net_device *rmnet_vnd_get_by_id(int id);
+
+#endif /* _RMNET_VND_H_ */
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 284caf81e808..486af5dac5df 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -332,5 +332,6 @@ source "drivers/net/wireless/mwifiex/Kconfig"
source "drivers/net/wireless/cw1200/Kconfig"
source "drivers/net/wireless/rsi/Kconfig"
source "drivers/net/wireless/cnss/Kconfig"
+source "drivers/net/wireless/cnss_genl/Kconfig"
endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 818fa279b25d..0204fc00f0c5 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -66,3 +66,4 @@ obj-$(CONFIG_WCNSS_CORE) += wcnss/
obj-$(CONFIG_CNSS) += cnss/
obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/
obj-$(CONFIG_CNSS_CRYPTO) += cnss_crypto/
+obj-$(CONFIG_CNSS_GENL) += cnss_genl/
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index c65cceb2e4cb..7b5fc52d269a 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -35,7 +35,7 @@ ath10k_snoc_service_notifier_notify(struct notifier_block *nb,
atomic_set(&ar_snoc->fw_crashed, 1);
ath10k_dbg(ar, ATH10K_DBG_SNOC, "PD went down %d\n",
- ar_snoc->fw_crashed);
+ atomic_read(&ar_snoc->fw_crashed));
break;
case SERVREG_NOTIF_SERVICE_STATE_UP_V01:
ath10k_dbg(ar, ATH10K_DBG_SNOC, "Service up\n");
@@ -192,7 +192,7 @@ static int ath10k_snoc_modem_notifier_nb(struct notifier_block *nb,
atomic_set(&ar_snoc->fw_crashed, 1);
ath10k_dbg(ar, ATH10K_DBG_SNOC, "Modem went down %d\n",
- ar_snoc->fw_crashed);
+ atomic_read(&ar_snoc->fw_crashed));
if (notif->crashed)
queue_work(ar->workqueue, &ar->restart_work);
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index acd5347f2cae..923fe470360c 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -475,22 +475,23 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
}
mutex_unlock(&wil->p2p_wdev_mutex);
- /* social scan on P2P_DEVICE is handled as p2p search */
- if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
- wil_p2p_is_social_scan(request)) {
+ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE) {
if (!wil->p2p.p2p_dev_started) {
wil_err(wil, "P2P search requested on stopped P2P device\n");
rc = -EIO;
goto out;
}
- wil->scan_request = request;
- wil->radio_wdev = wdev;
- rc = wil_p2p_search(wil, request);
- if (rc) {
- wil->radio_wdev = wil_to_wdev(wil);
- wil->scan_request = NULL;
+ /* social scan on P2P_DEVICE is handled as p2p search */
+ if (wil_p2p_is_social_scan(request)) {
+ wil->scan_request = request;
+ wil->radio_wdev = wdev;
+ rc = wil_p2p_search(wil, request);
+ if (rc) {
+ wil->radio_wdev = wil_to_wdev(wil);
+ wil->scan_request = NULL;
+ }
+ goto out;
}
- goto out;
}
(void)wil_p2p_stop_discovery(wil);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 958c96b75fbb..01a27335ec34 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -130,9 +130,15 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
u32 *d = dst;
const volatile u32 __iomem *s = src;
- /* size_t is unsigned, if (count%4 != 0) it will wrap */
- for (count += 4; count > 4; count -= 4)
+ for (; count >= 4; count -= 4)
*d++ = __raw_readl(s++);
+
+ if (unlikely(count)) {
+ /* count can be 1..3 */
+ u32 tmp = __raw_readl(s);
+
+ memcpy(d, &tmp, count);
+ }
}
void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
@@ -149,8 +155,16 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
volatile u32 __iomem *d = dst;
const u32 *s = src;
- for (count += 4; count > 4; count -= 4)
+ for (; count >= 4; count -= 4)
__raw_writel(*s++, d++);
+
+ if (unlikely(count)) {
+ /* count can be 1..3 */
+ u32 tmp = 0;
+
+ memcpy(&tmp, s, count);
+ __raw_writel(tmp, d);
+ }
}
void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
diff --git a/drivers/net/wireless/cnss_genl/Kconfig b/drivers/net/wireless/cnss_genl/Kconfig
new file mode 100644
index 000000000000..f1b8a586ec90
--- /dev/null
+++ b/drivers/net/wireless/cnss_genl/Kconfig
@@ -0,0 +1,7 @@
+config CNSS_GENL
+ tristate "CNSS Generic Netlink Socket Driver"
+ ---help---
+ This module creates generic netlink family "CLD80211". This can be
+ used by the cld driver and userspace utilities to communicate over
+ netlink sockets. This module creates different multicast groups to
+ facilitate the same.
diff --git a/drivers/net/wireless/cnss_genl/Makefile b/drivers/net/wireless/cnss_genl/Makefile
new file mode 100644
index 000000000000..9431c9e596bb
--- /dev/null
+++ b/drivers/net/wireless/cnss_genl/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CNSS_GENL) := cnss_nl.o
diff --git a/drivers/net/wireless/cnss_genl/cnss_nl.c b/drivers/net/wireless/cnss_genl/cnss_nl.c
new file mode 100644
index 000000000000..fafd9ce4b4c4
--- /dev/null
+++ b/drivers/net/wireless/cnss_genl/cnss_nl.c
@@ -0,0 +1,204 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/genetlink.h>
+#include <net/cnss_nl.h>
+#include <linux/module.h>
+
+#define CLD80211_GENL_NAME "cld80211"
+
+#define CLD80211_MULTICAST_GROUP_SVC_MSGS "svc_msgs"
+#define CLD80211_MULTICAST_GROUP_HOST_LOGS "host_logs"
+#define CLD80211_MULTICAST_GROUP_FW_LOGS "fw_logs"
+#define CLD80211_MULTICAST_GROUP_PER_PKT_STATS "per_pkt_stats"
+#define CLD80211_MULTICAST_GROUP_DIAG_EVENTS "diag_events"
+#define CLD80211_MULTICAST_GROUP_FATAL_EVENTS "fatal_events"
+#define CLD80211_MULTICAST_GROUP_OEM_MSGS "oem_msgs"
+
+static const struct genl_multicast_group nl_mcgrps[] = {
+ [CLD80211_MCGRP_SVC_MSGS] = { .name =
+ CLD80211_MULTICAST_GROUP_SVC_MSGS},
+ [CLD80211_MCGRP_HOST_LOGS] = { .name =
+ CLD80211_MULTICAST_GROUP_HOST_LOGS},
+ [CLD80211_MCGRP_FW_LOGS] = { .name =
+ CLD80211_MULTICAST_GROUP_FW_LOGS},
+ [CLD80211_MCGRP_PER_PKT_STATS] = { .name =
+ CLD80211_MULTICAST_GROUP_PER_PKT_STATS},
+ [CLD80211_MCGRP_DIAG_EVENTS] = { .name =
+ CLD80211_MULTICAST_GROUP_DIAG_EVENTS},
+ [CLD80211_MCGRP_FATAL_EVENTS] = { .name =
+ CLD80211_MULTICAST_GROUP_FATAL_EVENTS},
+ [CLD80211_MCGRP_OEM_MSGS] = { .name =
+ CLD80211_MULTICAST_GROUP_OEM_MSGS},
+};
+
+struct cld_ops {
+ cld80211_cb cb;
+ void *cb_ctx;
+};
+
+struct cld80211_nl_data {
+ struct cld_ops cld_ops[CLD80211_MAX_COMMANDS];
+};
+
+static struct cld80211_nl_data nl_data;
+
+static inline struct cld80211_nl_data *get_local_ctx(void)
+{
+ return &nl_data;
+}
+
+static struct genl_ops nl_ops[CLD80211_MAX_COMMANDS];
+
+/* policy for the attributes */
+static const struct nla_policy cld80211_policy[CLD80211_ATTR_MAX + 1] = {
+ [CLD80211_ATTR_VENDOR_DATA] = { .type = NLA_NESTED },
+ [CLD80211_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = CLD80211_MAX_NL_DATA },
+};
+
+static int cld80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
+ struct genl_info *info)
+{
+ u8 cmd_id = ops->cmd;
+ struct cld80211_nl_data *nl = get_local_ctx();
+
+ if (cmd_id < 1 || cmd_id > CLD80211_MAX_COMMANDS) {
+ pr_err("CLD80211: Command Not supported: %u\n", cmd_id);
+ return -EOPNOTSUPP;
+ }
+ info->user_ptr[0] = nl->cld_ops[cmd_id - 1].cb;
+ info->user_ptr[1] = nl->cld_ops[cmd_id - 1].cb_ctx;
+
+ return 0;
+}
+
+/* The netlink family */
+static struct genl_family cld80211_fam = {
+ .id = GENL_ID_GENERATE,
+ .name = CLD80211_GENL_NAME,
+ .hdrsize = 0, /* no private header */
+ .version = 1, /* no particular meaning now */
+ .maxattr = CLD80211_ATTR_MAX,
+ .netnsok = true,
+ .pre_doit = cld80211_pre_doit,
+ .post_doit = NULL,
+};
+
+int register_cld_cmd_cb(u8 cmd_id, cld80211_cb func, void *cb_ctx)
+{
+ struct cld80211_nl_data *nl = get_local_ctx();
+
+ pr_debug("CLD80211: Registering command: %d\n", cmd_id);
+ if (!cmd_id || cmd_id > CLD80211_MAX_COMMANDS) {
+ pr_debug("CLD80211: invalid command: %d\n", cmd_id);
+ return -EINVAL;
+ }
+
+ nl->cld_ops[cmd_id - 1].cb = func;
+ nl->cld_ops[cmd_id - 1].cb_ctx = cb_ctx;
+
+ return 0;
+}
+EXPORT_SYMBOL(register_cld_cmd_cb);
+
+int deregister_cld_cmd_cb(u8 cmd_id)
+{
+ struct cld80211_nl_data *nl = get_local_ctx();
+
+ pr_debug("CLD80211: De-registering command: %d\n", cmd_id);
+ if (!cmd_id || cmd_id > CLD80211_MAX_COMMANDS) {
+ pr_debug("CLD80211: invalid command: %d\n", cmd_id);
+ return -EINVAL;
+ }
+
+ nl->cld_ops[cmd_id - 1].cb = NULL;
+ nl->cld_ops[cmd_id - 1].cb_ctx = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(deregister_cld_cmd_cb);
+
+struct genl_family *cld80211_get_genl_family(void)
+{
+ return &cld80211_fam;
+}
+EXPORT_SYMBOL(cld80211_get_genl_family);
+
+static int cld80211_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ cld80211_cb cld_cb;
+ void *cld_ctx;
+
+ cld_cb = info->user_ptr[0];
+
+ if (!cld_cb) {
+ pr_err("CLD80211: Not supported\n");
+ return -EOPNOTSUPP;
+ }
+ cld_ctx = info->user_ptr[1];
+
+ if (info->attrs[CLD80211_ATTR_VENDOR_DATA]) {
+ cld_cb(nla_data(info->attrs[CLD80211_ATTR_VENDOR_DATA]),
+ nla_len(info->attrs[CLD80211_ATTR_VENDOR_DATA]),
+ cld_ctx, info->snd_portid);
+ } else {
+ pr_err("CLD80211: No CLD80211_ATTR_VENDOR_DATA\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __cld80211_init(void)
+{
+ int err, i;
+
+ memset(&nl_ops[0], 0, sizeof(nl_ops));
+
+ pr_info("CLD80211: Initializing\n");
+ for (i = 0; i < CLD80211_MAX_COMMANDS; i++) {
+ nl_ops[i].cmd = i + 1;
+ nl_ops[i].doit = cld80211_doit;
+ nl_ops[i].flags = GENL_ADMIN_PERM;
+ nl_ops[i].policy = cld80211_policy;
+ }
+
+ err = genl_register_family_with_ops_groups(&cld80211_fam, nl_ops,
+ nl_mcgrps);
+ if (err) {
+ pr_err("CLD80211: Failed to register cld80211 family: %d\n",
+ err);
+ }
+
+ return err;
+}
+
+static void __cld80211_exit(void)
+{
+ genl_unregister_family(&cld80211_fam);
+}
+
+static int __init cld80211_init(void)
+{
+ return __cld80211_init();
+}
+
+static void __exit cld80211_exit(void)
+{
+ __cld80211_exit();
+}
+
+module_init(cld80211_init);
+module_exit(cld80211_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CNSS generic netlink module");
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 584ad96c703f..eade4f85632a 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -2511,6 +2511,48 @@ int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
}
EXPORT_SYMBOL(msm_pcie_debug_info);
+#ifdef CONFIG_SYSFS
+static ssize_t msm_pcie_enumerate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
+ dev_get_drvdata(dev);
+
+ if (pcie_dev)
+ msm_pcie_enumerate(pcie_dev->rc_idx);
+
+ return count;
+}
+
+static DEVICE_ATTR(enumerate, S_IWUSR, NULL, msm_pcie_enumerate_store);
+
+static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
+{
+ int ret;
+
+ ret = device_create_file(&dev->pdev->dev, &dev_attr_enumerate);
+ if (ret)
+ PCIE_DBG_FS(dev,
+ "RC%d: failed to create sysfs enumerate node\n",
+ dev->rc_idx);
+}
+
+static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
+{
+ if (dev->pdev)
+ device_remove_file(&dev->pdev->dev, &dev_attr_enumerate);
+}
+#else
+static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
+{
+}
+
+static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
+{
+}
+#endif
+
#ifdef CONFIG_DEBUG_FS
static struct dentry *dent_msm_pcie;
static struct dentry *dfile_rc_sel;
@@ -4600,6 +4642,8 @@ int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options)
do {
usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
+ PCIE_DBG(dev, "PCIe RC%d: LTSSM_STATE:0x%x\n",
+ dev->rc_idx, (val >> 12) & 0x3f);
} while ((!(val & XMLH_LINK_UP) ||
!msm_pcie_confirm_linkup(dev, false, false, NULL))
&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));
@@ -6277,6 +6321,9 @@ static int msm_pcie_probe(struct platform_device *pdev)
msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
}
+ dev_set_drvdata(&msm_pcie_dev[rc_idx].pdev->dev, &msm_pcie_dev[rc_idx]);
+ msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
+
ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
msm_pcie_dev[rc_idx].pdev);
@@ -6490,11 +6537,16 @@ int __init pcie_init(void)
static void __exit pcie_exit(void)
{
+ int i;
+
PCIE_GEN_DBG("pcie:%s.\n", __func__);
platform_driver_unregister(&msm_pcie_driver);
msm_pcie_debugfs_exit();
+
+ for (i = 0; i < MAX_RC_NUM; i++)
+ msm_pcie_sysfs_exit(&msm_pcie_dev[i]);
}
subsys_initcall_sync(pcie_init);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index 985c3e560c86..d94e8f9f0e12 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1482,17 +1482,24 @@ void ipa_install_dflt_flt_rules(u32 ipa_ep_idx)
void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx)
{
+ struct ipa_flt_tbl *tbl;
struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx];
mutex_lock(&ipa_ctx->lock);
if (ep->dflt_flt4_rule_hdl) {
+ tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4);
+ /* Reset the sticky flag. */
+ tbl->sticky_rear = false;
ep->dflt_flt4_rule_hdl = 0;
}
if (ep->dflt_flt6_rule_hdl) {
+ tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6);
+ /* Reset the sticky flag. */
+ tbl->sticky_rear = false;
ep->dflt_flt6_rule_hdl = 0;
}
mutex_unlock(&ipa_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 362294b0f695..41b29335d23b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1389,16 +1389,23 @@ void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx)
void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
{
struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
+ struct ipa3_flt_tbl *tbl;
mutex_lock(&ipa3_ctx->lock);
if (ep->dflt_flt4_rule_hdl) {
+ tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+ /* Reset the sticky flag. */
+ tbl->sticky_rear = false;
ep->dflt_flt4_rule_hdl = 0;
}
if (ep->dflt_flt6_rule_hdl) {
+ tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+ /* Reset the sticky flag. */
+ tbl->sticky_rear = false;
ep->dflt_flt6_rule_hdl = 0;
}
mutex_unlock(&ipa3_ctx->lock);
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 9b48282c812c..61a870c9928b 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -36,7 +36,7 @@
#define WIGIG_VENDOR (0x1ae9)
#define WIGIG_DEVICE (0x0310)
-#define SMMU_BASE 0x10000000 /* Device address range base */
+#define SMMU_BASE 0x20000000 /* Device address range base */
#define SMMU_SIZE ((SZ_1G * 4ULL) - SMMU_BASE)
#define WIGIG_ENABLE_DELAY 50
@@ -93,9 +93,12 @@ struct msm11ad_ctx {
/* SMMU */
bool use_smmu; /* have SMMU enabled? */
- int smmu_bypass;
+ int smmu_s1_en;
int smmu_fast_map;
+ int smmu_coherent;
struct dma_iommu_mapping *mapping;
+ u32 smmu_base;
+ u32 smmu_size;
/* bus frequency scaling */
struct msm_bus_scale_pdata *bus_scale;
@@ -638,15 +641,17 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
{
int atomic_ctx = 1;
int rc;
+ int force_pt_coherent = 1;
+ int smmu_bypass = !ctx->smmu_s1_en;
if (!ctx->use_smmu)
return 0;
- dev_info(ctx->dev, "Initialize SMMU, bypass = %d, fastmap = %d\n",
- ctx->smmu_bypass, ctx->smmu_fast_map);
+ dev_info(ctx->dev, "Initialize SMMU, bypass=%d, fastmap=%d, coherent=%d\n",
+ smmu_bypass, ctx->smmu_fast_map, ctx->smmu_coherent);
ctx->mapping = arm_iommu_create_mapping(&platform_bus_type,
- SMMU_BASE, SMMU_SIZE);
+ ctx->smmu_base, ctx->smmu_size);
if (IS_ERR_OR_NULL(ctx->mapping)) {
rc = PTR_ERR(ctx->mapping) ?: -ENODEV;
dev_err(ctx->dev, "Failed to create IOMMU mapping (%d)\n", rc);
@@ -662,23 +667,39 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
goto release_mapping;
}
- if (ctx->smmu_bypass) {
+ if (smmu_bypass) {
rc = iommu_domain_set_attr(ctx->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
- &ctx->smmu_bypass);
+ &smmu_bypass);
if (rc) {
dev_err(ctx->dev, "Set bypass attribute to SMMU failed (%d)\n",
rc);
goto release_mapping;
}
- } else if (ctx->smmu_fast_map) {
- rc = iommu_domain_set_attr(ctx->mapping->domain,
- DOMAIN_ATTR_FAST,
- &ctx->smmu_fast_map);
- if (rc) {
- dev_err(ctx->dev, "Set fast attribute to SMMU failed (%d)\n",
- rc);
- goto release_mapping;
+ } else {
+ /* Set dma-coherent and page table coherency */
+ if (ctx->smmu_coherent) {
+ arch_setup_dma_ops(&ctx->pcidev->dev, 0, 0, NULL, true);
+ rc = iommu_domain_set_attr(ctx->mapping->domain,
+ DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
+ &force_pt_coherent);
+ if (rc) {
+ dev_err(ctx->dev,
+ "Set SMMU PAGE_TABLE_FORCE_COHERENT attr failed (%d)\n",
+ rc);
+ goto release_mapping;
+ }
+ }
+
+ if (ctx->smmu_fast_map) {
+ rc = iommu_domain_set_attr(ctx->mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &ctx->smmu_fast_map);
+ if (rc) {
+ dev_err(ctx->dev, "Set fast attribute to SMMU failed (%d)\n",
+ rc);
+ goto release_mapping;
+ }
}
}
@@ -900,6 +921,7 @@ static int msm_11ad_probe(struct platform_device *pdev)
struct device_node *of_node = dev->of_node;
struct device_node *rc_node;
struct pci_dev *pcidev = NULL;
+ u32 smmu_mapping[2];
int rc;
u32 val;
@@ -954,8 +976,27 @@ static int msm_11ad_probe(struct platform_device *pdev)
ctx->use_smmu = of_property_read_bool(of_node, "qcom,smmu-support");
ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
- ctx->smmu_bypass = 1;
- ctx->smmu_fast_map = 0;
+ ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
+ if (ctx->smmu_s1_en) {
+ ctx->smmu_fast_map = of_property_read_bool(
+ of_node, "qcom,smmu-fast-map");
+ ctx->smmu_coherent = of_property_read_bool(
+ of_node, "qcom,smmu-coherent");
+ }
+ rc = of_property_read_u32_array(dev->of_node, "qcom,smmu-mapping",
+ smmu_mapping, 2);
+ if (rc) {
+ dev_err(ctx->dev,
+ "Failed to read base/size smmu addresses %d, fallback to default\n",
+ rc);
+ ctx->smmu_base = SMMU_BASE;
+ ctx->smmu_size = SMMU_SIZE;
+ } else {
+ ctx->smmu_base = smmu_mapping[0];
+ ctx->smmu_size = smmu_mapping[1];
+ }
+ dev_dbg(ctx->dev, "smmu_base=0x%x smmu_sise=0x%x\n",
+ ctx->smmu_base, ctx->smmu_size);
/*== execute ==*/
/* turn device on */
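
The qcom,smmu-mapping handling above is a common two-cell <base size> read with a fallback. A minimal standalone sketch of the same pattern follows; the property string matches the patch, while the helper name and built-in defaults are illustrative only.

#include <linux/device.h>
#include <linux/of.h>
#include <linux/sizes.h>

/* Illustrative helper, not part of the patch. */
static void example_read_smmu_mapping(struct device *dev, u32 *base, u32 *size)
{
	u32 vals[2];

	if (of_property_read_u32_array(dev->of_node, "qcom,smmu-mapping",
				       vals, 2)) {
		/* Property missing or malformed: keep the built-in range. */
		*base = 0x20000000;
		*size = (SZ_1G * 4ULL) - 0x20000000;
		return;
	}
	*base = vals[0];
	*size = vals[1];
}
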
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index a35ed1afc720..6f9a13040cd5 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -215,7 +215,8 @@ static int msm_ext_disp_process_display(struct msm_ext_disp *ext_disp,
{
int ret = 0;
- if (!(flags & MSM_EXT_DISP_HPD_VIDEO)) {
+ if (!(flags & (MSM_EXT_DISP_HPD_VIDEO
+ | MSM_EXT_DISP_HPD_ASYNC_VIDEO))) {
pr_debug("skipping video setup for display (%s)\n",
msm_ext_disp_name(type));
goto end;
@@ -224,7 +225,8 @@ static int msm_ext_disp_process_display(struct msm_ext_disp *ext_disp,
ret = msm_ext_disp_send_cable_notification(ext_disp, state);
/* positive ret value means audio node was switched */
- if (IS_ERR_VALUE(ret) || !ret) {
+ if ((ret <= 0) ||
+ (flags & MSM_EXT_DISP_HPD_ASYNC_VIDEO)) {
pr_debug("not waiting for display\n");
goto end;
}
@@ -237,9 +239,8 @@ static int msm_ext_disp_process_display(struct msm_ext_disp *ext_disp,
goto end;
}
- ret = 0;
end:
- return ret;
+ return (ret >= 0) ? 0 : -EINVAL;
}
static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp,
@@ -248,7 +249,8 @@ static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp,
{
int ret = 0;
- if (!(flags & MSM_EXT_DISP_HPD_AUDIO)) {
+ if (!(flags & (MSM_EXT_DISP_HPD_AUDIO
+ | MSM_EXT_DISP_HPD_ASYNC_AUDIO))) {
pr_debug("skipping audio setup for display (%s)\n",
msm_ext_disp_name(type));
goto end;
@@ -257,7 +259,8 @@ static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp,
ret = msm_ext_disp_send_audio_notification(ext_disp, state);
/* positive ret value means audio node was switched */
- if (IS_ERR_VALUE(ret) || !ret || !ext_disp->ack_enabled) {
+ if ((ret <= 0) || !ext_disp->ack_enabled ||
+ (flags & MSM_EXT_DISP_HPD_ASYNC_AUDIO)) {
pr_debug("not waiting for audio\n");
goto end;
}
@@ -270,9 +273,8 @@ static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp,
goto end;
}
- ret = 0;
end:
- return ret;
+ return (ret >= 0) ? 0 : -EINVAL;
}
static bool msm_ext_disp_validate_connect(struct msm_ext_disp *ext_disp,
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 1c0eecdf162c..f2047592a94b 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -46,10 +46,13 @@
&& (value) <= (right)))
/* Awake votable reasons */
-#define SRAM_READ "fg_sram_read"
-#define SRAM_WRITE "fg_sram_write"
-#define PROFILE_LOAD "fg_profile_load"
-#define DELTA_SOC "fg_delta_soc"
+#define SRAM_READ "fg_sram_read"
+#define SRAM_WRITE "fg_sram_write"
+#define PROFILE_LOAD "fg_profile_load"
+#define DELTA_SOC "fg_delta_soc"
+
+/* Delta BSOC votable reasons */
+#define DELTA_BSOC_IRQ_VOTER "fg_delta_bsoc_irq"
#define DEBUG_PRINT_BUFFER_SIZE 64
/* 3 byte address + 1 space character */
@@ -330,6 +333,7 @@ struct fg_chip {
struct fg_memif *sram;
struct fg_irq_info *irqs;
struct votable *awake_votable;
+ struct votable *delta_bsoc_irq_en_votable;
struct fg_sram_param *sp;
struct fg_alg_flag *alg_flags;
int *debug_mask;
@@ -370,8 +374,8 @@ struct fg_chip {
bool esr_fcc_ctrl_en;
bool soc_reporting_ready;
bool esr_flt_cold_temp_en;
- bool bsoc_delta_irq_en;
bool slope_limit_en;
+ bool use_ima_single_mode;
struct completion soc_update;
struct completion soc_ready;
struct delayed_work profile_load_work;
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
index c00c72c5884c..8a949bfe61d0 100644
--- a/drivers/power/supply/qcom/fg-memif.c
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -48,6 +48,10 @@ static int fg_config_access_mode(struct fg_chip *chip, bool access, bool burst)
int rc;
u8 intf_ctl = 0;
+ fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "access: %d burst: %d\n",
+ access, burst);
+
+ WARN_ON(burst && chip->use_ima_single_mode);
intf_ctl = ((access == FG_WRITE) ? IMA_WR_EN_BIT : 0) |
(burst ? MEM_ACS_BURST_BIT : 0);
@@ -293,7 +297,9 @@ static int fg_check_iacs_ready(struct fg_chip *chip)
/* check for error condition */
rc = fg_clear_ima_errors_if_any(chip, false);
if (rc < 0) {
- pr_err("Failed to check for ima errors rc=%d\n", rc);
+ if (rc != -EAGAIN)
+ pr_err("Failed to check for ima errors rc=%d\n",
+ rc);
return rc;
}
@@ -357,7 +363,12 @@ static int __fg_interleaved_mem_write(struct fg_chip *chip, u16 address,
/* check for error condition */
rc = fg_clear_ima_errors_if_any(chip, false);
if (rc < 0) {
- pr_err("Failed to check for ima errors rc=%d\n", rc);
+ if (rc == -EAGAIN)
+ pr_err("IMA error cleared, address [%d %d] len %d\n",
+ address, offset, len);
+ else
+ pr_err("Failed to check for ima errors rc=%d\n",
+ rc);
return rc;
}
@@ -365,6 +376,15 @@ static int __fg_interleaved_mem_write(struct fg_chip *chip, u16 address,
len -= num_bytes;
offset = byte_enable = 0;
+ if (chip->use_ima_single_mode && len) {
+ address++;
+ rc = fg_set_address(chip, address);
+ if (rc < 0) {
+ pr_err("failed to set address rc = %d\n", rc);
+ return rc;
+ }
+ }
+
rc = fg_check_iacs_ready(chip);
if (rc < 0) {
pr_debug("IACS_RDY failed rc=%d\n", rc);
@@ -403,22 +423,40 @@ static int __fg_interleaved_mem_read(struct fg_chip *chip, u16 address,
/* check for error condition */
rc = fg_clear_ima_errors_if_any(chip, false);
if (rc < 0) {
- pr_err("Failed to check for ima errors rc=%d\n", rc);
+ if (rc == -EAGAIN)
+ pr_err("IMA error cleared, address [%d %d] len %d\n",
+ address, offset, len);
+ else
+ pr_err("Failed to check for ima errors rc=%d\n",
+ rc);
return rc;
}
- if (len && len < BYTES_PER_SRAM_WORD) {
- /*
- * Move to single mode. Changing address is not
- * required here as it must be in burst mode. Address
- * will get incremented internally by FG HW once the MSB
- * of RD_DATA is read.
- */
- rc = fg_config_access_mode(chip, FG_READ, 0);
- if (rc < 0) {
- pr_err("failed to move to single mode rc=%d\n",
- rc);
- return -EIO;
+ if (chip->use_ima_single_mode) {
+ if (len) {
+ address++;
+ rc = fg_set_address(chip, address);
+ if (rc < 0) {
+ pr_err("failed to set address rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+ } else {
+ if (len && len < BYTES_PER_SRAM_WORD) {
+ /*
+ * Move to single mode. Changing address is not
+ * required here as it must be in burst mode.
+ * Address will get incremented internally by FG
+ * HW once the MSB of RD_DATA is read.
+ */
+ rc = fg_config_access_mode(chip, FG_READ,
+ false);
+ if (rc < 0) {
+ pr_err("failed to move to single mode rc=%d\n",
+ rc);
+ return -EIO;
+ }
}
}
@@ -489,6 +527,7 @@ static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val,
u16 address, int offset, int len, bool access)
{
int rc = 0;
+ bool burst_mode = false;
if (!is_mem_access_available(chip, access))
return -EBUSY;
@@ -503,7 +542,8 @@ static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val,
}
/* configure for the read/write, single/burst mode */
- rc = fg_config_access_mode(chip, access, (offset + len) > 4);
+ burst_mode = chip->use_ima_single_mode ? false : ((offset + len) > 4);
+ rc = fg_config_access_mode(chip, access, burst_mode);
if (rc < 0) {
pr_err("failed to set memory access rc = %d\n", rc);
return rc;
@@ -583,7 +623,7 @@ retry:
if (rc < 0) {
count++;
if (rc == -EAGAIN) {
- pr_err("IMA access failed retry_count = %d\n", count);
+ pr_err("IMA read failed retry_count = %d\n", count);
goto retry;
}
pr_err("failed to read SRAM address rc = %d\n", rc);
@@ -667,8 +707,8 @@ retry:
rc = __fg_interleaved_mem_write(chip, address, offset, val, len);
if (rc < 0) {
count++;
- if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
- pr_err("IMA access failed retry_count = %d\n", count);
+ if (rc == -EAGAIN) {
+ pr_err("IMA write failed retry_count = %d\n", count);
goto retry;
}
pr_err("failed to write SRAM address rc = %d\n", rc);
diff --git a/drivers/power/supply/qcom/pmic-voter.c b/drivers/power/supply/qcom/pmic-voter.c
index 3652cc7802eb..b99558ed2100 100644
--- a/drivers/power/supply/qcom/pmic-voter.c
+++ b/drivers/power/supply/qcom/pmic-voter.c
@@ -20,7 +20,7 @@
#include <linux/pmic-voter.h>
-#define NUM_MAX_CLIENTS 8
+#define NUM_MAX_CLIENTS 16
#define DEBUG_FORCE_CLIENT "DEBUG_FORCE_CLIENT"
static DEFINE_SPINLOCK(votable_list_slock);
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 5ce74dab9aab..59216a567662 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -1054,6 +1054,25 @@ static void fg_notify_charger(struct fg_chip *chip)
fg_dbg(chip, FG_STATUS, "Notified charger on float voltage and FCC\n");
}
+static int fg_delta_bsoc_irq_en_cb(struct votable *votable, void *data,
+ int enable, const char *client)
+{
+ struct fg_chip *chip = data;
+
+ if (!chip->irqs[BSOC_DELTA_IRQ].irq)
+ return 0;
+
+ if (enable) {
+ enable_irq(chip->irqs[BSOC_DELTA_IRQ].irq);
+ enable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
+ } else {
+ disable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
+ disable_irq(chip->irqs[BSOC_DELTA_IRQ].irq);
+ }
+
+ return 0;
+}
+
static int fg_awake_cb(struct votable *votable, void *data, int awake,
const char *client)
{
@@ -1477,16 +1496,8 @@ static int fg_charge_full_update(struct fg_chip *chip)
return 0;
mutex_lock(&chip->charge_full_lock);
- if (!chip->charge_done && chip->bsoc_delta_irq_en) {
- disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
- disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
- chip->bsoc_delta_irq_en = false;
- } else if (chip->charge_done && !chip->bsoc_delta_irq_en) {
- enable_irq(fg_irqs[BSOC_DELTA_IRQ].irq);
- enable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
- chip->bsoc_delta_irq_en = true;
- }
-
+ vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER,
+ chip->charge_done, 0);
rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
&prop);
if (rc < 0) {
@@ -2198,6 +2209,17 @@ static bool is_profile_load_required(struct fg_chip *chip)
/* Check if integrity bit is set */
if (val & PROFILE_LOAD_BIT) {
fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
+
+ /* Whitelist the values */
+ val &= ~PROFILE_LOAD_BIT;
+ if (val != HLOS_RESTART_BIT && val != BOOTLOADER_LOAD_BIT &&
+ val != (BOOTLOADER_LOAD_BIT | BOOTLOADER_RESTART_BIT)) {
+ val |= PROFILE_LOAD_BIT;
+ pr_warn("Garbage value in profile integrity word: 0x%x\n",
+ val);
+ return true;
+ }
+
rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
if (rc < 0) {
@@ -3744,6 +3766,7 @@ static int fg_parse_dt(struct fg_chip *chip)
case PM660_SUBTYPE:
chip->sp = pmi8998_v2_sram_params;
chip->alg_flags = pmi8998_v2_alg_flags;
+ chip->use_ima_single_mode = true;
break;
default:
return -EINVAL;
@@ -4022,6 +4045,9 @@ static void fg_cleanup(struct fg_chip *chip)
if (chip->awake_votable)
destroy_votable(chip->awake_votable);
+ if (chip->delta_bsoc_irq_en_votable)
+ destroy_votable(chip->delta_bsoc_irq_en_votable);
+
if (chip->batt_id_chan)
iio_channel_release(chip->batt_id_chan);
@@ -4063,7 +4089,15 @@ static int fg_gen3_probe(struct platform_device *pdev)
chip);
if (IS_ERR(chip->awake_votable)) {
rc = PTR_ERR(chip->awake_votable);
- return rc;
+ goto exit;
+ }
+
+ chip->delta_bsoc_irq_en_votable = create_votable("FG_DELTA_BSOC_IRQ",
+ VOTE_SET_ANY,
+ fg_delta_bsoc_irq_en_cb, chip);
+ if (IS_ERR(chip->delta_bsoc_irq_en_votable)) {
+ rc = PTR_ERR(chip->delta_bsoc_irq_en_votable);
+ goto exit;
}
rc = fg_parse_dt(chip);
@@ -4090,7 +4124,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
rc = fg_get_batt_id(chip);
if (rc < 0) {
pr_err("Error in getting battery id, rc:%d\n", rc);
- return rc;
+ goto exit;
}
rc = fg_get_batt_profile(chip);
@@ -4148,11 +4182,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
/* Keep BSOC_DELTA_IRQ irq disabled until we require it */
- if (fg_irqs[BSOC_DELTA_IRQ].irq) {
- disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
- disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
- chip->bsoc_delta_irq_en = false;
- }
+ rerun_election(chip->delta_bsoc_irq_en_votable);
rc = fg_debugfs_create(chip);
if (rc < 0) {
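
The create_votable()/vote()/rerun_election() calls above follow the pmic-voter pattern: with VOTE_SET_ANY the callback is invoked with the logical OR of all client votes, and rerun_election() re-applies the current effective result (here, to keep BSOC_DELTA_IRQ disabled at probe until someone votes it on). A minimal sketch with illustrative votable and client names:

/* Sketch only; the votable and client names are made up. */
static int example_en_cb(struct votable *votable, void *data,
			 int enable, const char *client)
{
	struct fg_chip *chip = data;

	if (!chip->irqs[BSOC_DELTA_IRQ].irq)
		return 0;

	if (enable)
		enable_irq(chip->irqs[BSOC_DELTA_IRQ].irq);
	else
		disable_irq(chip->irqs[BSOC_DELTA_IRQ].irq);

	return 0;
}

static void example_votable_usage(struct fg_chip *chip)
{
	struct votable *v = create_votable("EXAMPLE_IRQ_EN", VOTE_SET_ANY,
					   example_en_cb, chip);

	if (IS_ERR(v))
		return;

	/* Each client votes independently; any true vote enables. */
	vote(v, "EXAMPLE_VOTER_A", true, 0);
	vote(v, "EXAMPLE_VOTER_B", false, 0);

	/* Re-run the callback with the current effective result. */
	rerun_election(v);
}
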
diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c
index ada231905df9..c74dc8989821 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo.c
@@ -29,6 +29,8 @@
#define QNOVO_PTRAIN_STS 0x08
#define QNOVO_ERROR_STS 0x09
#define QNOVO_ERROR_BIT BIT(0)
+#define QNOVO_ERROR_STS2 0x0A
+#define QNOVO_ERROR_CHARGING_DISABLED BIT(1)
#define QNOVO_INT_RT_STS 0x10
#define QNOVO_INT_SET_TYPE 0x11
#define QNOVO_INT_POLARITY_HIGH 0x12
@@ -109,20 +111,6 @@ struct qnovo_dt_props {
struct device_node *revid_dev_node;
};
-enum {
- QNOVO_NO_ERR_STS_BIT = BIT(0),
-};
-
-struct chg_props {
- bool charging;
- bool usb_online;
- bool dc_online;
-};
-
-struct chg_status {
- bool ok_to_qnovo;
-};
-
struct qnovo {
int base;
struct mutex write_lock;
@@ -141,13 +129,10 @@ struct qnovo {
s64 v_gain_mega;
struct notifier_block nb;
struct power_supply *batt_psy;
- struct power_supply *usb_psy;
- struct power_supply *dc_psy;
- struct chg_props cp;
- struct chg_status cs;
struct work_struct status_change_work;
int fv_uV_request;
int fcc_uA_request;
+ bool ok_to_qnovo;
};
static int debug_mask;
@@ -272,28 +257,22 @@ static int qnovo_disable_cb(struct votable *votable, void *data, int disable,
const char *client)
{
struct qnovo *chip = data;
- int rc = 0;
+ union power_supply_propval pval = {0};
+ int rc;
- if (disable) {
- rc = qnovo_batt_psy_update(chip, true);
- if (rc < 0)
- return rc;
- }
+ if (!is_batt_available(chip))
+ return -EINVAL;
- rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
- disable ? 0 : QNOVO_PTRAIN_EN_BIT);
+ pval.intval = !disable;
+ rc = power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
+ &pval);
if (rc < 0) {
- dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
- disable ? "disable" : "enable", rc);
- return rc;
- }
-
- if (!disable) {
- rc = qnovo_batt_psy_update(chip, false);
- if (rc < 0)
- return rc;
+ pr_err("Couldn't set prop qnovo_enable rc = %d\n", rc);
+ return -EINVAL;
}
+ rc = qnovo_batt_psy_update(chip, disable);
return rc;
}
@@ -325,36 +304,18 @@ static int qnovo_parse_dt(struct qnovo *chip)
return 0;
}
-static int qnovo_check_chg_version(struct qnovo *chip)
-{
- int rc;
-
- chip->pmic_rev_id = get_revid_data(chip->dt.revid_dev_node);
- if (IS_ERR(chip->pmic_rev_id)) {
- rc = PTR_ERR(chip->pmic_rev_id);
- if (rc != -EPROBE_DEFER)
- pr_err("Unable to get pmic_revid rc=%d\n", rc);
- return rc;
- }
-
- if ((chip->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE)
- && (chip->pmic_rev_id->rev4 < PMI8998_V2P0_REV4)) {
- chip->wa_flags |= QNOVO_NO_ERR_STS_BIT;
- }
-
- return 0;
-}
-
enum {
VER = 0,
OK_TO_QNOVO,
- ENABLE,
+ QNOVO_ENABLE,
+ PT_ENABLE,
FV_REQUEST,
FCC_REQUEST,
PE_CTRL_REG,
PE_CTRL2_REG,
PTRAIN_STS_REG,
INT_RT_STS_REG,
+ ERR_STS2_REG,
PREST1,
PPULS1,
NREST1,
@@ -394,6 +355,12 @@ struct param_info {
};
static struct param_info params[] = {
+ [PT_ENABLE] = {
+ .name = "PT_ENABLE",
+ .start_addr = QNOVO_PTRAIN_EN,
+ .num_regs = 1,
+ .units_str = "",
+ },
[FV_REQUEST] = {
.units_str = "uV",
},
@@ -424,6 +391,12 @@ static struct param_info params[] = {
.num_regs = 1,
.units_str = "",
},
+ [ERR_STS2_REG] = {
+ .name = "RAW_CHGR_ERR",
+ .start_addr = QNOVO_ERROR_STS2,
+ .num_regs = 1,
+ .units_str = "",
+ },
[PREST1] = {
.name = "PREST1",
.start_addr = QNOVO_PREST1_CTRL,
@@ -645,33 +618,73 @@ static ssize_t ok_to_qnovo_show(struct class *c, struct class_attribute *attr,
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
- return snprintf(buf, PAGE_SIZE, "%d\n", chip->cs.ok_to_qnovo);
+ return snprintf(buf, PAGE_SIZE, "%d\n", chip->ok_to_qnovo);
}
-static ssize_t enable_show(struct class *c, struct class_attribute *attr,
+static ssize_t qnovo_enable_show(struct class *c, struct class_attribute *attr,
char *ubuf)
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
- int val;
+ int val = get_effective_result(chip->disable_votable);
- val = get_client_vote(chip->disable_votable, USER_VOTER);
- val = !val;
- return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
+ return snprintf(ubuf, PAGE_SIZE, "%d\n", !val);
+}
+
+static ssize_t qnovo_enable_store(struct class *c, struct class_attribute *attr,
+ const char *ubuf, size_t count)
+{
+ struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+ unsigned long val;
+
+ if (kstrtoul(ubuf, 0, &val))
+ return -EINVAL;
+
+ vote(chip->disable_votable, USER_VOTER, !val, 0);
+
+ return count;
}
-static ssize_t enable_store(struct class *c, struct class_attribute *attr,
+static ssize_t pt_enable_show(struct class *c, struct class_attribute *attr,
+ char *ubuf)
+{
+ int i = attr - qnovo_attributes;
+ struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+ u8 buf[2] = {0, 0};
+ u16 regval;
+ int rc;
+
+ rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+ if (rc < 0) {
+ pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+ return -EINVAL;
+ }
+ regval = buf[1] << 8 | buf[0];
+
+ return snprintf(ubuf, PAGE_SIZE, "%d\n",
+ (int)(regval & QNOVO_PTRAIN_EN_BIT));
+}
+
+static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr,
const char *ubuf, size_t count)
{
struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
unsigned long val;
- bool disable;
+ int rc = 0;
+
+ if (get_effective_result(chip->disable_votable))
+ return -EINVAL;
if (kstrtoul(ubuf, 0, &val))
return -EINVAL;
- disable = !val;
+ rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+ (bool)val ? QNOVO_PTRAIN_EN_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
+ (bool)val ? "enable" : "disable", rc);
+ return rc;
+ }
- vote(chip->disable_votable, USER_VOTER, disable, 0);
return count;
}
@@ -707,6 +720,9 @@ static ssize_t val_store(struct class *c, struct class_attribute *attr,
if (i == FCC_REQUEST)
chip->fcc_uA_request = val;
+ if (!get_effective_result(chip->disable_votable))
+ qnovo_batt_psy_update(chip, false);
+
return count;
}
@@ -827,7 +843,11 @@ static ssize_t current_show(struct class *c, struct class_attribute *attr,
pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
return -EINVAL;
}
- regval_nA = buf[1] << 8 | buf[0];
+
+ if (buf[1] & BIT(5))
+ buf[1] |= GENMASK(7, 6);
+
+ regval_nA = (s16)(buf[1] << 8 | buf[0]);
regval_nA = div_s64(regval_nA * params[i].reg_to_unit_multiplier,
params[i].reg_to_unit_divider)
- params[i].reg_to_unit_offset;
@@ -1016,8 +1036,8 @@ static ssize_t batt_prop_show(struct class *c, struct class_attribute *attr,
static struct class_attribute qnovo_attributes[] = {
[VER] = __ATTR_RO(version),
[OK_TO_QNOVO] = __ATTR_RO(ok_to_qnovo),
- [ENABLE] = __ATTR(enable, 0644,
- enable_show, enable_store),
+ [QNOVO_ENABLE] = __ATTR_RW(qnovo_enable),
+ [PT_ENABLE] = __ATTR_RW(pt_enable),
[FV_REQUEST] = __ATTR(fv_uV_request, 0644,
val_show, val_store),
[FCC_REQUEST] = __ATTR(fcc_uA_request, 0644,
@@ -1030,6 +1050,8 @@ static struct class_attribute qnovo_attributes[] = {
reg_show, NULL),
[INT_RT_STS_REG] = __ATTR(INT_RT_STS_REG, 0444,
reg_show, NULL),
+ [ERR_STS2_REG] = __ATTR(ERR_STS2_REG, 0444,
+ reg_show, NULL),
[PREST1] = __ATTR(PREST1_mS, 0644,
time_show, time_store),
[PPULS1] = __ATTR(PPULS1_uC, 0644,
@@ -1050,7 +1072,7 @@ static struct class_attribute qnovo_attributes[] = {
time_show, NULL),
[PREST2] = __ATTR(PREST2_mS, 0644,
time_show, time_store),
- [PPULS2] = __ATTR(PPULS2_mS, 0644,
+ [PPULS2] = __ATTR(PPULS2_uC, 0644,
coulomb_show, coulomb_store),
[NREST2] = __ATTR(NREST2_mS, 0644,
time_show, time_store),
@@ -1081,95 +1103,40 @@ static struct class_attribute qnovo_attributes[] = {
__ATTR_NULL,
};
-static void get_chg_props(struct qnovo *chip, struct chg_props *cp)
+static int qnovo_update_status(struct qnovo *chip)
{
- union power_supply_propval pval;
u8 val = 0;
int rc;
+ bool charging;
+ bool changed = false;
- cp->charging = true;
- rc = qnovo_read(chip, QNOVO_ERROR_STS, &val, 1);
+ rc = qnovo_read(chip, QNOVO_ERROR_STS2, &val, 1);
if (rc < 0) {
pr_err("Couldn't read error sts rc = %d\n", rc);
- cp->charging = false;
+ charging = false;
} else {
- cp->charging = (!(val & QNOVO_ERROR_BIT));
+ charging = !(val & QNOVO_ERROR_CHARGING_DISABLED);
}
- if (chip->wa_flags & QNOVO_NO_ERR_STS_BIT) {
- /*
- * on v1.0 and v1.1 pmic's force charging to true
- * if things are not good to charge s/w gets a PTRAIN_DONE
- * interrupt
- */
- cp->charging = true;
- }
+ if (chip->ok_to_qnovo ^ charging) {
- cp->usb_online = false;
- if (!chip->usb_psy)
- chip->usb_psy = power_supply_get_by_name("usb");
- if (chip->usb_psy) {
- rc = power_supply_get_property(chip->usb_psy,
- POWER_SUPPLY_PROP_ONLINE, &pval);
- if (rc < 0)
- pr_err("Couldn't read usb online rc = %d\n", rc);
- else
- cp->usb_online = (bool)pval.intval;
- }
+ vote(chip->disable_votable, OK_TO_QNOVO_VOTER, !charging, 0);
+ if (!charging)
+ vote(chip->disable_votable, USER_VOTER, true, 0);
- cp->dc_online = false;
- if (!chip->dc_psy)
- chip->dc_psy = power_supply_get_by_name("dc");
- if (chip->dc_psy) {
- rc = power_supply_get_property(chip->dc_psy,
- POWER_SUPPLY_PROP_ONLINE, &pval);
- if (rc < 0)
- pr_err("Couldn't read dc online rc = %d\n", rc);
- else
- cp->dc_online = (bool)pval.intval;
+ chip->ok_to_qnovo = charging;
+ changed = true;
}
-}
-static void get_chg_status(struct qnovo *chip, const struct chg_props *cp,
- struct chg_status *cs)
-{
- cs->ok_to_qnovo = false;
-
- if (cp->charging &&
- (cp->usb_online || cp->dc_online))
- cs->ok_to_qnovo = true;
+ return changed;
}
static void status_change_work(struct work_struct *work)
{
struct qnovo *chip = container_of(work,
struct qnovo, status_change_work);
- bool notify_uevent = false;
- struct chg_props cp;
- struct chg_status cs;
-
- get_chg_props(chip, &cp);
- get_chg_status(chip, &cp, &cs);
-
- if (cs.ok_to_qnovo ^ chip->cs.ok_to_qnovo) {
- /*
- * when it is not okay to Qnovo charge, disable both voters,
- * so that when it becomes okay to Qnovo charge the user voter
- * has to specifically enable its vote to being Qnovo charging
- */
- if (!cs.ok_to_qnovo) {
- vote(chip->disable_votable, OK_TO_QNOVO_VOTER, 1, 0);
- vote(chip->disable_votable, USER_VOTER, 1, 0);
- } else {
- vote(chip->disable_votable, OK_TO_QNOVO_VOTER, 0, 0);
- }
- notify_uevent = true;
- }
- memcpy(&chip->cp, &cp, sizeof(struct chg_props));
- memcpy(&chip->cs, &cs, sizeof(struct chg_status));
-
- if (notify_uevent)
+ if (qnovo_update_status(chip))
kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
}
@@ -1181,8 +1148,8 @@ static int qnovo_notifier_call(struct notifier_block *nb,
if (ev != PSY_EVENT_PROP_CHANGED)
return NOTIFY_OK;
- if ((strcmp(psy->desc->name, "battery") == 0)
- || (strcmp(psy->desc->name, "usb") == 0))
+
+ if (strcmp(psy->desc->name, "battery") == 0)
schedule_work(&chip->status_change_work);
return NOTIFY_OK;
@@ -1192,8 +1159,7 @@ static irqreturn_t handle_ptrain_done(int irq, void *data)
{
struct qnovo *chip = data;
- /* disable user voter here */
- vote(chip->disable_votable, USER_VOTER, 0, 0);
+ qnovo_update_status(chip);
kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
return IRQ_HANDLED;
}
@@ -1206,7 +1172,7 @@ static int qnovo_hw_init(struct qnovo *chip)
u8 vadc_offset, vadc_gain;
u8 val;
- vote(chip->disable_votable, USER_VOTER, 1, 0);
+ vote(chip->disable_votable, USER_VOTER, true, 0);
val = 0;
rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
@@ -1318,6 +1284,9 @@ static int qnovo_request_interrupts(struct qnovo *chip)
irq_ptrain_done, rc);
return rc;
}
+
+ enable_irq_wake(irq_ptrain_done);
+
return rc;
}
@@ -1347,13 +1316,6 @@ static int qnovo_probe(struct platform_device *pdev)
return rc;
}
- rc = qnovo_check_chg_version(chip);
- if (rc < 0) {
- if (rc != -EPROBE_DEFER)
- pr_err("Couldn't check version rc=%d\n", rc);
- return rc;
- }
-
/* set driver data before resources request it */
platform_set_drvdata(pdev, chip);
@@ -1399,6 +1361,8 @@ static int qnovo_probe(struct platform_device *pdev)
goto unreg_notifier;
}
+ device_init_wakeup(chip->dev, true);
+
return rc;
unreg_notifier:
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index ee4b78181ade..e8249163e948 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -999,6 +999,8 @@ static int smb2_batt_set_prop(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_QNOVO:
chg->qnovo_fcc_ua = val->intval;
+ vote(chg->pl_disable_votable, PL_QNOVO_VOTER,
+ val->intval != -EINVAL && val->intval < 2000000, 0);
rc = rerun_election(chg->fcc_votable);
break;
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 50af1087278a..51c87f963307 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -725,7 +725,6 @@ void smblib_suspend_on_debug_battery(struct smb_charger *chg)
int smblib_rerun_apsd_if_required(struct smb_charger *chg)
{
- const struct apsd_result *apsd_result;
union power_supply_propval val;
int rc;
@@ -738,12 +737,6 @@ int smblib_rerun_apsd_if_required(struct smb_charger *chg)
if (!val.intval)
return 0;
- apsd_result = smblib_get_apsd_result(chg);
- if ((apsd_result->pst != POWER_SUPPLY_TYPE_UNKNOWN)
- && (apsd_result->pst != POWER_SUPPLY_TYPE_USB))
- /* if type is not usb or unknown no need to rerun apsd */
- return 0;
-
/* fetch the DPDM regulator */
if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
"dpdm-supply", NULL)) {
@@ -1346,6 +1339,14 @@ int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
if (chg->otg_en)
goto unlock;
+ if (!chg->usb_icl_votable) {
+ chg->usb_icl_votable = find_votable("USB_ICL");
+
+ if (!chg->usb_icl_votable)
+ return -EINVAL;
+ }
+ vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, true, 0);
+
rc = _smblib_vbus_regulator_enable(rdev);
if (rc >= 0)
chg->otg_en = true;
@@ -1409,6 +1410,8 @@ int smblib_vbus_regulator_disable(struct regulator_dev *rdev)
if (rc >= 0)
chg->otg_en = false;
+ if (chg->usb_icl_votable)
+ vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, false, 0);
unlock:
mutex_unlock(&chg->otg_oc_lock);
return rc;
@@ -2477,6 +2480,22 @@ int smblib_set_prop_typec_power_role(struct smb_charger *chg,
return -EINVAL;
}
+ if (power_role == UFP_EN_CMD_BIT) {
+ /* disable PBS workaround when forcing sink mode */
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0x0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+ rc);
+ }
+ } else {
+ /* restore it back to 0xA5 */
+ rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+ rc);
+ }
+ }
+
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
TYPEC_POWER_ROLE_CMD_MASK, power_role);
if (rc < 0) {
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 0645b60310b3..048e7c2b4091 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -37,6 +37,7 @@ enum print_reason {
#define USB_PSY_VOTER "USB_PSY_VOTER"
#define PL_TAPER_WORK_RUNNING_VOTER "PL_TAPER_WORK_RUNNING_VOTER"
#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER"
+#define PL_QNOVO_VOTER "PL_QNOVO_VOTER"
#define USBIN_I_VOTER "USBIN_I_VOTER"
#define USBIN_V_VOTER "USBIN_V_VOTER"
#define CHG_STATE_VOTER "CHG_STATE_VOTER"
@@ -50,6 +51,7 @@ enum print_reason {
#define VBUS_CC_SHORT_VOTER "VBUS_CC_SHORT_VOTER"
#define PD_INACTIVE_VOTER "PD_INACTIVE_VOTER"
#define BOOST_BACK_VOTER "BOOST_BACK_VOTER"
+#define USBIN_USBIN_BOOST_VOTER "USBIN_USBIN_BOOST_VOTER"
#define HVDCP_INDIRECT_VOTER "HVDCP_INDIRECT_VOTER"
#define MICRO_USB_VOTER "MICRO_USB_VOTER"
#define DEBUG_BOARD_VOTER "DEBUG_BOARD_VOTER"
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index f7c13390d477..b79060094cf6 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -1019,6 +1019,8 @@ enum {
#define CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG (MISC_BASE + 0xA0)
#define CFG_BUCKBOOST_FREQ_SELECT_BOOST_REG (MISC_BASE + 0xA1)
+#define TM_IO_DTEST4_SEL (MISC_BASE + 0xE9)
+
/* CHGR FREQ Peripheral registers */
#define FREQ_CLK_DIV_REG (CHGR_FREQ_BASE + 0x50)
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index c13f5103b02e..4916c87aced8 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -535,6 +535,8 @@ static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_CHARGING_ENABLED,
POWER_SUPPLY_PROP_PIN_ENABLED,
POWER_SUPPLY_PROP_INPUT_SUSPEND,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CURRENT_NOW,
@@ -574,6 +576,19 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
rc = smblib_get_usb_suspend(chg, &val->intval);
break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ if (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ rc = smblib_get_prop_input_current_limited(chg, val);
+ else
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ if (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ rc = smblib_get_charge_param(chg, &chg->param.usb_icl,
+ &val->intval);
+ else
+ val->intval = 0;
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
break;
@@ -653,6 +668,11 @@ static int smb138x_parallel_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
rc = smb138x_set_parallel_suspend(chip, (bool)val->intval);
break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ if (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ rc = smblib_set_charge_param(chg, &chg->param.usb_icl,
+ val->intval);
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
break;
diff --git a/drivers/soc/qcom/glink_private.h b/drivers/soc/qcom/glink_private.h
index cdd6988418f7..24053c853a83 100644
--- a/drivers/soc/qcom/glink_private.h
+++ b/drivers/soc/qcom/glink_private.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -693,6 +693,7 @@ enum ssr_command {
* edge: The G-Link edge name for the channel associated with
* this callback data
* do_cleanup_data: Structure containing the G-Link SSR do_cleanup message.
+ * cb_kref: Kref object to maintain cb_data reference.
*/
struct ssr_notify_data {
bool tx_done;
@@ -700,6 +701,7 @@ struct ssr_notify_data {
bool responded;
const char *edge;
struct do_cleanup_msg *do_cleanup_data;
+ struct kref cb_kref;
};
/**
@@ -734,6 +736,7 @@ struct subsys_info {
int notify_list_len;
bool link_up;
spinlock_t link_up_lock;
+ spinlock_t cb_lock;
};
/**
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
index 5e2dbc8b1d20..7e23b0bc3852 100644
--- a/drivers/soc/qcom/glink_ssr.c
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -115,6 +115,44 @@ static LIST_HEAD(subsystem_list);
static atomic_t responses_remaining = ATOMIC_INIT(0);
static wait_queue_head_t waitqueue;
+/**
+ * cb_data_release() - Free cb_data once its last reference is dropped
+ * @kref_ptr: pointer to kref.
+ *
+ * This function releases cb_data.
+ */
+static inline void cb_data_release(struct kref *kref_ptr)
+{
+ struct ssr_notify_data *cb_data;
+
+ cb_data = container_of(kref_ptr, struct ssr_notify_data, cb_kref);
+ kfree(cb_data);
+}
+
+/**
+ * check_and_get_cb_data() - Try to take a reference on cb_data's kref
+ * @ss_info: pointer to subsystem info structure.
+ *
+ * Return: NULL if cb_data is NULL, pointer to cb_data otherwise
+ */
+static struct ssr_notify_data *check_and_get_cb_data(
+ struct subsys_info *ss_info)
+{
+ struct ssr_notify_data *cb_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ss_info->cb_lock, flags);
+ if (ss_info->cb_data == NULL) {
+ GLINK_SSR_LOG("<SSR> %s: cb_data is NULL\n", __func__);
+ spin_unlock_irqrestore(&ss_info->cb_lock, flags);
+ return NULL;
+ }
+ kref_get(&ss_info->cb_data->cb_kref);
+ cb_data = ss_info->cb_data;
+ spin_unlock_irqrestore(&ss_info->cb_lock, flags);
+ return cb_data;
+}
+
static void rx_done_cb_worker(struct work_struct *work)
{
struct rx_done_ch_work *rx_done_work =
@@ -338,8 +376,10 @@ void close_ch_worker(struct work_struct *work)
ss_info->link_state_handle = link_state_handle;
BUG_ON(!ss_info->cb_data);
- kfree(ss_info->cb_data);
+ spin_lock_irqsave(&ss_info->cb_lock, flags);
+ kref_put(&ss_info->cb_data->cb_kref, cb_data_release);
ss_info->cb_data = NULL;
+ spin_unlock_irqrestore(&ss_info->cb_lock, flags);
kfree(close_work);
}
@@ -507,13 +547,18 @@ int notify_for_subsystem(struct subsys_info *ss_info)
return -ENODEV;
}
handle = ss_info_channel->handle;
- ss_leaf_entry->cb_data = ss_info_channel->cb_data;
+ ss_leaf_entry->cb_data = check_and_get_cb_data(
+ ss_info_channel);
+ if (!ss_leaf_entry->cb_data) {
+ GLINK_SSR_LOG("<SSR> %s: CB data is NULL\n", __func__);
+ atomic_dec(&responses_remaining);
+ continue;
+ }
spin_lock_irqsave(&ss_info->link_up_lock, flags);
if (IS_ERR_OR_NULL(ss_info_channel->handle) ||
- !ss_info_channel->cb_data ||
!ss_info_channel->link_up ||
- ss_info_channel->cb_data->event
+ ss_leaf_entry->cb_data->event
!= GLINK_CONNECTED) {
GLINK_SSR_LOG(
@@ -526,6 +571,8 @@ int notify_for_subsystem(struct subsys_info *ss_info)
spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
atomic_dec(&responses_remaining);
+ kref_put(&ss_leaf_entry->cb_data->cb_kref,
+ cb_data_release);
continue;
}
spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
@@ -536,6 +583,8 @@ int notify_for_subsystem(struct subsys_info *ss_info)
GLINK_SSR_ERR(
"%s %s: Could not allocate do_cleanup_msg\n",
"<SSR>", __func__);
+ kref_put(&ss_leaf_entry->cb_data->cb_kref,
+ cb_data_release);
return -ENOMEM;
}
@@ -567,6 +616,8 @@ int notify_for_subsystem(struct subsys_info *ss_info)
__func__);
}
atomic_dec(&responses_remaining);
+ kref_put(&ss_leaf_entry->cb_data->cb_kref,
+ cb_data_release);
continue;
}
@@ -596,10 +647,12 @@ int notify_for_subsystem(struct subsys_info *ss_info)
__func__);
}
atomic_dec(&responses_remaining);
+ kref_put(&ss_leaf_entry->cb_data->cb_kref,
+ cb_data_release);
continue;
}
-
sequence_number++;
+ kref_put(&ss_leaf_entry->cb_data->cb_kref, cb_data_release);
}
wait_ret = wait_event_timeout(waitqueue,
@@ -608,6 +661,21 @@ int notify_for_subsystem(struct subsys_info *ss_info)
list_for_each_entry(ss_leaf_entry, &ss_info->notify_list,
notify_list_node) {
+ ss_info_channel =
+ get_info_for_subsystem(ss_leaf_entry->ssr_name);
+ if (ss_info_channel == NULL) {
+ GLINK_SSR_ERR(
+ "<SSR> %s: unable to find subsystem name\n",
+ __func__);
+ continue;
+ }
+
+ ss_leaf_entry->cb_data = check_and_get_cb_data(
+ ss_info_channel);
+ if (!ss_leaf_entry->cb_data) {
+ GLINK_SSR_LOG("<SSR> %s: CB data is NULL\n", __func__);
+ continue;
+ }
if (!wait_ret && !IS_ERR_OR_NULL(ss_leaf_entry->cb_data)
&& !ss_leaf_entry->cb_data->responded) {
GLINK_SSR_ERR("%s %s: Subsystem %s %s\n",
@@ -626,6 +694,7 @@ int notify_for_subsystem(struct subsys_info *ss_info)
if (!IS_ERR_OR_NULL(ss_leaf_entry->cb_data))
ss_leaf_entry->cb_data->responded = false;
+ kref_put(&ss_leaf_entry->cb_data->cb_kref, cb_data_release);
}
complete(&notifications_successful_complete);
return 0;
@@ -644,6 +713,7 @@ static int configure_and_open_channel(struct subsys_info *ss_info)
struct glink_open_config open_cfg;
struct ssr_notify_data *cb_data = NULL;
void *handle = NULL;
+ unsigned long flags;
if (!ss_info) {
GLINK_SSR_ERR("<SSR> %s: ss_info structure invalid\n",
@@ -660,7 +730,10 @@ static int configure_and_open_channel(struct subsys_info *ss_info)
cb_data->responded = false;
cb_data->event = GLINK_SSR_EVENT_INIT;
cb_data->edge = ss_info->edge;
+ spin_lock_irqsave(&ss_info->cb_lock, flags);
ss_info->cb_data = cb_data;
+ kref_init(&cb_data->cb_kref);
+ spin_unlock_irqrestore(&ss_info->cb_lock, flags);
memset(&open_cfg, 0, sizeof(struct glink_open_config));
@@ -876,6 +949,7 @@ static int glink_ssr_probe(struct platform_device *pdev)
ss_info->link_state_handle = NULL;
ss_info->cb_data = NULL;
spin_lock_init(&ss_info->link_up_lock);
+ spin_lock_init(&ss_info->cb_lock);
nb = kmalloc(sizeof(struct restart_notifier_block), GFP_KERNEL);
if (!nb) {
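
The glink_ssr changes above pair a spinlock (guarding the ss_info->cb_data pointer) with a kref (guarding the object's lifetime). The take/drop sides of that pattern look roughly like this; the helper names are illustrative.

/* Sketch only; mirrors check_and_get_cb_data()/cb_data_release(). */
static struct ssr_notify_data *example_get(struct subsys_info *ss_info)
{
	struct ssr_notify_data *cb_data = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ss_info->cb_lock, flags);
	if (ss_info->cb_data) {
		kref_get(&ss_info->cb_data->cb_kref);
		cb_data = ss_info->cb_data;
	}
	spin_unlock_irqrestore(&ss_info->cb_lock, flags);

	return cb_data;
}

static void example_put(struct ssr_notify_data *cb_data)
{
	/* Runs cb_data_release() and frees the object on the last put. */
	kref_put(&cb_data->cb_kref, cb_data_release);
}
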
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 16ab2400cd69..70874da9d176 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -62,7 +62,7 @@ module_param(qmi_timeout, ulong, 0600);
#define WLFW_CLIENT_ID 0x4b4e454c
#define MAX_PROP_SIZE 32
#define NUM_LOG_PAGES 10
-#define NUM_REG_LOG_PAGES 4
+#define NUM_LOG_LONG_PAGES 4
#define ICNSS_MAGIC 0x5abc5abc
#define ICNSS_SERVICE_LOCATION_CLIENT_NAME "ICNSS-WLAN"
@@ -77,6 +77,11 @@ module_param(qmi_timeout, ulong, 0600);
ipc_log_string(icnss_ipc_log_context, _x); \
} while (0)
+#define icnss_ipc_log_long_string(_x...) do { \
+ if (icnss_ipc_log_long_context) \
+ ipc_log_string(icnss_ipc_log_long_context, _x); \
+ } while (0)
+
#define icnss_pr_err(_fmt, ...) do { \
pr_err(_fmt, ##__VA_ARGS__); \
icnss_ipc_log_string("ERR: " pr_fmt(_fmt), \
@@ -101,6 +106,12 @@ module_param(qmi_timeout, ulong, 0600);
##__VA_ARGS__); \
} while (0)
+#define icnss_pr_vdbg(_fmt, ...) do { \
+ pr_debug(_fmt, ##__VA_ARGS__); \
+ icnss_ipc_log_long_string("DBG: " pr_fmt(_fmt), \
+ ##__VA_ARGS__); \
+ } while (0)
+
#ifdef CONFIG_ICNSS_DEBUG
#define ICNSS_ASSERT(_condition) do { \
if (!(_condition)) { \
@@ -138,6 +149,7 @@ uint64_t dynamic_feature_mask = QMI_WLFW_FW_REJUVENATE_V01;
module_param(dynamic_feature_mask, ullong, 0600);
void *icnss_ipc_log_context;
+void *icnss_ipc_log_long_context;
#define ICNSS_EVENT_PENDING 2989
@@ -367,7 +379,7 @@ static void icnss_pm_stay_awake(struct icnss_priv *priv)
if (atomic_inc_return(&priv->pm_count) != 1)
return;
- icnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n", priv->state,
+ icnss_pr_vdbg("PM stay awake, state: 0x%lx, count: %d\n", priv->state,
atomic_read(&priv->pm_count));
pm_stay_awake(&priv->pdev->dev);
@@ -384,7 +396,7 @@ static void icnss_pm_relax(struct icnss_priv *priv)
if (r != 0)
return;
- icnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n", priv->state,
+ icnss_pr_vdbg("PM relax, state: 0x%lx, count: %d\n", priv->state,
atomic_read(&priv->pm_count));
pm_relax(&priv->pdev->dev);
@@ -718,7 +730,7 @@ static int icnss_vreg_on(struct icnss_priv *priv)
if (!vreg_info->reg)
continue;
- icnss_pr_dbg("Regulator %s being enabled\n", vreg_info->name);
+ icnss_pr_vdbg("Regulator %s being enabled\n", vreg_info->name);
ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
vreg_info->max_v);
@@ -780,7 +792,7 @@ static int icnss_vreg_off(struct icnss_priv *priv)
if (!vreg_info->reg)
continue;
- icnss_pr_dbg("Regulator %s being disabled\n", vreg_info->name);
+ icnss_pr_vdbg("Regulator %s being disabled\n", vreg_info->name);
ret = regulator_disable(vreg_info->reg);
if (ret)
@@ -814,7 +826,7 @@ static int icnss_clk_init(struct icnss_priv *priv)
if (!clk_info->handle)
continue;
- icnss_pr_dbg("Clock %s being enabled\n", clk_info->name);
+ icnss_pr_vdbg("Clock %s being enabled\n", clk_info->name);
if (clk_info->freq) {
ret = clk_set_rate(clk_info->handle, clk_info->freq);
@@ -861,7 +873,7 @@ static int icnss_clk_deinit(struct icnss_priv *priv)
if (!clk_info->handle)
continue;
- icnss_pr_dbg("Clock %s being disabled\n", clk_info->name);
+ icnss_pr_vdbg("Clock %s being disabled\n", clk_info->name);
clk_disable_unprepare(clk_info->handle);
}
@@ -1734,7 +1746,7 @@ static void icnss_qmi_wlfw_clnt_notify_work(struct work_struct *work)
if (!penv || !penv->wlfw_clnt)
return;
- icnss_pr_dbg("Receiving Event in work queue context\n");
+ icnss_pr_vdbg("Receiving Event in work queue context\n");
do {
} while ((ret = qmi_recv_msg(penv->wlfw_clnt)) == 0);
@@ -1742,13 +1754,13 @@ static void icnss_qmi_wlfw_clnt_notify_work(struct work_struct *work)
if (ret != -ENOMSG)
icnss_pr_err("Error receiving message: %d\n", ret);
- icnss_pr_dbg("Receiving Event completed\n");
+ icnss_pr_vdbg("Receiving Event completed\n");
}
static void icnss_qmi_wlfw_clnt_notify(struct qmi_handle *handle,
enum qmi_event_type event, void *notify_priv)
{
- icnss_pr_dbg("QMI client notify: %d\n", event);
+ icnss_pr_vdbg("QMI client notify: %d\n", event);
if (!penv || !penv->wlfw_clnt)
return;
@@ -2309,7 +2321,7 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
struct icnss_priv *priv = container_of(nb, struct icnss_priv,
modem_ssr_nb);
- icnss_pr_dbg("Modem-Notify: event %lu\n", code);
+ icnss_pr_vdbg("Modem-Notify: event %lu\n", code);
if (code == SUBSYS_AFTER_SHUTDOWN &&
notif->crashed == CRASH_STATUS_ERR_FATAL) {
@@ -2656,7 +2668,7 @@ int icnss_ce_request_irq(unsigned int ce_id,
goto out;
}
- icnss_pr_dbg("CE request IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
+ icnss_pr_vdbg("CE request IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
icnss_pr_err("Invalid CE ID, ce_id: %d\n", ce_id);
@@ -2682,7 +2694,7 @@ int icnss_ce_request_irq(unsigned int ce_id,
irq_entry->irq = irq;
irq_entry->handler = handler;
- icnss_pr_dbg("IRQ requested: %d, ce_id: %d\n", irq, ce_id);
+ icnss_pr_vdbg("IRQ requested: %d, ce_id: %d\n", irq, ce_id);
penv->stats.ce_irqs[ce_id].request++;
out:
@@ -2701,7 +2713,7 @@ int icnss_ce_free_irq(unsigned int ce_id, void *ctx)
goto out;
}
- icnss_pr_dbg("CE free IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
+ icnss_pr_vdbg("CE free IRQ: %d, state: 0x%lx\n", ce_id, penv->state);
if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
icnss_pr_err("Invalid CE ID to free, ce_id: %d\n", ce_id);
@@ -2735,7 +2747,7 @@ void icnss_enable_irq(unsigned int ce_id)
return;
}
- icnss_pr_dbg("Enable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
+ icnss_pr_vdbg("Enable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
penv->state);
if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
@@ -2759,7 +2771,7 @@ void icnss_disable_irq(unsigned int ce_id)
return;
}
- icnss_pr_dbg("Disable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
+ icnss_pr_vdbg("Disable IRQ: ce_id: %d, state: 0x%lx\n", ce_id,
penv->state);
if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS) {
@@ -4259,7 +4271,7 @@ static int icnss_pm_suspend(struct device *dev)
return -EINVAL;
}
- icnss_pr_dbg("PM Suspend, state: 0x%lx\n", priv->state);
+ icnss_pr_vdbg("PM Suspend, state: 0x%lx\n", priv->state);
if (!priv->ops || !priv->ops->pm_suspend ||
!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
@@ -4288,7 +4300,7 @@ static int icnss_pm_resume(struct device *dev)
return -EINVAL;
}
- icnss_pr_dbg("PM resume, state: 0x%lx\n", priv->state);
+ icnss_pr_vdbg("PM resume, state: 0x%lx\n", priv->state);
if (!priv->ops || !priv->ops->pm_resume ||
!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
@@ -4317,7 +4329,7 @@ static int icnss_pm_suspend_noirq(struct device *dev)
return -EINVAL;
}
- icnss_pr_dbg("PM suspend_noirq, state: 0x%lx\n", priv->state);
+ icnss_pr_vdbg("PM suspend_noirq, state: 0x%lx\n", priv->state);
if (!priv->ops || !priv->ops->suspend_noirq ||
!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
@@ -4346,7 +4358,7 @@ static int icnss_pm_resume_noirq(struct device *dev)
return -EINVAL;
}
- icnss_pr_dbg("PM resume_noirq, state: 0x%lx\n", priv->state);
+ icnss_pr_vdbg("PM resume_noirq, state: 0x%lx\n", priv->state);
if (!priv->ops || !priv->ops->resume_noirq ||
!test_bit(ICNSS_DRIVER_PROBED, &priv->state))
@@ -4397,6 +4409,11 @@ static int __init icnss_initialize(void)
if (!icnss_ipc_log_context)
icnss_pr_err("Unable to create log context\n");
+ icnss_ipc_log_long_context = ipc_log_context_create(NUM_LOG_LONG_PAGES,
+ "icnss_long", 0);
+ if (!icnss_ipc_log_long_context)
+ icnss_pr_err("Unable to create log long context\n");
+
return platform_driver_register(&icnss_driver);
}
@@ -4405,6 +4422,8 @@ static void __exit icnss_exit(void)
platform_driver_unregister(&icnss_driver);
ipc_log_context_destroy(icnss_ipc_log_context);
icnss_ipc_log_context = NULL;
+ ipc_log_context_destroy(icnss_ipc_log_long_context);
+ icnss_ipc_log_long_context = NULL;
}
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index c252fa9d1a96..6e5ddc4a3a7d 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -465,6 +465,8 @@ static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr,
if (region == NULL) {
pil_err(priv->desc, "Failed to allocate relocatable region of size %zx\n",
size);
+ priv->region_start = 0;
+ priv->region_end = 0;
return -ENOMEM;
}
diff --git a/drivers/soc/qcom/qdsp6v2/audio_notifier.c b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
index b120883afbb0..a59b436234c7 100644
--- a/drivers/soc/qcom/qdsp6v2/audio_notifier.c
+++ b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
@@ -626,9 +626,11 @@ static int __init audio_notifier_late_init(void)
* If pdr registration failed, register clients on next service
* Do in late init to ensure that SSR subsystem is initialized
*/
+ mutex_lock(&notifier_mutex);
if (!audio_notifer_is_service_enabled(AUDIO_NOTIFIER_PDR_SERVICE))
audio_notifer_reg_all_clients();
+ mutex_unlock(&notifier_mutex);
return 0;
}
late_initcall(audio_notifier_late_init);
diff --git a/drivers/soc/qcom/qpnp-haptic.c b/drivers/soc/qcom/qpnp-haptic.c
index f0f9306ebe47..c86eebcd390f 100644
--- a/drivers/soc/qcom/qpnp-haptic.c
+++ b/drivers/soc/qcom/qpnp-haptic.c
@@ -64,6 +64,7 @@
#define QPNP_HAP_ACT_TYPE_MASK BIT(0)
#define QPNP_HAP_LRA 0x0
#define QPNP_HAP_ERM 0x1
+#define QPNP_HAP_PM660_HW_AUTO_RES_MODE_BIT BIT(3)
#define QPNP_HAP_AUTO_RES_MODE_MASK GENMASK(6, 4)
#define QPNP_HAP_AUTO_RES_MODE_SHIFT 4
#define QPNP_HAP_PM660_AUTO_RES_MODE_BIT BIT(7)
@@ -308,6 +309,7 @@ struct qpnp_pwm_info {
* @ reg_play - play register
* @ lra_res_cal_period - period for resonance calibration
* @ sc_duration - counter to determine the duration of short circuit condition
+ * @ lra_hw_auto_resonance - enable hardware auto resonance
* @ state - current state of haptics
* @ wf_update - waveform update flag
* @ pwm_cfg_state - pwm mode configuration state
@@ -373,6 +375,7 @@ struct qpnp_hap {
u8 pmic_subtype;
u8 auto_res_mode;
u8 clk_trim_error_code;
+ bool lra_hw_auto_resonance;
bool vcc_pon_enabled;
bool state;
bool manage_pon_supply;
@@ -724,6 +727,15 @@ static int qpnp_hap_lra_auto_res_config(struct qpnp_hap *hap)
return rc;
}
+ if (hap->lra_hw_auto_resonance) {
+ rc = qpnp_hap_masked_write_reg(hap,
+ QPNP_HAP_PM660_HW_AUTO_RES_MODE_BIT,
+ QPNP_HAP_AUTO_RES_CTRL(hap->base),
+ QPNP_HAP_PM660_HW_AUTO_RES_MODE_BIT);
+ if (rc)
+ return rc;
+ }
+
if (hap->lra_res_cal_period < QPNP_HAP_RES_CAL_PERIOD_MIN)
hap->lra_res_cal_period = QPNP_HAP_RES_CAL_PERIOD_MIN;
@@ -1628,7 +1640,8 @@ static int qpnp_hap_set(struct qpnp_hap *hap, int on)
return rc;
}
if (hap->act_type == QPNP_HAP_LRA &&
- hap->correct_lra_drive_freq) {
+ hap->correct_lra_drive_freq &&
+ !hap->lra_hw_auto_resonance) {
/*
* Start timer to poll Auto Resonance error bit
*/
@@ -1646,13 +1659,15 @@ static int qpnp_hap_set(struct qpnp_hap *hap, int on)
if (hap->act_type == QPNP_HAP_LRA &&
hap->correct_lra_drive_freq &&
- (hap->status_flags & AUTO_RESONANCE_ENABLED)) {
+ (hap->status_flags & AUTO_RESONANCE_ENABLED) &&
+ !hap->lra_hw_auto_resonance) {
update_lra_frequency(hap);
}
rc = qpnp_hap_mod_enable(hap, on);
if (hap->act_type == QPNP_HAP_LRA &&
- hap->correct_lra_drive_freq) {
+ hap->correct_lra_drive_freq &&
+ !hap->lra_hw_auto_resonance) {
hrtimer_cancel(&hap->auto_res_err_poll_timer);
}
}
@@ -1670,7 +1685,8 @@ static void qpnp_hap_td_enable(struct timed_output_dev *dev, int value)
mutex_lock(&hap->lock);
if (hap->act_type == QPNP_HAP_LRA &&
- hap->correct_lra_drive_freq)
+ hap->correct_lra_drive_freq &&
+ !hap->lra_hw_auto_resonance)
hrtimer_cancel(&hap->auto_res_err_poll_timer);
hrtimer_cancel(&hap->hap_timer);
@@ -2199,6 +2215,10 @@ static int qpnp_hap_parse_dt(struct qpnp_hap *hap)
return rc;
}
+ hap->lra_hw_auto_resonance =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,lra-hw-auto-resonance");
+
hap->perform_lra_auto_resonance_search =
of_property_read_bool(pdev->dev.of_node,
"qcom,perform-lra-auto-resonance-search");
@@ -2453,7 +2473,8 @@ static int qpnp_haptic_probe(struct platform_device *pdev)
hap->timed_dev.get_time = qpnp_hap_get_time;
hap->timed_dev.enable = qpnp_hap_td_enable;
- if (hap->act_type == QPNP_HAP_LRA && hap->correct_lra_drive_freq) {
+ if (hap->act_type == QPNP_HAP_LRA && hap->correct_lra_drive_freq &&
+ !hap->lra_hw_auto_resonance) {
hrtimer_init(&hap->auto_res_err_poll_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
hap->auto_res_err_poll_timer.function = detect_auto_res_error;
@@ -2495,7 +2516,8 @@ sysfs_fail:
timed_output_dev_unregister(&hap->timed_dev);
timed_output_fail:
cancel_work_sync(&hap->work);
- if (hap->act_type == QPNP_HAP_LRA && hap->correct_lra_drive_freq)
+ if (hap->act_type == QPNP_HAP_LRA && hap->correct_lra_drive_freq &&
+ !hap->lra_hw_auto_resonance)
hrtimer_cancel(&hap->auto_res_err_poll_timer);
hrtimer_cancel(&hap->hap_timer);
mutex_destroy(&hap->lock);
@@ -2514,7 +2536,8 @@ static int qpnp_haptic_remove(struct platform_device *pdev)
&qpnp_hap_attrs[i].attr);
cancel_work_sync(&hap->work);
- if (hap->act_type == QPNP_HAP_LRA && hap->correct_lra_drive_freq)
+ if (hap->act_type == QPNP_HAP_LRA && hap->correct_lra_drive_freq &&
+ !hap->lra_hw_auto_resonance)
hrtimer_cancel(&hap->auto_res_err_poll_timer);
hrtimer_cancel(&hap->hap_timer);
timed_output_dev_unregister(&hap->timed_dev);
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index e937848d7edf..fa916ac5ade4 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -104,6 +104,7 @@ struct qmi_client_info {
struct work_struct svc_exit;
struct work_struct svc_rcv_msg;
struct work_struct ind_ack;
+ struct work_struct qmi_handle_free;
struct workqueue_struct *svc_event_wq;
struct qmi_handle *clnt_handle;
struct notifier_block notifier;
@@ -123,6 +124,18 @@ static void root_service_clnt_recv_msg(struct work_struct *work);
static void root_service_service_arrive(struct work_struct *work);
static void root_service_exit_work(struct work_struct *work);
+static void free_qmi_handle(struct work_struct *work)
+{
+ struct qmi_client_info *data = container_of(work,
+ struct qmi_client_info, qmi_handle_free);
+
+ mutex_lock(&qmi_client_release_lock);
+ data->service_connected = false;
+ qmi_handle_destroy(data->clnt_handle);
+ data->clnt_handle = NULL;
+ mutex_unlock(&qmi_client_release_lock);
+}
+
static struct service_notif_info *_find_service_info(const char *service_path)
{
struct service_notif_info *service_notif;
@@ -426,11 +439,7 @@ static void root_service_service_exit(struct qmi_client_info *data,
* Destroy client handle and try connecting when
* service comes up again.
*/
- mutex_lock(&qmi_client_release_lock);
- data->service_connected = false;
- qmi_handle_destroy(data->clnt_handle);
- data->clnt_handle = NULL;
- mutex_unlock(&qmi_client_release_lock);
+ queue_work(data->svc_event_wq, &data->qmi_handle_free);
}
static void root_service_exit_work(struct work_struct *work)
@@ -486,7 +495,7 @@ static int ssr_event_notify(struct notifier_block *this,
info->subsys_state = ROOT_PD_SHUTDOWN;
break;
}
- queue_work(info->svc_event_wq, &info->svc_exit);
+ root_service_service_exit(info, info->subsys_state);
break;
default:
break;
@@ -561,6 +570,7 @@ static void *add_service_notif(const char *service_path, int instance_id,
INIT_WORK(&qmi_data->svc_exit, root_service_exit_work);
INIT_WORK(&qmi_data->svc_rcv_msg, root_service_clnt_recv_msg);
INIT_WORK(&qmi_data->ind_ack, send_ind_ack);
+ INIT_WORK(&qmi_data->qmi_handle_free, free_qmi_handle);
*curr_state = service_notif->curr_state =
SERVREG_NOTIF_SERVICE_STATE_UNINIT_V01;
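
The svc_exit path above now defers qmi_handle_destroy() to a dedicated work item so the teardown runs in workqueue context instead of directly from the SSR notifier callback. The wiring is the usual INIT_WORK()/queue_work() pair; the sketch below simply restates that structure.

/* Sketch of the deferral pattern used above. */
static void example_teardown_work(struct work_struct *work)
{
	struct qmi_client_info *data = container_of(work,
			struct qmi_client_info, qmi_handle_free);

	mutex_lock(&qmi_client_release_lock);
	data->service_connected = false;
	qmi_handle_destroy(data->clnt_handle);
	data->clnt_handle = NULL;
	mutex_unlock(&qmi_client_release_lock);
}

/* At init time:     INIT_WORK(&data->qmi_handle_free, example_teardown_work);
 * On service exit:  queue_work(data->svc_event_wq, &data->qmi_handle_free);
 */
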
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 457361ba5ff8..0c44d76bc7c7 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -1744,7 +1744,9 @@ static int spcom_handle_lock_ion_buf_command(struct spcom_channel *ch,
}
}
- pr_err("fd [%d] ion buf not found.\n", fd);
+ pr_err("no free entry to store ion handle of fd [%d].\n", fd);
+ /* decrement back the ref count */
+ ion_free(spcom_dev->ion_client, ion_handle);
return -EFAULT;
}
diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c
index 1ceded4db79f..f601e6646852 100644
--- a/drivers/soc/qcom/wcd-dsp-glink.c
+++ b/drivers/soc/qcom/wcd-dsp-glink.c
@@ -531,6 +531,13 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv,
u8 *payload;
u32 ch_size, ch_cfg_size;
+ mutex_lock(&wpriv->glink_mutex);
+ if (wpriv->ch) {
+ dev_err(wpriv->dev, "%s: glink ch memory is already allocated\n",
+ __func__);
+ ret = -EINVAL;
+ goto done;
+ }
payload = (u8 *)pkt->payload;
no_of_channels = pkt->no_of_channels;
@@ -611,6 +618,7 @@ err_ch_mem:
wpriv->no_of_channels = 0;
done:
+ mutex_unlock(&wpriv->glink_mutex);
return ret;
}
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index b7fe42582e89..faa81c28a0d3 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -16,6 +16,7 @@
*
*/
+#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
@@ -402,6 +403,15 @@ static void ion_handle_get(struct ion_handle *handle)
kref_get(&handle->ref);
}
+/* Must hold the client lock */
+static struct ion_handle *ion_handle_get_check_overflow(struct ion_handle *handle)
+{
+ if (atomic_read(&handle->ref.refcount) + 1 == 0)
+ return ERR_PTR(-EOVERFLOW);
+ ion_handle_get(handle);
+ return handle;
+}
+
static int ion_handle_put_nolock(struct ion_handle *handle)
{
int ret;
@@ -448,9 +458,9 @@ static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
handle = idr_find(&client->idr, id);
if (handle)
- ion_handle_get(handle);
+ return ion_handle_get_check_overflow(handle);
- return handle ? handle : ERR_PTR(-EINVAL);
+ return ERR_PTR(-EINVAL);
}
struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
@@ -1412,7 +1422,7 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
/* if a handle exists for this buffer just take a reference to it */
handle = ion_handle_lookup(client, buffer);
if (!IS_ERR(handle)) {
- ion_handle_get(handle);
+ handle = ion_handle_get_check_overflow(handle);
mutex_unlock(&client->lock);
goto end;
}
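
ion_handle_get_check_overflow() above refuses to take a reference that would wrap the counter to zero. On this kernel, where struct kref wraps an atomic_t, the same saturation guard for a generic kref reduces to the sketch below.

/* Sketch only. */
static int example_kref_get_check_overflow(struct kref *ref)
{
	if (atomic_read(&ref->refcount) + 1 == 0)
		return -EOVERFLOW;	/* would wrap the refcount */

	kref_get(ref);
	return 0;
}
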
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index cc616d678d42..830ef92ffe80 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -3155,6 +3155,11 @@ static void msm_hs_pm_suspend(struct device *dev)
mutex_lock(&msm_uport->mtx);
client_count = atomic_read(&msm_uport->client_count);
+ msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
+ msm_hs_resource_off(msm_uport);
+ obs_manage_irq(msm_uport, false);
+ msm_hs_clk_bus_unvote(msm_uport);
+
/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
if (msm_uport->obs) {
ret = pinctrl_select_state(msm_uport->pinctrl,
@@ -3164,10 +3169,6 @@ static void msm_hs_pm_suspend(struct device *dev)
__func__);
}
- msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
- msm_hs_resource_off(msm_uport);
- obs_manage_irq(msm_uport, false);
- msm_hs_clk_bus_unvote(msm_uport);
if (!atomic_read(&msm_uport->client_req_state))
enable_wakeup_interrupt(msm_uport);
LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
@@ -3198,6 +3199,16 @@ static int msm_hs_pm_resume(struct device *dev)
goto exit_pm_resume;
if (!atomic_read(&msm_uport->client_req_state))
disable_wakeup_interrupt(msm_uport);
+
+ /* For OBS, don't use wakeup interrupt, set gpio to active state */
+ if (msm_uport->obs) {
+ ret = pinctrl_select_state(msm_uport->pinctrl,
+ msm_uport->gpio_state_active);
+ if (ret)
+ MSM_HS_ERR("%s():Error selecting active state",
+ __func__);
+ }
+
ret = msm_hs_clk_bus_vote(msm_uport);
if (ret) {
MSM_HS_ERR("%s:Failed clock vote %d\n", __func__, ret);
@@ -3208,15 +3219,6 @@ static int msm_hs_pm_resume(struct device *dev)
msm_uport->pm_state = MSM_HS_PM_ACTIVE;
msm_hs_resource_on(msm_uport);
- /* For OBS, don't use wakeup interrupt, set gpio to active state */
- if (msm_uport->obs) {
- ret = pinctrl_select_state(msm_uport->pinctrl,
- msm_uport->gpio_state_active);
- if (ret)
- MSM_HS_ERR("%s():Error selecting active state",
- __func__);
- }
-
LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
"%s:PM State:Active client_count %d\n", __func__, client_count);
exit_pm_resume:
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index e309dec68a75..1fd5a95b6e99 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2336,9 +2336,6 @@ reset:
bh->outreq->complete = bulk_out_complete;
}
- /* prevents usb LPM until thread runs to completion */
- usb_gadget_autopm_get_noresume(common->gadget);
-
common->running = 1;
for (i = 0; i < ARRAY_SIZE(common->luns); ++i)
if (common->luns[i])
@@ -2354,6 +2351,10 @@ static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct fsg_dev *fsg = fsg_from_func(f);
fsg->common->new_fsg = fsg;
+
+ /* prevents usb LPM until thread runs to completion */
+ usb_gadget_autopm_get_async(fsg->common->gadget);
+
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
return USB_GADGET_DELAYED_STATUS;
}
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index bf7460f25e61..4a0b3a0aa65e 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -1504,6 +1504,7 @@ mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
struct usb_request *req;
int i;
+ mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
mutex_lock(&dev->read_mutex);
while ((req = mtp_req_get(dev, &dev->tx_idle)))
mtp_request_free(req, dev->ep_in);
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index c9df25286342..aa11cf2f7417 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -330,10 +330,6 @@ static int msm_ssphy_qmp_init(struct usb_phy *uphy)
phy->clk_enabled = true;
}
- /* select usb3 phy mode */
- if (phy->tcsr_usb3_dp_phymode)
- writel_relaxed(0x0, phy->tcsr_usb3_dp_phymode);
-
writel_relaxed(0x01,
phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
@@ -409,6 +405,10 @@ static int msm_ssphy_qmp_reset(struct usb_phy *uphy)
goto deassert_phy_phy_reset;
}
+ /* select usb3 phy mode */
+ if (phy->tcsr_usb3_dp_phymode)
+ writel_relaxed(0x0, phy->tcsr_usb3_dp_phymode);
+
/* Deassert USB3 PHY CSR reset */
ret = reset_control_deassert(phy->phy_reset);
if (ret) {
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 67adc46d1e39..2d499ef903d3 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -1577,8 +1577,14 @@ int mdss_dp_on_hpd(struct mdss_dp_drv_pdata *dp_drv)
link_training:
dp_drv->power_on = true;
- while (-EAGAIN == mdss_dp_setup_main_link(dp_drv, true))
+ while (-EAGAIN == mdss_dp_setup_main_link(dp_drv, true)) {
pr_debug("MAIN LINK TRAINING RETRY\n");
+ mdss_dp_mainlink_ctrl(&dp_drv->ctrl_io, false);
+ /* Disable DP mainlink clocks */
+ mdss_dp_disable_mainlink_clocks(dp_drv);
+ /* Enable DP mainlink clocks with reduced link rate */
+ mdss_dp_enable_mainlink_clocks(dp_drv);
+ }
dp_drv->cont_splash = 0;
@@ -1793,8 +1799,6 @@ static int mdss_dp_edid_init(struct mdss_panel_data *pdata)
dp_drv->edid_buf = edid_init_data.buf;
dp_drv->edid_buf_size = edid_init_data.buf_size;
- mdss_dp_set_default_resolution(dp_drv);
-
return 0;
}
@@ -2009,14 +2013,21 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp)
pr_debug("start\n");
- mdss_dp_dpcd_cap_read(dp);
+ ret = mdss_dp_dpcd_cap_read(dp);
+ if (ret || !mdss_dp_aux_is_link_rate_valid(dp->dpcd.max_link_rate) ||
+ !mdss_dp_aux_is_lane_count_valid(dp->dpcd.max_lane_count)) {
+ /*
+ * If there is an error in parsing DPCD or if DPCD reports
+ * unsupported link parameters then set the default link
+ * parameters and continue to read EDID.
+ */
+ pr_err("dpcd read failed, set failsafe parameters\n");
+ mdss_dp_set_default_link_parameters(dp);
+ }
ret = mdss_dp_edid_read(dp);
if (ret) {
- pr_debug("edid read error, setting default resolution\n");
-
- mdss_dp_set_default_resolution(dp);
- mdss_dp_set_default_link_parameters(dp);
+ pr_err("edid read error, setting default resolution\n");
goto notify;
}
@@ -2027,15 +2038,19 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp)
ret = hdmi_edid_parser(dp->panel_data.panel_info.edid_data);
if (ret) {
pr_err("edid parse failed, setting default resolution\n");
-
- mdss_dp_set_default_resolution(dp);
- mdss_dp_set_default_link_parameters(dp);
goto notify;
}
dp->sink_info_read = true;
notify:
+ if (ret) {
+ /* set failsafe parameters */
+ pr_info("falling back to failsafe mode\n");
+ mdss_dp_set_default_resolution(dp);
+ mdss_dp_set_default_link_parameters(dp);
+ }
+
/* Check if there is a PHY_TEST_PATTERN request when we get HPD high.
* Update the DP driver with the test parameters including link rate,
* lane count, voltage level, and pre-emphasis level. Do not notify
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index 34b652d843aa..4decb26ea073 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -1038,7 +1038,7 @@ static inline void mdss_dp_reset_frame_crc_data(struct mdss_dp_crc_data *crc)
void mdss_dp_phy_initialize(struct mdss_dp_drv_pdata *dp);
-void mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *dp);
+int mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *dp);
int mdss_dp_dpcd_status_read(struct mdss_dp_drv_pdata *dp);
void mdss_dp_aux_parse_sink_status_field(struct mdss_dp_drv_pdata *dp);
int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp);
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index 479c367fdc92..8566b1d6985a 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -826,9 +826,9 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
return ret;
}
-static void dp_sink_capability_read(struct mdss_dp_drv_pdata *ep,
- int len)
+int mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *ep)
{
+ int const len = 16; /* read 16 bytes */
char *bp;
char data;
struct dpcd_cap *cap;
@@ -838,8 +838,15 @@ static void dp_sink_capability_read(struct mdss_dp_drv_pdata *ep,
rlen = dp_aux_read_buf(ep, 0, len, 0);
if (rlen <= 0) {
pr_err("edp aux read failed\n");
- return;
+ return rlen;
+ }
+
+ if (rlen != len) {
+ pr_debug("Read size expected(%d) bytes, actual(%d) bytes\n",
+ len, rlen);
+ return -EINVAL;
}
+
rp = &ep->rxp;
cap = &ep->dpcd;
bp = rp->data;
@@ -849,15 +856,11 @@ static void dp_sink_capability_read(struct mdss_dp_drv_pdata *ep,
data = *bp++; /* byte 0 */
cap->major = (data >> 4) & 0x0f;
cap->minor = data & 0x0f;
- if (--rlen <= 0)
- return;
pr_debug("version: %d.%d\n", cap->major, cap->minor);
data = *bp++; /* byte 1 */
/* 162, 270 and 540 MB, symbol rate, NOT bit rate */
cap->max_link_rate = data;
- if (--rlen <= 0)
- return;
pr_debug("link_rate=%d\n", cap->max_link_rate);
data = *bp++; /* byte 2 */
@@ -873,8 +876,6 @@ static void dp_sink_capability_read(struct mdss_dp_drv_pdata *ep,
data &= 0x0f;
cap->max_lane_count = data;
- if (--rlen <= 0)
- return;
pr_debug("lane_count=%d\n", cap->max_lane_count);
data = *bp++; /* byte 3 */
@@ -887,14 +888,10 @@ static void dp_sink_capability_read(struct mdss_dp_drv_pdata *ep,
cap->flags |= DPCD_NO_AUX_HANDSHAKE;
pr_debug("NO Link Training\n");
}
- if (--rlen <= 0)
- return;
data = *bp++; /* byte 4 */
cap->num_rx_port = (data & BIT(0)) + 1;
pr_debug("rx_ports=%d", cap->num_rx_port);
- if (--rlen <= 0)
- return;
data = *bp++; /* Byte 5: DOWN_STREAM_PORT_PRESENT */
cap->downstream_port.dfp_present = data & BIT(0);
@@ -907,13 +904,8 @@ static void dp_sink_capability_read(struct mdss_dp_drv_pdata *ep,
pr_debug("format_conversion = %d, detailed_cap_info_available = %d\n",
cap->downstream_port.format_conversion,
cap->downstream_port.detailed_cap_info_available);
- if (--rlen <= 0)
- return;
bp += 1; /* Skip Byte 6 */
- rlen -= 1;
- if (rlen <= 0)
- return;
data = *bp++; /* Byte 7: DOWN_STREAM_PORT_COUNT */
cap->downstream_port.dfp_count = data & 0x7;
@@ -923,34 +915,23 @@ static void dp_sink_capability_read(struct mdss_dp_drv_pdata *ep,
cap->downstream_port.dfp_count,
cap->downstream_port.msa_timing_par_ignored);
pr_debug("oui_support = %d\n", cap->downstream_port.oui_support);
- if (--rlen <= 0)
- return;
data = *bp++; /* byte 8 */
if (data & BIT(1)) {
cap->flags |= DPCD_PORT_0_EDID_PRESENTED;
pr_debug("edid presented\n");
}
- if (--rlen <= 0)
- return;
data = *bp++; /* byte 9 */
cap->rx_port0_buf_size = (data + 1) * 32;
pr_debug("lane_buf_size=%d\n", cap->rx_port0_buf_size);
- if (--rlen <= 0)
- return;
bp += 2; /* skip 10, 11 port1 capability */
- rlen -= 2;
- if (rlen <= 0)
- return;
data = *bp++; /* byte 12 */
cap->i2c_speed_ctrl = data;
if (cap->i2c_speed_ctrl > 0)
pr_debug("i2c_rate=%d", cap->i2c_speed_ctrl);
- if (--rlen <= 0)
- return;
data = *bp++; /* byte 13 */
cap->scrambler_reset = data & BIT(0);
@@ -962,8 +943,6 @@ static void dp_sink_capability_read(struct mdss_dp_drv_pdata *ep,
pr_debug("enhanced_framing=%d\n",
cap->enhanced_frame);
- if (--rlen <= 0)
- return;
data = *bp++; /* byte 14 */
if (data == 0)
@@ -974,6 +953,8 @@ static void dp_sink_capability_read(struct mdss_dp_drv_pdata *ep,
cap->training_read_interval);
dp_sink_parse_sink_count(ep);
+
+ return 0;
}
int mdss_dp_aux_link_status_read(struct mdss_dp_drv_pdata *ep, int len)
@@ -2379,11 +2360,6 @@ clear:
return ret;
}
-void mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *ep)
-{
- dp_sink_capability_read(ep, 16);
-}
-
void mdss_dp_aux_parse_sink_status_field(struct mdss_dp_drv_pdata *ep)
{
dp_sink_parse_sink_count(ep);
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
index 502bc1570609..37c4be6135aa 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -1510,6 +1510,17 @@ static void hdmi_edid_detail_desc(struct hdmi_edid_ctrl *edid_ctrl,
*/
active_h = ((((u32)data_buf[0x4] >> 0x4) & 0xF) << 8)
| data_buf[0x2];
+ /*
+ * It is possible that a sink might try to fit a resolution
+ * with an active_h of 4096 into a DTD. However, a DTD has only
+ * 12 bits to represent active_h, which limits the maximum value
+ * to 4095. If such a case is detected, set active_h explicitly
+ * to 4096.
+ */
+ if (active_h == 0xFFF) {
+ pr_debug("overriding h_active to 4096\n");
+ active_h++;
+ }
/*
* EDID_TIMING_DESC_H_BLANK[0x3]: Relative Offset to the EDID detailed
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index f05d4cb2922a..42845f9ff192 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -380,6 +380,13 @@ static inline u32 hdmi_tx_is_dvi_mode(struct hdmi_tx_ctrl *hdmi_ctrl)
return hdmi_edid_is_dvi_mode(hdmi_tx_get_fd(HDMI_TX_FEAT_EDID));
} /* hdmi_tx_is_dvi_mode */
+static inline u32 hdmi_tx_is_in_splash(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ return mdata->handoff_pending;
+}
+
static inline bool hdmi_tx_is_panel_on(struct hdmi_tx_ctrl *hdmi_ctrl)
{
return hdmi_ctrl->hpd_state && hdmi_ctrl->panel_power_on;
@@ -416,15 +423,27 @@ static inline void hdmi_tx_cec_device_suspend(struct hdmi_tx_ctrl *hdmi_ctrl)
}
static inline void hdmi_tx_send_cable_notification(
- struct hdmi_tx_ctrl *hdmi_ctrl, int val)
+ struct hdmi_tx_ctrl *hdmi_ctrl, int val, bool async)
{
if (hdmi_ctrl && hdmi_ctrl->ext_audio_data.intf_ops.hpd) {
u32 flags = 0;
- flags |= MSM_EXT_DISP_HPD_VIDEO;
+ if (async || hdmi_tx_is_in_splash(hdmi_ctrl)) {
+ flags |= MSM_EXT_DISP_HPD_ASYNC_VIDEO;
- if (!hdmi_tx_is_dvi_mode(hdmi_ctrl))
- flags |= MSM_EXT_DISP_HPD_AUDIO;
+ if (async) {
+ if (!hdmi_tx_is_dvi_mode(hdmi_ctrl))
+ flags |= MSM_EXT_DISP_HPD_ASYNC_AUDIO;
+ } else
+ if (!hdmi_tx_is_dvi_mode(hdmi_ctrl))
+ flags |= MSM_EXT_DISP_HPD_AUDIO;
+
+ } else {
+ flags |= MSM_EXT_DISP_HPD_VIDEO;
+
+ if (!hdmi_tx_is_dvi_mode(hdmi_ctrl))
+ flags |= MSM_EXT_DISP_HPD_AUDIO;
+ }
hdmi_ctrl->ext_audio_data.intf_ops.hpd(hdmi_ctrl->ext_pdev,
hdmi_ctrl->ext_audio_data.type, val, flags);
@@ -859,7 +878,11 @@ static ssize_t hdmi_tx_sysfs_wta_hpd(struct device *dev,
hdmi_tx_config_5v(hdmi_ctrl, false);
} else {
hdmi_tx_hpd_off(hdmi_ctrl);
- hdmi_tx_send_cable_notification(hdmi_ctrl, 0);
+ /*
+ * No need to block waiting for display/audio in this
+ * case since the HAL is not up, so no ACK can be expected.
+ */
+ hdmi_tx_send_cable_notification(hdmi_ctrl, 0, true);
}
break;
@@ -2339,7 +2362,7 @@ static void hdmi_tx_hpd_int_work(struct work_struct *work)
mutex_unlock(&hdmi_ctrl->tx_lock);
- hdmi_tx_send_cable_notification(hdmi_ctrl, hdmi_ctrl->hpd_state);
+ hdmi_tx_send_cable_notification(hdmi_ctrl, hdmi_ctrl->hpd_state, false);
} /* hdmi_tx_hpd_int_work */
static int hdmi_tx_check_capability(struct hdmi_tx_ctrl *hdmi_ctrl)
@@ -3956,7 +3979,7 @@ static int hdmi_tx_post_evt_handle_resume(struct hdmi_tx_ctrl *hdmi_ctrl)
&hdmi_ctrl->hpd_int_done, HZ/10);
if (!timeout) {
pr_debug("cable removed during suspend\n");
- hdmi_tx_send_cable_notification(hdmi_ctrl, 0);
+ hdmi_tx_send_cable_notification(hdmi_ctrl, 0, false);
}
}
@@ -3967,7 +3990,7 @@ static int hdmi_tx_post_evt_handle_panel_on(struct hdmi_tx_ctrl *hdmi_ctrl)
{
if (hdmi_ctrl->panel_suspend) {
pr_debug("panel suspend has triggered\n");
- hdmi_tx_send_cable_notification(hdmi_ctrl, 0);
+ hdmi_tx_send_cable_notification(hdmi_ctrl, 0, false);
}
return 0;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
index 5b284e624c7f..87ed56028edd 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -602,9 +602,14 @@ int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl,
mdss_mdp_irq_enable(MDSS_MDP_IRQ_TYPE_CWB_OVERFLOW, CWB_PPB_1);
}
- if (test_bit(MDSS_QOS_WB2_WRITE_GATHER_EN, ctl->mdata->mdss_qos_map))
+ if (test_bit(MDSS_QOS_WB2_WRITE_GATHER_EN, ctl->mdata->mdss_qos_map)) {
+ u32 reg = 0;
+
+ reg = MDSS_VBIF_READ(ctl->mdata,
+ MDSS_VBIF_WRITE_GATHER_EN, false);
MDSS_VBIF_WRITE(ctl->mdata, MDSS_VBIF_WRITE_GATHER_EN,
- BIT(6), false);
+ reg | BIT(6), false);
+ }
if (ctl->mdata->default_ot_wr_limit || ctl->mdata->default_ot_rd_limit)
mdss_mdp_set_ot_limit_wb(ctx, false);
@@ -1030,9 +1035,14 @@ static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
return ret;
}
- if (test_bit(MDSS_QOS_WB2_WRITE_GATHER_EN, ctl->mdata->mdss_qos_map))
+ if (test_bit(MDSS_QOS_WB2_WRITE_GATHER_EN, ctl->mdata->mdss_qos_map)) {
+ u32 reg = 0;
+
+ reg = MDSS_VBIF_READ(ctl->mdata,
+ MDSS_VBIF_WRITE_GATHER_EN, false);
MDSS_VBIF_WRITE(ctl->mdata, MDSS_VBIF_WRITE_GATHER_EN,
- BIT(6), false);
+ reg | BIT(6), false);
+ }
mdss_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
mdss_mdp_writeback_intr_done, ctl);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 7c6938d40e0b..09a34223c2a5 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -2604,9 +2604,10 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
}
ds_data = commit->dest_scaler;
- if (test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map) &&
- ds_data && (ds_data->flags & MDP_DESTSCALER_ENABLE) &&
- commit->dest_scaler_cnt) {
+
+ if (test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map)
+ && ds_data && commit->dest_scaler_cnt
+ && (ds_data->flags & MDP_DESTSCALER_ENABLE)) {
/*
* Find out which DS block to use based on DS commit info
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 8226557130a2..6abd78629140 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -92,7 +92,8 @@ static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
err_brelse:
brelse(bhs[0]);
err:
- fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
+ fat_msg_ratelimit(sb, KERN_ERR,
+ "FAT read failed (blocknr %llu)", (llu)blocknr);
return -EIO;
}
@@ -105,8 +106,8 @@ static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
fatent->bhs[0] = sb_bread(sb, blocknr);
if (!fatent->bhs[0]) {
- fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
- (llu)blocknr);
+ fat_msg_ratelimit(sb, KERN_ERR,
+ "FAT read failed (blocknr %llu)", (llu)blocknr);
return -EIO;
}
fatent->nr_bhs = 1;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index cf644d52c0cf..a6c21fba6e9f 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -760,8 +760,9 @@ retry:
fat_get_blknr_offset(sbi, i_pos, &blocknr, &offset);
bh = sb_bread(sb, blocknr);
if (!bh) {
- fat_msg(sb, KERN_ERR, "unable to read inode block "
- "for updating (i_pos %lld)", i_pos);
+ fat_msg_ratelimit(sb, KERN_ERR,
+ "unable to read inode block for updating (i_pos %lld)",
+ i_pos);
return -EIO;
}
spin_lock(&sbi->inode_hash_lock);
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 04a22505edd7..21a0917119ce 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -66,6 +66,7 @@
#define DIAG_IOCTL_PERIPHERAL_BUF_DRAIN 36
#define DIAG_IOCTL_REGISTER_CALLBACK 37
#define DIAG_IOCTL_HDLC_TOGGLE 38
+#define DIAG_IOCTL_QUERY_PD_LOGGING 39
/* PC Tools IDs */
#define APQ8060_TOOLS_ID 4062
diff --git a/include/linux/msm_ext_display.h b/include/linux/msm_ext_display.h
index d9831d7cbb4e..fc53e861eba4 100644
--- a/include/linux/msm_ext_display.h
+++ b/include/linux/msm_ext_display.h
@@ -26,9 +26,13 @@
* interface:
* MSM_EXT_DISP_HPD_AUDIO: audio will be routed to external display
* MSM_EXT_DISP_HPD_VIDEO: video will be routed to external display
+ * MSM_EXT_DISP_HPD_ASYNC_AUDIO: do not block waiting for the audio notification ack
+ * MSM_EXT_DISP_HPD_ASYNC_VIDEO: do not block waiting for the video notification ack
*/
#define MSM_EXT_DISP_HPD_AUDIO BIT(0)
#define MSM_EXT_DISP_HPD_VIDEO BIT(1)
+#define MSM_EXT_DISP_HPD_ASYNC_AUDIO BIT(2)
+#define MSM_EXT_DISP_HPD_ASYNC_VIDEO BIT(3)
/**
* struct ext_disp_cable_notify - cable notify handler structure
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h
index 4023e3a683d3..a0e2283ef4c9 100644
--- a/include/linux/qpnp/qpnp-revid.h
+++ b/include/linux/qpnp/qpnp-revid.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -181,6 +181,7 @@
#define PM660L_SUBTYPE 0x1A
#define PM660_SUBTYPE 0x1B
+/* PMI8998 REV_ID */
#define PMI8998_V1P0_REV1 0x00
#define PMI8998_V1P0_REV2 0x00
#define PMI8998_V1P0_REV3 0x00
@@ -196,6 +197,26 @@
#define PMI8998_V2P0_REV3 0x00
#define PMI8998_V2P0_REV4 0x02
+/* PM660 REV_ID */
+#define PM660_V1P0_REV1 0x00
+#define PM660_V1P0_REV2 0x00
+#define PM660_V1P0_REV3 0x00
+#define PM660_V1P0_REV4 0x01
+
+#define PM660_V1P1_REV1 0x00
+#define PM660_V1P1_REV2 0x00
+#define PM660_V1P1_REV3 0x01
+#define PM660_V1P1_REV4 0x01
+
+/* PMI8998 FAB_ID */
+#define PMI8998_FAB_ID_SMIC 0x11
+#define PMI8998_FAB_ID_GF 0x30
+
+/* PM660 FAB_ID */
+#define PM660_FAB_ID_GF 0x0
+#define PM660_FAB_ID_TSMC 0x2
+#define PM660_FAB_ID_MX 0x3
+
/* PM8005 */
#define PM8005_SUBTYPE 0x18
diff --git a/include/net/cnss_nl.h b/include/net/cnss_nl.h
new file mode 100644
index 000000000000..86c2fccc930e
--- /dev/null
+++ b/include/net/cnss_nl.h
@@ -0,0 +1,100 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NET_CNSS_GENETLINK_H_
+#define _NET_CNSS_GENETLINK_H_
+
+#define CLD80211_MAX_COMMANDS 40
+#define CLD80211_MAX_NL_DATA 4096
+
+/**
+ * enum cld80211_attr - Driver/application embeds the data in an nlmsg using
+ * the attributes below
+ *
+ * @CLD80211_ATTR_VENDOR_DATA: Embed all other attributes in this nested
+ * attribute.
+ * @CLD80211_ATTR_DATA: Embed complete data in this attribute
+ *
+ * Any new message in the future can be added as another attribute
+ */
+enum cld80211_attr {
+ CLD80211_ATTR_VENDOR_DATA = 1,
+ CLD80211_ATTR_DATA,
+ /* add new attributes above here */
+
+ __CLD80211_ATTR_AFTER_LAST,
+ CLD80211_ATTR_MAX = __CLD80211_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum cld80211_multicast_groups - List of multicast groups supported
+ *
+ * @CLD80211_MCGRP_SVC_MSGS: WLAN service message will be sent to this group.
+ * Ex: Status ind messages
+ * @CLD80211_MCGRP_HOST_LOGS: All logging related messages from driver will be
+ * sent to this multicast group
+ * @CLD80211_MCGRP_FW_LOGS: Firmware logging messages will be sent to this group
+ * @CLD80211_MCGRP_PER_PKT_STATS: Messages related to per-packet stats debugging
+ * infra will be sent to this group
+ * @CLD80211_MCGRP_DIAG_EVENTS: Driver/Firmware status logging diag events will
+ * be sent to this group
+ * @CLD80211_MCGRP_FATAL_EVENTS: Any fatal message generated in driver/firmware
+ * will be sent to this group
+ * @CLD80211_MCGRP_OEM_MSGS: All OEM messages will be sent to this group
+ * Ex: LOWI messages
+ */
+enum cld80211_multicast_groups {
+ CLD80211_MCGRP_SVC_MSGS,
+ CLD80211_MCGRP_HOST_LOGS,
+ CLD80211_MCGRP_FW_LOGS,
+ CLD80211_MCGRP_PER_PKT_STATS,
+ CLD80211_MCGRP_DIAG_EVENTS,
+ CLD80211_MCGRP_FATAL_EVENTS,
+ CLD80211_MCGRP_OEM_MSGS,
+};
+
+/**
+ * typedef cld80211_cb - Callback to be called when an nlmsg is received with
+ * the registered cmd_id command from userspace
+ * @data: Payload of the message to be sent to driver
+ * @data_len: Length of the payload
+ * @cb_ctx: callback context to be returned to driver when the callback
+ * is called
+ * @pid: process id of the sender
+ */
+typedef void (*cld80211_cb)(const void *data, int data_len,
+ void *cb_ctx, int pid);
+
+/**
+ * register_cld_cmd_cb() - Allows cld driver to register for commands with
+ * callback
+ * @cmd_id: Command to be registered. Valid range [1, CLD80211_MAX_COMMANDS]
+ * @cb: Callback to be called when an nlmsg is received with cmd_id command
+ * from userspace
+ * @cb_ctx: context provided by the driver; passed back as cb_ctx when
+ * @cb is invoked
+ */
+int register_cld_cmd_cb(u8 cmd_id, cld80211_cb cb, void *cb_ctx);
+
+/**
+ * deregister_cld_cmd_cb() - Allows cld driver to de-register the command it
+ * has already registered
+ * @cmd_id: Command to be deregistered.
+ */
+int deregister_cld_cmd_cb(u8 cmd_id);
+
+/**
+ * cld80211_get_genl_family() - Returns current netlink family context
+ */
+struct genl_family *cld80211_get_genl_family(void);
+
+#endif /* _NET_CNSS_GENETLINK_H_ */
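
The header above only declares the registration hooks. A minimal sketch (editor's illustration, not part of the patch; the command ID, function names, and module boilerplate are hypothetical) of how a WLAN driver could hook into this interface:

#include <linux/module.h>
#include <net/cnss_nl.h>

#define MY_CLD_CMD_ID 5 /* hypothetical, must be in [1, CLD80211_MAX_COMMANDS] */

/* Invoked for every nlmsg received from userspace carrying MY_CLD_CMD_ID. */
static void my_cld_cb(const void *data, int data_len, void *cb_ctx, int pid)
{
	pr_info("cld80211 cmd from pid %d, payload %d bytes\n", pid, data_len);
}

static int __init my_cld_init(void)
{
	/* cb_ctx is handed back verbatim as the third callback argument */
	return register_cld_cmd_cb(MY_CLD_CMD_ID, my_cld_cb, NULL);
}

static void __exit my_cld_exit(void)
{
	deregister_cld_cmd_cb(MY_CLD_CMD_ID);
}

module_init(my_cld_init);
module_exit(my_cld_exit);
MODULE_LICENSE("GPL v2");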
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 06c273252484..a5ba1496eef7 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -443,6 +443,11 @@ struct adm_param_data_v5 {
*/
} __packed;
+#define ASM_STREAM_CMD_REGISTER_PP_EVENTS 0x00013213
+#define ASM_STREAM_PP_EVENT 0x00013214
+#define DSP_STREAM_CMD "ADSP Stream Cmd"
+#define DSP_STREAM_CALLBACK "ADSP Stream Callback Event"
+
/* set customized mixing on matrix mixer */
#define ADM_CMD_SET_PSPD_MTMX_STRTR_PARAMS_V5 0x00010344
struct adm_cmd_set_pspd_mtmx_strtr_params_v5 {
diff --git a/include/sound/q6asm-v2.h b/include/sound/q6asm-v2.h
index d077827647b5..29707b26644a 100644
--- a/include/sound/q6asm-v2.h
+++ b/include/sound/q6asm-v2.h
@@ -618,6 +618,9 @@ int q6asm_get_session_time_legacy(struct audio_client *ac, uint64_t *tstamp);
int q6asm_send_audio_effects_params(struct audio_client *ac, char *params,
uint32_t params_length);
+int q6asm_send_stream_cmd(struct audio_client *ac, uint32_t opcode,
+ void *param, uint32_t params_length);
+
/* Client can set the IO mode to either AIO/SIO mode */
int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode);
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 99fe34d25fc5..8baf2bf6df2e 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -253,6 +253,66 @@ struct drm_msm_event_resp {
__u8 data[];
};
+#define MSM_COUNTER_GROUP_CP 0
+#define MSM_COUNTER_GROUP_RBBM 1
+#define MSM_COUNTER_GROUP_PC 2
+#define MSM_COUNTER_GROUP_VFD 3
+#define MSM_COUNTER_GROUP_HLSQ 4
+#define MSM_COUNTER_GROUP_VPC 5
+#define MSM_COUNTER_GROUP_TSE 6
+#define MSM_COUNTER_GROUP_RAS 7
+#define MSM_COUNTER_GROUP_UCHE 8
+#define MSM_COUNTER_GROUP_TP 9
+#define MSM_COUNTER_GROUP_SP 10
+#define MSM_COUNTER_GROUP_RB 11
+#define MSM_COUNTER_GROUP_VBIF 12
+#define MSM_COUNTER_GROUP_VBIF_PWR 13
+#define MSM_COUNTER_GROUP_VSC 23
+#define MSM_COUNTER_GROUP_CCU 24
+#define MSM_COUNTER_GROUP_LRZ 25
+#define MSM_COUNTER_GROUP_CMP 26
+#define MSM_COUNTER_GROUP_ALWAYSON 27
+#define MSM_COUNTER_GROUP_SP_PWR 28
+#define MSM_COUNTER_GROUP_TP_PWR 29
+#define MSM_COUNTER_GROUP_RB_PWR 30
+#define MSM_COUNTER_GROUP_CCU_PWR 31
+#define MSM_COUNTER_GROUP_UCHE_PWR 32
+#define MSM_COUNTER_GROUP_CP_PWR 33
+#define MSM_COUNTER_GROUP_GPMU_PWR 34
+#define MSM_COUNTER_GROUP_ALWAYSON_PWR 35
+
+/**
+ * struct drm_msm_counter - allocate or release a GPU performance counter
+ * @groupid: The group ID of the counter to get/put
+ * @counterid: For GET returns the counterid that was assigned. For PUT
+ * release the counter identified by groupid/counterid
+ * @countable: For GET the countable for the counter
+ */
+struct drm_msm_counter {
+ __u32 groupid;
+ int counterid;
+ __u32 countable;
+ __u32 counter_lo;
+ __u32 counter_hi;
+};
+
+struct drm_msm_counter_read_op {
+ __u64 value;
+ __u32 groupid;
+ int counterid;
+};
+
+/**
+ * struct drm_msm_counter_read - Read a number of GPU performance counters
+ * @ops: Pointer to the list of struct drm_msm_counter_read_op operations
+ * @nr_ops: Number of operations in the list
+ */
+struct drm_msm_counter_read {
+ __u64 __user ops;
+ __u32 nr_ops;
+};
+
+
#define DRM_MSM_GET_PARAM 0x00
/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
@@ -267,6 +327,9 @@ struct drm_msm_event_resp {
#define DRM_SDE_WB_CONFIG 0x40
#define DRM_MSM_REGISTER_EVENT 0x41
#define DRM_MSM_DEREGISTER_EVENT 0x42
+#define DRM_MSM_COUNTER_GET 0x43
+#define DRM_MSM_COUNTER_PUT 0x44
+#define DRM_MSM_COUNTER_READ 0x45
/**
* Currently DRM framework supports only VSYNC event.
@@ -289,4 +352,12 @@ struct drm_msm_event_resp {
DRM_MSM_REGISTER_EVENT), struct drm_msm_event_req)
#define DRM_IOCTL_MSM_DEREGISTER_EVENT DRM_IOW((DRM_COMMAND_BASE + \
DRM_MSM_DEREGISTER_EVENT), struct drm_msm_event_req)
+#define DRM_IOCTL_MSM_COUNTER_GET \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_COUNTER_GET, struct drm_msm_counter)
+#define DRM_IOCTL_MSM_COUNTER_PUT \
+ DRM_IOW(DRM_COMMAND_BASE + DRM_MSM_COUNTER_PUT, struct drm_msm_counter)
+#define DRM_IOCTL_MSM_COUNTER_READ \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_COUNTER_READ, \
+ struct drm_msm_counter_read)
+
#endif /* __MSM_DRM_H__ */
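
As a rough userspace illustration of the three new ioctls (editor's sketch, not part of the patch; the DRM node path, the chosen counter group, and countable 0 are assumptions):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

int main(void)
{
	struct drm_msm_counter counter;
	struct drm_msm_counter_read_op op;
	struct drm_msm_counter_read req;
	int fd = open("/dev/dri/card0", O_RDWR); /* assumed DRM node */

	if (fd < 0)
		return 1;

	memset(&counter, 0, sizeof(counter));
	counter.groupid = MSM_COUNTER_GROUP_ALWAYSON;
	counter.countable = 0; /* hypothetical countable for the group */
	if (ioctl(fd, DRM_IOCTL_MSM_COUNTER_GET, &counter))
		return 1;

	memset(&op, 0, sizeof(op));
	op.groupid = counter.groupid;
	op.counterid = counter.counterid;
	req.ops = (uint64_t)(uintptr_t)&op;
	req.nr_ops = 1;
	if (ioctl(fd, DRM_IOCTL_MSM_COUNTER_READ, &req) == 0)
		printf("counter value: %llu\n", (unsigned long long)op.value);

	ioctl(fd, DRM_IOCTL_MSM_COUNTER_PUT, &counter);
	return 0;
}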
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 748b7c277a3c..06f2ca2b0a95 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -402,6 +402,7 @@ header-y += reiserfs_xattr.h
header-y += resource.h
header-y += rfkill.h
header-y += rmnet_data.h
+header-y += rmnet.h
header-y += romfs_fs.h
header-y += rose.h
header-y += route.h
diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h
index 1e70483e7352..57266ed29fb3 100644
--- a/include/uapi/linux/esoc_ctrl.h
+++ b/include/uapi/linux/esoc_ctrl.h
@@ -3,11 +3,11 @@
#define ESOC_CODE 0xCC
-#define ESOC_CMD_EXE _IOW(ESOC_CODE, 1, u32)
-#define ESOC_WAIT_FOR_REQ _IOR(ESOC_CODE, 2, u32)
-#define ESOC_NOTIFY _IOW(ESOC_CODE, 3, u32)
-#define ESOC_GET_STATUS _IOR(ESOC_CODE, 4, u32)
-#define ESOC_WAIT_FOR_CRASH _IOR(ESOC_CODE, 6, u32)
+#define ESOC_CMD_EXE _IOW(ESOC_CODE, 1, unsigned int)
+#define ESOC_WAIT_FOR_REQ _IOR(ESOC_CODE, 2, unsigned int)
+#define ESOC_NOTIFY _IOW(ESOC_CODE, 3, unsigned int)
+#define ESOC_GET_STATUS _IOR(ESOC_CODE, 4, unsigned int)
+#define ESOC_WAIT_FOR_CRASH _IOR(ESOC_CODE, 6, unsigned int)
#define ESOC_REG_REQ_ENG _IO(ESOC_CODE, 7)
#define ESOC_REG_CMD_ENG _IO(ESOC_CODE, 8)
diff --git a/include/uapi/linux/rmnet.h b/include/uapi/linux/rmnet.h
new file mode 100644
index 000000000000..698b868076f4
--- /dev/null
+++ b/include/uapi/linux/rmnet.h
@@ -0,0 +1,213 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data configuration specification
+ */
+
+#ifndef _RMNET_DATA_H_
+#define _RMNET_DATA_H_
+
+/* Netlink API */
+#define RMNET_NETLINK_PROTO 31
+#define RMNET_MAX_STR_LEN 16
+#define RMNET_NL_DATA_MAX_LEN 64
+
+#define RMNET_NETLINK_MSG_COMMAND 0
+#define RMNET_NETLINK_MSG_RETURNCODE 1
+#define RMNET_NETLINK_MSG_RETURNDATA 2
+
+/* Constants */
+#define RMNET_EGRESS_FORMAT__RESERVED__ (1<<0)
+#define RMNET_EGRESS_FORMAT_MAP (1<<1)
+#define RMNET_EGRESS_FORMAT_AGGREGATION (1<<2)
+#define RMNET_EGRESS_FORMAT_MUXING (1<<3)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV3 (1<<4)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4 (1<<5)
+
+#define RMNET_INGRESS_FIX_ETHERNET (1<<0)
+#define RMNET_INGRESS_FORMAT_MAP (1<<1)
+#define RMNET_INGRESS_FORMAT_DEAGGREGATION (1<<2)
+#define RMNET_INGRESS_FORMAT_DEMUXING (1<<3)
+#define RMNET_INGRESS_FORMAT_MAP_COMMANDS (1<<4)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV3 (1<<5)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4 (1<<6)
+
+struct rmnet_nl_msg_s {
+ __be16 reserved;
+ __be16 message_type;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u16 crd:2,
+ reserved2:14;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u16 reserved2:14,
+ crd:2;
+#endif
+ union {
+ __be16 arg_length;
+ __be16 return_code;
+ };
+ union {
+ __u8 data[RMNET_NL_DATA_MAX_LEN];
+ struct {
+ __u8 dev[RMNET_MAX_STR_LEN];
+ __be32 flags;
+ __be16 agg_size;
+ __be16 agg_count;
+ __u8 tail_spacing;
+ } data_format;
+ struct {
+ __u8 dev[RMNET_MAX_STR_LEN];
+ __be32 ep_id;
+ __u8 operating_mode;
+ __u8 next_dev[RMNET_MAX_STR_LEN];
+ } local_ep_config;
+ struct {
+ __be32 id;
+ __u8 vnd_name[RMNET_MAX_STR_LEN];
+ } vnd;
+ };
+};
+
+/* RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE - Register RMNET data driver
+ * on a particular device.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: status code
+ */
+#define RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE 0
+
+/* RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE - Unregister RMNET data
+ * driver on a particular
+ * device.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: status code
+ */
+#define RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE 1
+
+/* RMNET_NETLINK_GET_NETWORK_DEVICE_ASSOCIATED - Get if RMNET data
+ * driver is registered on a
+ * particular device.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: 1 if registered, 0 if not
+ */
+#define RMNET_NETLINK_GET_NETWORK_DEVICE_ASSOCIATED 2
+
+/* RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT - Sets the egress data
+ * format for a particular
+ * link.
+ * Args: __be32 egress_flags
+ * char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: status code
+ */
+#define RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT 3
+
+/* RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT - Gets the egress data
+ * format for a particular
+ * link.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: 4-bytes data: __be32 egress_flags
+ */
+#define RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT 4
+
+/* RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT - Sets the ingress data
+ * format for a particular
+ * link.
+ * Args: __be32 ingress_flags
+ * char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: status code
+ */
+#define RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT 5
+
+/* RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT - Gets the ingress data
+ * format for a particular
+ * link.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ * Returns: 4-bytes data: __be32 ingress_flags
+ */
+#define RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT 6
+
+/* RMNET_NETLINK_SET_LOGICAL_EP_CONFIG - Sets the logical endpoint
+ * configuration for a particular
+ * link.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ * __be32 logical_ep_id, valid values are -1 through 31
+ * __u8 rmnet_mode: one of none, vnd, bridged
+ * char[] egress_dev_name: Egress device if operating in bridge mode
+ * Returns: status code
+ */
+#define RMNET_NETLINK_SET_LOGICAL_EP_CONFIG 7
+
+/* RMNET_NETLINK_UNSET_LOGICAL_EP_CONFIG - Un-sets the logical endpoint
+ * configuration for a particular
+ * link.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ * __be32 logical_ep_id, valid values are -1 through 31
+ * Returns: status code
+ */
+#define RMNET_NETLINK_UNSET_LOGICAL_EP_CONFIG 8
+
+/* RMNET_NETLINK_GET_LOGICAL_EP_CONFIG - Gets the logical endpoint
+ * configuration for a particular
+ * link.
+ * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+ * __be32 logical_ep_id, valid values are -1 through 31
+ * Returns: __u8 rmnet_mode: one of none, vnd, bridged
+ * char[] egress_dev_name: Egress device
+ */
+#define RMNET_NETLINK_GET_LOGICAL_EP_CONFIG 9
+
+/* RMNET_NETLINK_NEW_VND - Creates a new virtual network device node
+ * Args: __be32 node number
+ * Returns: status code
+ */
+#define RMNET_NETLINK_NEW_VND 10
+
+/* RMNET_NETLINK_NEW_VND_WITH_PREFIX - Creates a new virtual network
+ * device node with the specified
+ * prefix for the device name
+ * Args: __be32 node number
+ * char[] vnd_name - Use as prefix
+ * Returns: status code
+ */
+#define RMNET_NETLINK_NEW_VND_WITH_PREFIX 11
+
+/* RMNET_NETLINK_GET_VND_NAME - Gets the string name of a VND from ID
+ * Args: __be32 node number
+ * Returns: char[] vnd_name
+ */
+#define RMNET_NETLINK_GET_VND_NAME 12
+
+/* RMNET_NETLINK_FREE_VND - Removes virtual network device node
+ * Args: __be32 node number
+ * Returns: status code
+ */
+#define RMNET_NETLINK_FREE_VND 13
+
+/* Pass the frame up the stack with no modifications to skb->dev */
+#define RMNET_EPMODE_NONE 0
+/* Replace skb->dev to a virtual rmnet device and pass up the stack */
+#define RMNET_EPMODE_VND 1
+/* Pass the frame directly to another device with dev_queue_xmit(). */
+#define RMNET_EPMODE_BRIDGE 2
+/* Must be the last item in the list */
+#define RMNET_EPMODE_LENGTH 3
+
+#define RMNET_CONFIG_OK 0
+#define RMNET_CONFIG_UNKNOWN_MESSAGE 1
+#define RMNET_CONFIG_UNKNOWN_ERROR 2
+#define RMNET_CONFIG_NOMEM 3
+#define RMNET_CONFIG_DEVICE_IN_USE 4
+#define RMNET_CONFIG_INVALID_REQUEST 5
+#define RMNET_CONFIG_NO_SUCH_DEVICE 6
+#define RMNET_CONFIG_BAD_ARGUMENTS 7
+#define RMNET_CONFIG_BAD_EGRESS_DEVICE 8
+#define RMNET_CONFIG_TC_HANDLE_FULL 9
+
+#endif /* _RMNET_DATA_H_ */
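
For context (editor's sketch, not part of the patch): the commands above travel in struct rmnet_nl_msg_s as the payload of a netlink message on protocol RMNET_NETLINK_PROTO. A minimal userspace example creating one virtual device; the nlmsg_type value, the field byte order, and the reply handling are assumptions of this sketch, not guaranteed by the header:

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rmnet.h>

int main(void)
{
	struct {
		struct nlmsghdr hdr;
		struct rmnet_nl_msg_s msg;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, RMNET_NETLINK_PROTO);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.hdr.nlmsg_len = sizeof(req);
	req.hdr.nlmsg_type = NLMSG_MIN_TYPE;        /* assumed; see lead-in */
	req.msg.crd = RMNET_NETLINK_MSG_COMMAND;
	req.msg.message_type = RMNET_NETLINK_NEW_VND;
	req.msg.vnd.id = 0;                         /* virtual device node number */

	if (send(fd, &req, sizeof(req), 0) < 0)
		return 1;
	/* a real tool would recv() the RMNET_NETLINK_MSG_RETURNCODE reply here */
	return 0;
}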
diff --git a/include/uapi/media/msm_media_info.h b/include/uapi/media/msm_media_info.h
index f59f034a72b9..13bee7a56a0e 100644
--- a/include/uapi/media/msm_media_info.h
+++ b/include/uapi/media/msm_media_info.h
@@ -222,7 +222,7 @@ enum color_fmts {
* Y_Stride = align(Width, 128)
* UV_Stride = align(Width, 128)
* Y_Scanlines = align(Height, 32)
- * UV_Scanlines = align((Height + 96)/2, 16)
+ * UV_Scanlines = align(Height/2, 16)
* Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
* UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
* Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
@@ -235,7 +235,7 @@ enum color_fmts {
*
* Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
* Y_Meta_Plane_size + UV_Meta_Plane_size
- * + Extradata), 4096)
+ * + max(Extradata, Y_Stride * 64), 4096)
*/
COLOR_FMT_NV12_UBWC,
/* Venus NV12 10-bit UBWC:
@@ -311,7 +311,7 @@ enum color_fmts {
* Y_Stride = align(Width * 4/3, 128)
* UV_Stride = align(Width * 4/3, 128)
* Y_Scanlines = align(Height, 32)
- * UV_Scanlines = align((Height + 96)/2, 16)
+ * UV_Scanlines = align(Height/2, 16)
* Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
* UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
* Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
@@ -324,7 +324,7 @@ enum color_fmts {
*
* Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
* Y_Meta_Plane_size + UV_Meta_Plane_size
- * + Extradata), 4096)
+ * + max(Extradata, Y_Stride * 64), 4096)
*/
COLOR_FMT_NV12_BPP10_UBWC,
/* Venus RGBA8888 format:
@@ -970,7 +970,6 @@ static inline unsigned int VENUS_BUFFER_SIZE(
break;
case COLOR_FMT_NV12_UBWC:
case COLOR_FMT_NV12_BPP10_UBWC:
- uv_sclines = VENUS_UV_SCANLINES(color_fmt, height + 96);
y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
@@ -983,7 +982,8 @@ static inline unsigned int VENUS_BUFFER_SIZE(
uv_meta_scanlines, 4096);
size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
- uv_meta_plane + extra_size;
+ uv_meta_plane + MSM_MEDIA_MAX(extra_size,
+ 64 * y_stride);
size = MSM_MEDIA_ALIGN(size, 4096);
break;
case COLOR_FMT_P010_UBWC:
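
A quick worked example of the two documentation changes above (editor's illustration for 1920x1080 NV12 UBWC; the metadata planes are ignored since their formulas are unchanged):

  Y_Stride       = align(1920, 128)                           = 1920
  Y_Scanlines    = align(1080, 32)                            = 1088
  UV_Scanlines   old: align((1080 + 96)/2, 16) = 592          new: align(1080/2, 16) = 544
  UV_UBWC_Plane  old: align(1920 * 592, 4096)  = 1138688      new: align(1920 * 544, 4096) = 1044480
  tail padding   old: Extradata                               new: max(Extradata, 64 * 1920) >= 122880

So the UV plane shrinks by dropping the 96 extra rows, while the tail of the buffer grows to at least 64 lines of Y stride, matching the VENUS_BUFFER_SIZE() change in the hunk.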
diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c
index 2c3e0998d400..ed29c38cd7fb 100644
--- a/kernel/trace/ipc_logging.c
+++ b/kernel/trace/ipc_logging.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -507,8 +507,8 @@ int ipc_log_string(void *ilctxt, const char *fmt, ...)
tsv_qtimer_write(&ectxt);
avail_size = (MAX_MSG_SIZE - (ectxt.offset + hdr_size));
va_start(arg_list, fmt);
- data_size = vsnprintf((ectxt.buff + ectxt.offset + hdr_size),
- avail_size, fmt, arg_list);
+ data_size = vscnprintf((ectxt.buff + ectxt.offset + hdr_size),
+ avail_size, fmt, arg_list);
va_end(arg_list);
tsv_write_header(&ectxt, TSV_TYPE_BYTE_ARRAY, data_size);
ectxt.offset += data_size;
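
Why the switch matters (editor's note): vsnprintf() returns the length the formatted string would have had, which can exceed avail_size and then pushes ectxt.offset past the end of the log page; vscnprintf() returns the number of characters actually written. A tiny sketch of the same difference using the non-varargs variants:

#include <linux/kernel.h>

static void snprintf_vs_scnprintf(void)
{
	char buf[8];
	/* "0123456789" needs 10 characters, but only 7 plus the NUL fit in buf */
	int a = snprintf(buf, sizeof(buf), "%s", "0123456789");  /* a == 10 */
	int b = scnprintf(buf, sizeof(buf), "%s", "0123456789"); /* b == 7  */

	pr_info("snprintf=%d scnprintf=%d\n", a, b);
}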
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 156a13c7ada8..003dd1d040ca 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -958,6 +958,64 @@ discard:
return 0;
}
+static struct sock *__udp6_lib_demux_lookup(struct net *net,
+ __be16 loc_port, const struct in6_addr *loc_addr,
+ __be16 rmt_port, const struct in6_addr *rmt_addr,
+ int dif)
+{
+ struct sock *sk;
+
+ rcu_read_lock();
+ sk = __udp6_lib_lookup(net, rmt_addr, rmt_port, loc_addr, loc_port,
+ dif, &udp_table);
+ if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+ sk = NULL;
+ rcu_read_unlock();
+
+ return sk;
+}
+
+static void udp_v6_early_demux(struct sk_buff *skb)
+{
+ struct net *net = dev_net(skb->dev);
+ const struct udphdr *uh;
+ struct sock *sk;
+ struct dst_entry *dst;
+ int dif = skb->dev->ifindex;
+
+ if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+ sizeof(struct udphdr)))
+ return;
+
+ uh = udp_hdr(skb);
+
+ if (skb->pkt_type == PACKET_HOST)
+ sk = __udp6_lib_demux_lookup(net, uh->dest,
+ &ipv6_hdr(skb)->daddr,
+ uh->source, &ipv6_hdr(skb)->saddr,
+ dif);
+ else
+ return;
+
+ if (!sk)
+ return;
+
+ skb->sk = sk;
+ skb->destructor = sock_efree;
+ dst = READ_ONCE(sk->sk_rx_dst);
+
+ if (dst)
+ dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
+ if (dst) {
+ if (dst->flags & DST_NOCACHE) {
+ if (likely(atomic_inc_not_zero(&dst->__refcnt)))
+ skb_dst_set(skb, dst);
+ } else {
+ skb_dst_set_noref(skb, dst);
+ }
+ }
+}
+
static __inline__ int udpv6_rcv(struct sk_buff *skb)
{
return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
@@ -1461,6 +1519,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
#endif
static const struct inet6_protocol udpv6_protocol = {
+ .early_demux = udp_v6_early_demux,
.handler = udpv6_rcv,
.err_handler = udpv6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index 52dae06a7ee8..699e7251023f 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -3181,7 +3181,7 @@ static struct snd_soc_dai_driver msm_anlg_cdc_i2s_dai[] = {
.name = "msm_anlg_cdc_i2s_rx1",
.id = AIF1_PB,
.playback = {
- .stream_name = "Playback",
+ .stream_name = "PDM Playback",
.rates = SDM660_CDC_RATES,
.formats = SDM660_CDC_FORMATS,
.rate_max = 192000,
@@ -3195,7 +3195,7 @@ static struct snd_soc_dai_driver msm_anlg_cdc_i2s_dai[] = {
.name = "msm_anlg_cdc_i2s_tx1",
.id = AIF1_CAP,
.capture = {
- .stream_name = "Record",
+ .stream_name = "PDM Capture",
.rates = SDM660_CDC_RATES,
.formats = SDM660_CDC_FORMATS,
.rate_max = 48000,
@@ -4167,6 +4167,8 @@ static int msm_anlg_cdc_soc_probe(struct snd_soc_codec *codec)
snd_soc_dapm_ignore_suspend(dapm, "PDM Playback");
snd_soc_dapm_ignore_suspend(dapm, "PDM Capture");
+ snd_soc_dapm_sync(dapm);
+
return 0;
}
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
index 3aa502ba065f..c6074570bb50 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -1207,6 +1207,8 @@ static int msm_dig_cdc_soc_probe(struct snd_soc_codec *codec)
snd_soc_dapm_ignore_suspend(dapm, "PDM_OUT_RX2");
snd_soc_dapm_ignore_suspend(dapm, "PDM_OUT_RX3");
+ snd_soc_dapm_sync(dapm);
+
return 0;
}
@@ -2016,7 +2018,7 @@ static struct snd_soc_codec_driver soc_msm_dig_codec = {
const struct regmap_config msm_digital_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
- .val_bits = 32,
+ .val_bits = 8,
.lock = enable_digital_callback,
.unlock = disable_digital_callback,
.cache_type = REGCACHE_FLAT,
@@ -2127,8 +2129,8 @@ static int msm_dig_resume(struct device *dev)
}
static const struct dev_pm_ops msm_dig_pm_ops = {
- .suspend = msm_dig_suspend,
- .resume = msm_dig_resume,
+ .suspend_late = msm_dig_suspend,
+ .resume_early = msm_dig_resume,
};
#endif
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index 8e986a74ffff..2929ea0d735b 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -45,6 +45,7 @@
#include <sound/msm-dts-eagle.h>
#include "msm-pcm-routing-v2.h"
+#include "msm-qti-pp-config.h"
#define DSP_PP_BUFFERING_IN_MSEC 25
#define PARTIAL_DRAIN_ACK_EARLY_BY_MSEC 150
@@ -543,12 +544,19 @@ static void compr_event_handler(uint32_t opcode,
unsigned long flags;
uint64_t read_size;
uint32_t *buff_addr;
+ struct snd_soc_pcm_runtime *rtd;
+ int ret = 0;
if (!prtd) {
pr_err("%s: prtd is NULL\n", __func__);
return;
}
cstream = prtd->cstream;
+ if (!cstream) {
+ pr_err("%s: cstream is NULL\n", __func__);
+ return;
+ }
+
ac = prtd->audio_client;
/*
@@ -716,6 +724,23 @@ static void compr_event_handler(uint32_t opcode,
prtd->gapless_state.gapless_transition = 0;
spin_unlock_irqrestore(&prtd->lock, flags);
break;
+ case ASM_STREAM_PP_EVENT:
+ pr_debug("%s: ASM_STREAM_PP_EVENT\n", __func__);
+ rtd = cstream->private_data;
+ if (!rtd) {
+ pr_err("%s: rtd is NULL\n", __func__);
+ return;
+ }
+
+ ret = msm_adsp_inform_mixer_ctl(rtd, DSP_STREAM_CALLBACK,
+ payload);
+ if (ret) {
+ pr_err("%s: failed to inform mixer ctrl. err = %d\n",
+ __func__, ret);
+ return;
+ }
+
+ break;
case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY: {
pr_debug("ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY\n");
@@ -815,6 +840,10 @@ static void compr_event_handler(uint32_t opcode,
}
atomic_set(&prtd->close, 0);
break;
+ case ASM_STREAM_CMD_REGISTER_PP_EVENTS:
+ pr_debug("%s: ASM_STREAM_CMD_REGISTER_PP_EVENTS:",
+ __func__);
+ break;
default:
break;
}
@@ -3578,6 +3607,65 @@ end:
return rc;
}
+static int msm_compr_adsp_stream_cmd_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_kcontrol_chip(kcontrol);
+ unsigned long fe_id = kcontrol->private_value;
+ struct msm_compr_pdata *pdata = (struct msm_compr_pdata *)
+ snd_soc_component_get_drvdata(comp);
+ struct snd_compr_stream *cstream = NULL;
+ struct msm_compr_audio *prtd;
+ int ret = 0, param_length = 0;
+
+ if (fe_id >= MSM_FRONTEND_DAI_MAX) {
+ pr_err("%s Received invalid fe_id %lu\n",
+ __func__, fe_id);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ cstream = pdata->cstream[fe_id];
+ if (cstream == NULL) {
+ pr_err("%s cstream is null.\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ prtd = cstream->runtime->private_data;
+ if (!prtd) {
+ pr_err("%s: prtd is null.\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (prtd->audio_client == NULL) {
+ pr_err("%s: audio_client is null.\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ memcpy(&param_length, ucontrol->value.bytes.data,
+ sizeof(param_length));
+ if ((param_length + sizeof(param_length))
+ >= sizeof(ucontrol->value.bytes.data)) {
+ pr_err("%s param length=%d exceeds limit",
+ __func__, param_length);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = q6asm_send_stream_cmd(prtd->audio_client,
+ ASM_STREAM_CMD_REGISTER_PP_EVENTS,
+ ucontrol->value.bytes.data + sizeof(param_length),
+ param_length);
+ if (ret < 0)
+ pr_err("%s: failed to register pp event. err = %d\n",
+ __func__, ret);
+done:
+ return ret;
+}
+
static int msm_compr_gapless_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -3854,6 +3942,117 @@ static int msm_compr_add_query_audio_effect_control(
return 0;
}
+static int msm_compr_add_audio_adsp_stream_cmd_control(
+ struct snd_soc_pcm_runtime *rtd)
+{
+ const char *mixer_ctl_name = DSP_STREAM_CMD;
+ const char *deviceNo = "NN";
+ char *mixer_str = NULL;
+ int ctl_len = 0, ret = 0;
+ struct snd_kcontrol_new fe_audio_adsp_stream_cmd_config_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "?",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_adsp_stream_cmd_info,
+ .put = msm_compr_adsp_stream_cmd_put,
+ .private_value = 0,
+ }
+ };
+
+ if (!rtd) {
+ pr_err("%s NULL rtd\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+ if (!mixer_str) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+ fe_audio_adsp_stream_cmd_config_control[0].name = mixer_str;
+ fe_audio_adsp_stream_cmd_config_control[0].private_value =
+ rtd->dai_link->be_id;
+ pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+ ret = snd_soc_add_platform_controls(rtd->platform,
+ fe_audio_adsp_stream_cmd_config_control,
+ ARRAY_SIZE(fe_audio_adsp_stream_cmd_config_control));
+ if (ret < 0)
+ pr_err("%s: failed to add ctl %s. err = %d\n",
+ __func__, mixer_str, ret);
+
+ kfree(mixer_str);
+done:
+ return ret;
+}
+
+static int msm_compr_add_audio_adsp_stream_callback_control(
+ struct snd_soc_pcm_runtime *rtd)
+{
+ const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+ const char *deviceNo = "NN";
+ char *mixer_str = NULL;
+ int ctl_len = 0, ret = 0;
+ struct snd_kcontrol *kctl;
+
+ struct snd_kcontrol_new fe_audio_adsp_callback_config_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "?",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_adsp_stream_callback_info,
+ .get = msm_adsp_stream_callback_get,
+ .put = msm_adsp_stream_callback_put,
+ .private_value = 0,
+ }
+ };
+
+ if (!rtd) {
+ pr_err("%s: rtd is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+ if (!mixer_str) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+ fe_audio_adsp_callback_config_control[0].name = mixer_str;
+ fe_audio_adsp_callback_config_control[0].private_value =
+ rtd->dai_link->be_id;
+ pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+ ret = snd_soc_add_platform_controls(rtd->platform,
+ fe_audio_adsp_callback_config_control,
+ ARRAY_SIZE(fe_audio_adsp_callback_config_control));
+ if (ret < 0) {
+ pr_err("%s: failed to add ctl %s. err = %d\n",
+ __func__, mixer_str, ret);
+ ret = -EINVAL;
+ goto free_mixer_str;
+ }
+
+ kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+ if (!kctl) {
+ pr_err("%s: failed to get kctl %s.\n", __func__, mixer_str);
+ ret = -EINVAL;
+ goto free_mixer_str;
+ }
+
+ kctl->private_data = NULL;
+free_mixer_str:
+ kfree(mixer_str);
+done:
+ return ret;
+}
+
static int msm_compr_add_dec_runtime_params_control(
struct snd_soc_pcm_runtime *rtd)
{
@@ -4048,6 +4247,16 @@ static int msm_compr_new(struct snd_soc_pcm_runtime *rtd)
pr_err("%s: Could not add Compr Audio Effects Control\n",
__func__);
+ rc = msm_compr_add_audio_adsp_stream_cmd_control(rtd);
+ if (rc)
+ pr_err("%s: Could not add Compr ADSP Stream Cmd Control\n",
+ __func__);
+
+ rc = msm_compr_add_audio_adsp_stream_callback_control(rtd);
+ if (rc)
+ pr_err("%s: Could not add Compr ADSP Stream Callback Control\n",
+ __func__);
+
rc = msm_compr_add_query_audio_effect_control(rtd);
if (rc)
pr_err("%s: Could not add Compr Query Audio Effect Control\n",
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index 7928c3791f96..73eadfa4eebb 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -37,6 +37,7 @@
#include "msm-pcm-q6-v2.h"
#include "msm-pcm-routing-v2.h"
+#include "msm-qti-pp-config.h"
enum stream_state {
IDLE = 0,
@@ -147,6 +148,8 @@ static void event_handler(uint32_t opcode,
uint32_t idx = 0;
uint32_t size = 0;
uint8_t buf_index;
+ struct snd_soc_pcm_runtime *rtd;
+ int ret = 0;
switch (opcode) {
case ASM_DATA_EVENT_WRITE_DONE_V2: {
@@ -223,6 +226,29 @@ static void event_handler(uint32_t opcode,
}
break;
}
+ case ASM_STREAM_PP_EVENT: {
+ pr_debug("%s: ASM_STREAM_PP_EVENT\n", __func__);
+ if (!substream) {
+ pr_err("%s: substream is NULL.\n", __func__);
+ return;
+ }
+
+ rtd = substream->private_data;
+ if (!rtd) {
+ pr_err("%s: rtd is NULL\n", __func__);
+ return;
+ }
+
+ ret = msm_adsp_inform_mixer_ctl(rtd, DSP_STREAM_CALLBACK,
+ payload);
+ if (ret) {
+ pr_err("%s: failed to inform mixer ctl. err = %d\n",
+ __func__, ret);
+ return;
+ }
+
+ break;
+ }
case APR_BASIC_RSP_RESULT: {
switch (payload[0]) {
case ASM_SESSION_CMD_RUN_V2:
@@ -252,6 +278,10 @@ static void event_handler(uint32_t opcode,
}
atomic_set(&prtd->start, 1);
break;
+ case ASM_STREAM_CMD_REGISTER_PP_EVENTS:
+ pr_debug("%s: ASM_STREAM_CMD_REGISTER_PP_EVENTS:",
+ __func__);
+ break;
default:
pr_debug("%s:Payload = [0x%x]stat[0x%x]\n",
__func__, payload[0], payload[1]);
@@ -1036,6 +1066,177 @@ static struct snd_pcm_ops msm_pcm_ops = {
.mmap = msm_pcm_mmap,
};
+static int msm_pcm_adsp_stream_cmd_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *pcm = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_platform *platform = snd_soc_component_to_platform(pcm);
+ struct msm_plat_data *pdata = dev_get_drvdata(platform->dev);
+ struct snd_pcm_substream *substream;
+ struct msm_audio *prtd;
+ int ret = 0, param_length = 0;
+
+ if (!pdata) {
+ pr_err("%s pdata is NULL\n", __func__);
+ ret = -ENODEV;
+ goto done;
+ }
+
+ substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ if (!substream) {
+ pr_err("%s substream not found\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (!substream->runtime) {
+ pr_err("%s substream runtime not found\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ prtd = substream->runtime->private_data;
+ if (prtd->audio_client == NULL) {
+ pr_err("%s prtd is null.\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ memcpy(&param_length, ucontrol->value.bytes.data,
+ sizeof(param_length));
+ if ((param_length + sizeof(param_length))
+ >= sizeof(ucontrol->value.bytes.data)) {
+ pr_err("%s param length=%d exceeds limit",
+ __func__, param_length);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ret = q6asm_send_stream_cmd(prtd->audio_client,
+ ASM_STREAM_CMD_REGISTER_PP_EVENTS,
+ ucontrol->value.bytes.data + sizeof(param_length),
+ param_length);
+ if (ret < 0)
+ pr_err("%s: failed to register pp event. err = %d\n",
+ __func__, ret);
+done:
+ return ret;
+}
+
+static int msm_pcm_add_audio_adsp_stream_cmd_control(
+ struct snd_soc_pcm_runtime *rtd)
+{
+ const char *mixer_ctl_name = DSP_STREAM_CMD;
+ const char *deviceNo = "NN";
+ char *mixer_str = NULL;
+ int ctl_len = 0, ret = 0;
+ struct snd_kcontrol_new fe_audio_adsp_stream_cmd_config_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "?",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_adsp_stream_cmd_info,
+ .put = msm_pcm_adsp_stream_cmd_put,
+ .private_value = 0,
+ }
+ };
+
+ if (!rtd) {
+ pr_err("%s rtd is NULL\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+ if (!mixer_str) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+ fe_audio_adsp_stream_cmd_config_control[0].name = mixer_str;
+ fe_audio_adsp_stream_cmd_config_control[0].private_value =
+ rtd->dai_link->be_id;
+ pr_debug("Registering new mixer ctl %s\n", mixer_str);
+ ret = snd_soc_add_platform_controls(rtd->platform,
+ fe_audio_adsp_stream_cmd_config_control,
+ ARRAY_SIZE(fe_audio_adsp_stream_cmd_config_control));
+ if (ret < 0)
+ pr_err("%s: failed add ctl %s. err = %d\n",
+ __func__, mixer_str, ret);
+
+ kfree(mixer_str);
+done:
+ return ret;
+}
+
+static int msm_pcm_add_audio_adsp_stream_callback_control(
+ struct snd_soc_pcm_runtime *rtd)
+{
+ const char *mixer_ctl_name = DSP_STREAM_CALLBACK;
+ const char *deviceNo = "NN";
+ char *mixer_str = NULL;
+ int ctl_len = 0, ret = 0;
+ struct snd_kcontrol *kctl;
+
+ struct snd_kcontrol_new fe_audio_adsp_callback_config_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "?",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_adsp_stream_callback_info,
+ .get = msm_adsp_stream_callback_get,
+ .put = msm_adsp_stream_callback_put,
+ .private_value = 0,
+ }
+ };
+
+ if (!rtd) {
+ pr_err("%s NULL rtd\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ pr_debug("%s: added new pcm FE with name %s, id %d, cpu dai %s, device no %d\n",
+ __func__, rtd->dai_link->name, rtd->dai_link->be_id,
+ rtd->dai_link->cpu_dai_name, rtd->pcm->device);
+ ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+ if (!mixer_str) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name, rtd->pcm->device);
+ fe_audio_adsp_callback_config_control[0].name = mixer_str;
+ fe_audio_adsp_callback_config_control[0].private_value =
+ rtd->dai_link->be_id;
+ pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+ ret = snd_soc_add_platform_controls(rtd->platform,
+ fe_audio_adsp_callback_config_control,
+ ARRAY_SIZE(fe_audio_adsp_callback_config_control));
+ if (ret < 0) {
+ pr_err("%s: failed to add ctl %s. err = %d\n",
+ __func__, mixer_str, ret);
+ ret = -EINVAL;
+ goto free_mixer_str;
+ }
+
+ kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+ if (!kctl) {
+ pr_err("%s: failed to get kctl %s.\n", __func__, mixer_str);
+ ret = -EINVAL;
+ goto free_mixer_str;
+ }
+
+ kctl->private_data = NULL;
+free_mixer_str:
+ kfree(mixer_str);
+done:
+ return ret;
+}
+
static int msm_pcm_set_volume(struct msm_audio *prtd, uint32_t volume)
{
int rc = 0;
@@ -1549,6 +1750,16 @@ static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
pr_err("%s: Could not add pcm Compress Control %d\n",
__func__, ret);
+ ret = msm_pcm_add_audio_adsp_stream_cmd_control(rtd);
+ if (ret)
+ pr_err("%s: Could not add pcm ADSP Stream Cmd Control\n",
+ __func__);
+
+ ret = msm_pcm_add_audio_adsp_stream_callback_control(rtd);
+ if (ret)
+ pr_err("%s: Could not add pcm ADSP Stream Callback Control\n",
+ __func__);
+
return ret;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
index 6f463c079f19..d4e78604f868 100644
--- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.c
@@ -817,6 +817,136 @@ static int msm_qti_pp_asphere_set(struct snd_kcontrol *kcontrol,
return 0;
}
+
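+/*
+ * Deliver an ADSP PP event (size word followed by the payload proper) to the
+ * stream callback mixer control of this PCM device and raise an INFO
+ * notification so userspace can read it.
+ */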
+int msm_adsp_inform_mixer_ctl(struct snd_soc_pcm_runtime *rtd,
+ const char *mixer_ctl_name,
+ uint32_t *payload)
+{
+ /* adsp pp event notifier */
+ struct snd_kcontrol *kctl;
+ struct snd_ctl_elem_value control;
+ uint32_t payload_size = 0;
+ const char *deviceNo = "NN";
+ char *mixer_str = NULL;
+ int ctl_len = 0, ret = 0;
+
+ if (!rtd || !payload) {
+ pr_err("%s: %s is NULL\n", __func__,
+ (!rtd) ? "rtd" : "payload");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ ctl_len = strlen(mixer_ctl_name) + 1 + strlen(deviceNo) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_ATOMIC);
+ if (!mixer_str) {
+		ret = -ENOMEM;
+ goto done;
+ }
+
+ snprintf(mixer_str, ctl_len, "%s %d", mixer_ctl_name,
+ rtd->pcm->device);
+ kctl = snd_soc_card_get_kcontrol(rtd->card, mixer_str);
+ kfree(mixer_str);
+ if (!kctl) {
+ pr_err("%s: failed to get kctl.\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+	control.id = kctl->id;
+	payload_size = payload[0];
+	if ((payload_size + sizeof(payload_size)) >
+		sizeof(control.value.bytes.data)) {
+		pr_err("%s: payload size=%d exceeds limit\n",
+			__func__, payload_size);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Copy complete payload */
+	memcpy(control.value.bytes.data, (void *)payload,
+		sizeof(payload_size) + payload_size);
+ kctl->put(kctl, &control);
+ if (rtd->card->snd_card == NULL) {
+ pr_err("%s: snd_card is null.\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ snd_ctl_notify(rtd->card->snd_card,
+ SNDRV_CTL_EVENT_MASK_INFO,
+ &control.id);
+done:
+ return ret;
+}
+
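+/* info callback: the stream cmd control is a 512-byte binary (bytes) element */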
+int msm_adsp_stream_cmd_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = 512;
+
+ return 0;
+}
+
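+/*
+ * put callback: cache the latest PP event (size word + payload) in
+ * kcontrol->private_data, reallocating when a larger event arrives.
+ */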
+int msm_adsp_stream_callback_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ uint32_t payload_size = 0, last_payload_size = 0;
+
+	/* fetch payload size in first four bytes */
+	memcpy(&payload_size, ucontrol->value.bytes.data, sizeof(uint32_t));
+	if ((payload_size + sizeof(payload_size)) >
+		sizeof(ucontrol->value.bytes.data)) {
+		pr_err("%s: payload size=%d exceeds limit\n",
+			__func__, payload_size);
+		return -EINVAL;
+	}
+
+ if (kcontrol->private_data == NULL) {
+ /* buffer is empty */
+ kcontrol->private_data =
+ kzalloc(payload_size + sizeof(payload_size),
+ GFP_ATOMIC);
+ if (kcontrol->private_data == NULL)
+ return -ENOMEM;
+ } else {
+ memcpy(&last_payload_size, kcontrol->private_data,
+ sizeof(uint32_t));
+ if (last_payload_size < payload_size) {
+ /* new payload size exceeds old one.
+ * reallocate buffer
+ */
+ kfree(kcontrol->private_data);
+ kcontrol->private_data =
+ kzalloc(payload_size + sizeof(payload_size),
+ GFP_ATOMIC);
+ if (kcontrol->private_data == NULL)
+ return -ENOMEM;
+ }
+ }
+
+ memcpy(kcontrol->private_data, ucontrol->value.bytes.data,
+ sizeof(uint32_t) + payload_size);
+
+ return 0;
+}
+
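+/*
+ * get callback: copy the cached PP event to userspace and free private_data
+ * so a stale event cannot be read twice.
+ */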
+int msm_adsp_stream_callback_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ uint32_t payload_size = 0;
+
+ if (kcontrol->private_data == NULL) {
+ pr_err("%s: ASM Stream PP Event Data Unavailable\n", __func__);
+ return -EINVAL;
+ }
+
+ memcpy(&payload_size, kcontrol->private_data, sizeof(uint32_t));
+ memcpy(ucontrol->value.bytes.data, kcontrol->private_data,
+ sizeof(uint32_t) + payload_size);
+ kfree(kcontrol->private_data);
+ kcontrol->private_data = NULL;
+
+ return 0;
+}
+
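+/* info callback: the stream callback control is a 512-byte binary (bytes) element */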
+int msm_adsp_stream_callback_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = 512;
+
+ return 0;
+}
+
static int msm_multichannel_ec_primary_mic_ch_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
diff --git a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h
index f8a1da5e7702..70ce20fbd8f8 100644
--- a/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h
+++ b/sound/soc/msm/qdsp6v2/msm-qti-pp-config.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -13,7 +13,17 @@
#define _MSM_QTI_PP_H_
#include <sound/soc.h>
-
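+/* ADSP stream PP event mixer-control helpers, implemented in msm-qti-pp-config.c */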
+int msm_adsp_inform_mixer_ctl(struct snd_soc_pcm_runtime *rtd,
+ const char *mixer_ctl_name,
+ uint32_t *payload);
+int msm_adsp_stream_cmd_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+int msm_adsp_stream_callback_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int msm_adsp_stream_callback_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int msm_adsp_stream_callback_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
#ifdef CONFIG_QTI_PP
void msm_qti_pp_send_eq_values(int fedai_id);
int msm_qti_pp_send_stereo_to_custom_stereo_cmd(int port_id, int copp_idx,
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 79f27852391f..1ca99c3f9115 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -1094,6 +1094,65 @@ fail:
return NULL;
}
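+/*
+ * Build an APR packet from @opcode plus the caller-supplied parameter block
+ * and send it on the ASM stream, then wait up to 1 s for the acknowledgement
+ * tracked in ac->cmd_state_pp.
+ */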
+int q6asm_send_stream_cmd(struct audio_client *ac, uint32_t opcode,
+ void *param, uint32_t params_length)
+{
+ char *asm_params = NULL;
+ struct apr_hdr hdr;
+ int sz, rc;
+
+ if (!param || !ac) {
+ pr_err("%s: %s is NULL\n", __func__,
+ (!param) ? "param" : "ac");
+ rc = -EINVAL;
+ goto done;
+ }
+
+ sz = sizeof(struct apr_hdr) + params_length;
+ asm_params = kzalloc(sz, GFP_KERNEL);
+ if (!asm_params) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ q6asm_add_hdr_async(ac, &hdr, sizeof(struct apr_hdr) +
+ params_length, TRUE);
+ atomic_set(&ac->cmd_state_pp, -1);
+ hdr.opcode = opcode;
+ memcpy(asm_params, &hdr, sizeof(struct apr_hdr));
+ memcpy(asm_params + sizeof(struct apr_hdr),
+ param, params_length);
+ rc = apr_send_pkt(ac->apr, (uint32_t *) asm_params);
+ if (rc < 0) {
+ pr_err("%s: audio adsp pp register failed\n", __func__);
+ rc = -EINVAL;
+ goto fail_send_param;
+ }
+
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state_pp) >= 0), 1 * HZ);
+ if (!rc) {
+ pr_err("%s: timeout, adsp pp register\n", __func__);
+ rc = -ETIMEDOUT;
+ goto fail_send_param;
+ }
+
+ if (atomic_read(&ac->cmd_state_pp) > 0) {
+ pr_err("%s: DSP returned error[%s] adsp pp register\n",
+ __func__, adsp_err_get_err_str(
+ atomic_read(&ac->cmd_state_pp)));
+ rc = adsp_err_get_lnx_err_code(
+ atomic_read(&ac->cmd_state_pp));
+ goto fail_send_param;
+ }
+
+ rc = 0;
+fail_send_param:
+ kfree(asm_params);
+done:
+ return rc;
+}
+
struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv)
{
struct audio_client *ac;
@@ -1615,6 +1674,8 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
int32_t ret = 0;
union asm_token_struct asm_token;
uint8_t buf_index;
+ char *pp_event_package = NULL;
+ uint32_t payload_size = 0;
if (ac == NULL) {
pr_err("%s: ac NULL\n", __func__);
@@ -1797,6 +1858,17 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
data->payload_size);
}
break;
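+	/* ack for q6asm_send_stream_cmd(): record DSP status and wake waiter */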
+ case ASM_STREAM_CMD_REGISTER_PP_EVENTS:
+ pr_debug("%s: ASM_STREAM_CMD_REGISTER_PP_EVENTS session %d opcode 0x%x token 0x%x src %d dest %d\n",
+ __func__, ac->session,
+ data->opcode, data->token,
+ data->src_port, data->dest_port);
+		if (payload[1] != 0)
+			pr_err("%s: ASM pp events register error = %d, resuming\n",
+				__func__, payload[1]);
+ atomic_set(&ac->cmd_state_pp, payload[1]);
+ wake_up(&ac->cmd_wait);
+ break;
default:
pr_debug("%s: command[0x%x] not expecting rsp\n",
__func__, payload[0]);
@@ -1967,6 +2039,26 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
case ASM_SESSION_CMDRSP_GET_MTMX_STRTR_PARAMS_V2:
q6asm_process_mtmx_get_param_rsp(ac, (void *) payload);
break;
+ case ASM_STREAM_PP_EVENT:
+ pr_debug("%s: ASM_STREAM_PP_EVENT payload[0][0x%x] payload[1][0x%x]",
+ __func__, payload[0], payload[1]);
+ /* repack payload for asm_stream_pp_event
+ * package is composed of size + actual payload
+ */
+ payload_size = data->payload_size;
+ pp_event_package =
+ kzalloc(payload_size + sizeof(payload_size),
+ GFP_ATOMIC);
+ if (!pp_event_package)
+ return -ENOMEM;
+ memcpy((void *)pp_event_package,
+ &payload_size, sizeof(payload_size));
+ memcpy((void *)pp_event_package + sizeof(payload_size),
+ data->payload, payload_size);
+ ac->cb(data->opcode, data->token,
+ (void *)pp_event_package, ac->priv);
+ kfree(pp_event_package);
+ return 0;
case ASM_SESSION_CMDRSP_GET_PATH_DELAY_V2:
pr_debug("%s: ASM_SESSION_CMDRSP_GET_PATH_DELAY_V2 session %d status 0x%x msw %u lsw %u\n",
__func__, ac->session, payload[0], payload[2],