summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinux Build Service Account <lnxbuild@localhost>2016-07-25 11:20:59 -0600
committerLinux Build Service Account <lnxbuild@localhost>2016-07-25 11:20:59 -0600
commit6385cefa179bb87fff0f9e041075280f119b2554 (patch)
tree7268cffe35d29c255c4a777eb55d133c3a8b70fd
parenta9c1a507d16f89cdc7ef01e6af7c9e732c124cdd (diff)
parentcc915a39313a9c5987f56d4e80fafedbb0ffa913 (diff)
Promotion of kernel.lnx.4.4-160725.
CRs Change ID Subject -------------------------------------------------------------------------------------------------------------- 1045203 Icc884b53c85941f59f84bed108b536cc18b8be60 arm: dma-mapping: Export arch_setup_dma_ops symbol 1009124 Ic0dedbadc324b979583d7a3998195bf15ac537f6 msm: kgsl: Preemption cleanups 1044777 I9be1ee26419c082cda6351ef6d5aeedc6e17de55 msm: camera: isp: Vote bandwidth with clock enabled 1009194 Ic0dedbad998767a1ffdfe265e52fae7baa18d203 msm: kgsl: Program the secvid registers in the soft rese 1043961 I639e8bfb1275a1b8c8fa5bff3a46f9b34fe49ffe ARM: dts: msm: add ion heaps for msmfalcon 1040026 Ibb660397f8e7e3e0cd0a5e672924925e605b2eb7 msm: vidc: check for venus_hfi_device state in resume 1043478 Ib35104adf7b3618f94c6adf7fab531abffea3f76 msm: kgsl: Stop fault_timer before reading fault registe 1041122 I21e62361f40eb654c369048d01e158d4b96dd551 clk: msm: Fix MDSS compilation issue 1009149 Ic0dedbad641bfa3fd6cbc1c91a37fb0e37f72bae msm: kgsl: Relax timestamp comparisons for processing ev 1043798 I7a8c2b2fcc9b43e2e858114f7312fccf96dc3f78 of_batterydata: Pass battery id directly to find battery 1041459 I6122ceb014f70b8e0ac005bc3789fbd13a3e867a ARM: dts: msm: Remove SMD device for Modem edge for MSMC 1043377 Ic36a67c724d7f8a0e64affba92856160cce914bb ARM: dts: msm: Add SMEM entry for MSMFALCON 1041199 I32ad5c5c8c9dd30a79818c873cfe1d121fd62d49 ASoC: wcd: add wcd934x gpio controller device 1043961 I2b0d35bf90b92f7fbd4256f339c1567d539e17b2 ARM: dts: msm: Add initial memory map for msmfalcon 1035203 Ib5a2d7879f1b493bc445ab4a2d32a89f98d872b4 NFC: Fix crash due to invalid use of ese gpio 1009187 Ic0dedbadb8f6122e32a0a34a65c54a7bca2a523c msm: kgsl: Remove idle wait in kgsl_iommu_set_pf_policy( 1044736 I08fcb174fd0e0c49f8069e106b48597bcdfe847d clk: qcom: clk-dummy: Add a dummy clock provider 1041199 I889922a0c36ec80ee6ede95b2f19f80791323332 ASoC: wcd9335: add all child devices of codec 1009124 Ic0dedbadb277a6498d0840b45c90e1265e2f354a msm: kgsl: Force all 
command level context switches to t 1043369 I20253a0f0762982fe7d7cb0bde9f64f58a8068fb msm: vidc: add support for maximum bitrate rate control 1043465 Ic7652e43781f39d3137fb55f2fec2423e457fac4 qcom-charger: smb138x-charger: change power supply type 1009190 Ic0dedbad8476c308a13572d999540b243d97eabc msm: kgsl: Allow a draw context to skip snapshot 1009124 Ic0dedbadc48095eada9c5fce6004475a2cb0f0a9 msm: kgsl: Leave the MMU clocks on with the rest of the 1009124 Ic0dedbadff8df192096292b221130c8ef5b31e12 msm: kgsl: Keep the active draw context until it is swit 1037857 Id53a790cf67b201a4207f85604cb3472275d418a msm: mdss: mdp: fix handling of GDSC off notifier 1043373 Ic28f6f36781154ff0c28f4636ade8523d753ab6a msm: mdss: fix mdss errors on 32-bit compilation 1040761 I4ec609b45f5313d7c19ff6201742b0c5daf54174 ARM: dts: msm: update min voltage for pmcobalt s5 and s7 1042302 Ied8a0048d8af17fa593c1970cabb572aac338786 input: touchscreen: remove dead code in it7258 driver 1042813 I947b8d08f61624b914bd82caf4276364b9833cef soc: qcom: glink: Included sched.h to avoid build error 1025447 Ie302e79ff838837f214ac50ebfaa6e11f0055915 soc: qcom: code cleanup for service-locator 1009124 Ic0dedbad01a31a5da2954b097cb6fa937d45ef5c msm: kgsl: Implement fast preemption for 5XX 1041461 I299d31569291e90431802059f727a57ea0f76200 ARM: dts: msm: Remove SMD device for ADSP edge for MSMCO 1043377 I6df0575cf54003374a5ebdbfa54e169d3545ffe7 soc: qcom: smem: Add CDSP processor ID 1041199 I0489f9149cfd6ec7af056d074cb1869a705f9eff qcom: wcd934x: add pinctrl driver for wcd934x 1037272 I1cc965696c06bd3901d86668aaf597abb3ef2d6d msm: camera isp: Control camif interrupts on camif enabl 1043377 I097e5464ec6ab80c12bcdb5f38d0599fa40da9ee ARM: dts: msm: Add ipc-spinlock entry for MSMFALCON 986311 Ie7720001edbbaed8b202655445707b2b49a69cb1 msm: vidc: Increase output port buffer size for VP9 deco 1036037 I77ecff3a2ac395ff3799c1b0618fbc7f5eeb03da dwc3: gadget: Add debug event for pull up operation 1032174 
Id70c3230f761385489e5e94c613f4519239dfb1f wcnss: Avoid user buffer overloading for write cal data 1041199 I0f04c08587f080eb2df7341d41344ce2079de21b ARM: dts: msm: add gpio controller node for msmcobalt 1041462 I9f62cc0e8229b86500fa72908be5c503847aed8f ARM: dts: msm: Remove SMD device for DSPS edge for MSMCO 553571 If56742a9c6b6c4ef774da6e83d57aee56bf28842 Bluetooth: HID: Add support for building hid drivers 507581 I4287e9103769535f43e0934bac08435a524ee6a4 WLAN subsystem: Sysctl support for key TCP/IP parameters 1043369 I3765506f1d703f47e481719296ab890b1f3dc106 msm: vidc: Add support for setting packed QP range 987082 Ic44759d1a5c6e48b2f0f566ea8c153f01cf68279 msm: kgsl: Use the GPU to write the RPTR 1009134 Ic0dedbad6d99130e31cd8a06dfe025610e9157a8 msm: kgsl: Record the ringbuffer start of pipeline times 1009183 Ic0dedbad1015883788e12815806e3249a1e09b21 msm: kgsl: Do INIT_WORK() just before queueing a deferre 1042660 I2b94e78f2470dbc2fd469b5cab287d643e8f3227 sched/core: Fix uninitialized variable used for tracepoi 1043729 If8e08112d065e1327fd54d7b0daf511632aa059f ARM: dts: msm: Add GDSC data for mnoc fab for msmcobalt 1043377 Iba226aa328906427593ecae35fab3396f34c19ae soc: qcom: smem_xprt: Add CDSP entry in smem xprt driver 1022201 I9472f3162a87b8a4255d9c684573093642d488a2 msm: camera: ispif: RDI Pack mode support 1042656 I6e6a57e24b41e4b3d049bfcf694b9ad7e2144dd5 sched/core: Fix null-pointer dereference 1042302 Ic1eab3ba79b8e8e5c259bb92f2692fd0db5fc8d3 input: touchscreen: Add Touch screen driver for IC it725 1041199 If6066a42b8aa5a820263a88627e2405df5e227b7 ARM: dts: msm: add sound node for msmcobalt 1043377 Ibd8ceb8149b3041ce1f58ac4c3642d7391b89385 ARM: dts: msm: Add G-Link SSR entries for MSMFALCON 1009158 Ic0dedbad7416abb23c769a4d3be9ebd0ca04810c msm: kgsl: Remove unneeded error message in kgsl_iommu.c Change-Id: I1db3a206e714668dd1dcf714f3b236a07c92a862 CRs-Fixed: 1042660, 1036037, 1043478, 1041122, 1044736, 1041459, 507581, 1045203, 1009183, 1037857, 1009187, 
1009124, 1043961, 553571, 1009149, 1037272, 1025447, 1040026, 1043798, 1043465, 1043373, 1042656, 1043377, 1009190, 1022201, 987082, 1009194, 1042302, 1041461, 1032174, 1035203, 1009134, 1040761, 1042813, 1043729, 1041199, 1041462, 986311, 1044777, 1009158, 1043369
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,wcd-pinctrl.txt138
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi133
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-bus.dtsi11
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-wcd.dtsi96
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-wsa881x.dtsi51
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt.dtsi28
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-ion.dtsi52
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon.dtsi117
-rw-r--r--arch/arm/mm/dma-mapping.c1
-rw-r--r--arch/arm64/configs/msm-perf_defconfig3
-rw-r--r--arch/arm64/configs/msm_defconfig3
-rw-r--r--arch/arm64/configs/msmcortex-perf_defconfig3
-rw-r--r--arch/arm64/configs/msmcortex_defconfig3
-rw-r--r--drivers/clk/msm/Makefile2
-rw-r--r--drivers/clk/qcom/Makefile1
-rw-r--r--drivers/clk/qcom/clk-dummy.c110
-rw-r--r--drivers/clk/qcom/common.h4
-rw-r--r--drivers/gpu/msm/Makefile2
-rw-r--r--drivers/gpu/msm/a5xx_reg.h2
-rw-r--r--drivers/gpu/msm/adreno.c126
-rw-r--r--drivers/gpu/msm/adreno.h155
-rw-r--r--drivers/gpu/msm/adreno_a3xx.c4
-rw-r--r--drivers/gpu/msm/adreno_a4xx.c563
-rw-r--r--drivers/gpu/msm/adreno_a4xx.h9
-rw-r--r--drivers/gpu/msm/adreno_a4xx_preempt.c571
-rw-r--r--drivers/gpu/msm/adreno_a4xx_snapshot.c5
-rw-r--r--drivers/gpu/msm/adreno_a5xx.c687
-rw-r--r--drivers/gpu/msm/adreno_a5xx.h20
-rw-r--r--drivers/gpu/msm/adreno_a5xx_preempt.c574
-rw-r--r--drivers/gpu/msm/adreno_debugfs.c3
-rw-r--r--drivers/gpu/msm/adreno_dispatch.c617
-rw-r--r--drivers/gpu/msm/adreno_dispatch.h40
-rw-r--r--drivers/gpu/msm/adreno_drawctxt.c49
-rw-r--r--drivers/gpu/msm/adreno_drawctxt.h3
-rw-r--r--drivers/gpu/msm/adreno_ioctl.c2
-rw-r--r--drivers/gpu/msm/adreno_iommu.c102
-rw-r--r--drivers/gpu/msm/adreno_iommu.h6
-rw-r--r--drivers/gpu/msm/adreno_ringbuffer.c387
-rw-r--r--drivers/gpu/msm/adreno_ringbuffer.h26
-rw-r--r--drivers/gpu/msm/adreno_snapshot.c5
-rw-r--r--drivers/gpu/msm/adreno_trace.h101
-rw-r--r--drivers/gpu/msm/kgsl.c32
-rw-r--r--drivers/gpu/msm/kgsl.h41
-rw-r--r--drivers/gpu/msm/kgsl_cmdbatch.h4
-rw-r--r--drivers/gpu/msm/kgsl_device.h1
-rw-r--r--drivers/gpu/msm/kgsl_events.c25
-rw-r--r--drivers/gpu/msm/kgsl_iommu.c50
-rw-r--r--drivers/gpu/msm/kgsl_mmu.h6
-rw-r--r--drivers/gpu/msm/kgsl_pwrctrl.c7
-rw-r--r--drivers/input/touchscreen/it7258_ts_i2c.c851
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp40.c96
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp44.c92
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp46.c95
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.c167
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c30
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c15
-rw-r--r--drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c199
-rw-r--r--drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h8
-rw-r--r--drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h7
-rw-r--r--drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v3.h42
-rw-r--r--drivers/media/platform/msm/vidc/hfi_packetization.c25
-rw-r--r--drivers/media/platform/msm/vidc/msm_vdec.c9
-rw-r--r--drivers/media/platform/msm/vidc/msm_venc.c74
-rw-r--r--drivers/media/platform/msm/vidc/venus_hfi.c3
-rw-r--r--drivers/media/platform/msm/vidc/vidc_hfi.h3
-rw-r--r--drivers/media/platform/msm/vidc/vidc_hfi_api.h3
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/mfd/wcd9xxx-utils.c4
-rw-r--r--drivers/net/wireless/wcnss/wcnss_wlan.c2
-rw-r--r--drivers/nfc/nq-nci.c28
-rw-r--r--drivers/of/of_batterydata.c23
-rw-r--r--drivers/pinctrl/qcom/Kconfig7
-rw-r--r--drivers/pinctrl/qcom/Makefile1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-wcd.c443
-rw-r--r--drivers/power/qcom-charger/qpnp-fg.c17
-rw-r--r--drivers/power/qcom-charger/qpnp-smbcharger.c18
-rw-r--r--drivers/power/qcom-charger/smb138x-charger.c2
-rw-r--r--drivers/soc/qcom/glink_private.h1
-rw-r--r--drivers/soc/qcom/glink_smem_native_xprt.c2
-rw-r--r--drivers/soc/qcom/service-locator.c74
-rw-r--r--drivers/usb/dwc3/gadget.c11
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c13
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_ctl.c3
-rw-r--r--include/linux/of_batterydata.h11
-rw-r--r--include/net/tcp.h14
-rw-r--r--include/soc/qcom/smem.h1
-rw-r--r--include/uapi/linux/msm_kgsl.h2
-rw-r--r--include/uapi/linux/v4l2-controls.h11
-rw-r--r--include/uapi/media/msmb_ispif.h36
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c23
-rw-r--r--net/ipv4/tcp.c13
-rw-r--r--net/ipv4/tcp_input.c3
-rw-r--r--net/ipv4/tcp_timer.c34
-rwxr-xr-xsound/soc/codecs/wcd9335.c78
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x.c78
98 files changed, 4956 insertions, 2706 deletions
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,wcd-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,wcd-pinctrl.txt
new file mode 100644
index 000000000000..add8b7d688a8
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,wcd-pinctrl.txt
@@ -0,0 +1,138 @@
+Qualcomm Technologies, Inc. WCD GPIO block
+
+This binding describes the GPIO block found in the WCD934X series of
+audio codecs from QTI.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,wcd-pinctrl"
+
+- qcom,num-gpios:
+ Usage: required
+ Value type: <u32>
+ Definition: Number of GPIOs supported by the controller
+
+- gpio-controller:
+ Usage: required
+ Value type: <none>
+ Definition: Mark the device node as a GPIO controller
+
+- #gpio-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: Must be 2;
+ the first cell will be used to define gpio number and the
+ second denotes the flags for this gpio
+
+Please refer to ../gpio/gpio.txt for a general description of GPIO bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin or a list of pins. This configuration can include the
+mux function to select on those pin(s), and various pin configuration
+parameters, as listed below.
+
+
+SUBNODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+ Usage: required
+ Value type: <string-array>
+ Definition: List of gpio pins affected by the properties specified in
+ this subnode. Valid pins are:
+ gpio1-gpio5 for wcd9340
+
+- bias-disable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins should be configured as pull up.
+
+- qcom,pull-up-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the strength to use for pull up, if selected.
+
+- bias-high-impedance:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins will be put in high-Z mode and disabled.
+
+- input-enable:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are put in input mode.
+
+- output-high:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ high.
+
+- output-low:
+ Usage: optional
+ Value type: <none>
+ Definition: The specified pins are configured in output mode, driven
+ low.
+
+- qcom,drive-strength:
+ Usage: optional
+ Value type: <u32>
+ Definition: Selects the drive strength for the specified pins.
+
+Example:
+
+ wcd: wcd_pinctrl@5 {
+ compatible = "qcom,wcd-pinctrl";
+ qcom,num-gpios = <5>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ spkr_1_wcd_en_active: spkr_1_wcd_en_active {
+ mux {
+ pins = "gpio2";
+ };
+
+ config {
+ pins = "gpio2";
+ output-high;
+ };
+ };
+
+ spkr_1_wcd_en_sleep: spkr_1_wcd_en_sleep {
+ mux {
+ pins = "gpio2";
+ };
+
+ config {
+ pins = "gpio2";
+ input-enable;
+ };
+ };
+ };
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi
index b414a215cbaa..1ef5e6351aa6 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi
@@ -109,6 +109,78 @@
"SpkrLeft", "SpkrRight";
};
+ sound-tavil {
+ compatible = "qcom,msmcobalt-asoc-snd-tavil";
+ qcom,model = "msmcobalt-tavil-snd-card";
+
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "AMIC3", "MIC BIAS2",
+ "MIC BIAS2", "ANCRight Headset Mic",
+ "AMIC4", "MIC BIAS2",
+ "MIC BIAS2", "ANCLeft Headset Mic",
+ "AMIC5", "MIC BIAS3",
+ "MIC BIAS3", "Handset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC1", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic1",
+ "DMIC2", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic2",
+ "DMIC3", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic3",
+ "DMIC4", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic4",
+ "DMIC5", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic5",
+ "SpkrLeft IN", "SPK1 OUT",
+ "SpkrRight IN", "SPK2 OUT";
+
+ qcom,msm-mbhc-hphl-swh = <0>;
+ qcom,msm-mbhc-gnd-swh = <0>;
+ qcom,tavil-mclk-clk-freq = <9600000>;
+ asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
+ <&loopback>, <&compress>, <&hostless>,
+ <&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>;
+ asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
+ "msm-pcm-dsp.2", "msm-voip-dsp",
+ "msm-pcm-voice", "msm-pcm-loopback",
+ "msm-compress-dsp", "msm-pcm-hostless",
+ "msm-pcm-afe", "msm-lsm-client",
+ "msm-pcm-routing", "msm-cpe-lsm",
+ "msm-compr-dsp";
+ asoc-cpu = <&dai_hdmi>,
+ <&sb_0_rx>, <&sb_0_tx>, <&sb_1_rx>, <&sb_1_tx>,
+ <&sb_2_rx>, <&sb_2_tx>, <&sb_3_rx>, <&sb_3_tx>,
+ <&sb_4_rx>, <&sb_4_tx>, <&sb_5_tx>,
+ <&afe_pcm_rx>, <&afe_pcm_tx>, <&afe_proxy_rx>,
+ <&afe_proxy_tx>, <&incall_record_rx>,
+ <&incall_record_tx>, <&incall_music_rx>,
+ <&incall_music_2_rx>, <&sb_5_rx>,
+ <&usb_audio_rx>, <&usb_audio_tx>;
+ asoc-cpu-names = "msm-dai-q6-hdmi.8",
+ "msm-dai-q6-dev.16384", "msm-dai-q6-dev.16385",
+ "msm-dai-q6-dev.16386", "msm-dai-q6-dev.16387",
+ "msm-dai-q6-dev.16388", "msm-dai-q6-dev.16389",
+ "msm-dai-q6-dev.16390", "msm-dai-q6-dev.16391",
+ "msm-dai-q6-dev.16392", "msm-dai-q6-dev.16393",
+ "msm-dai-q6-dev.16395", "msm-dai-q6-dev.224",
+ "msm-dai-q6-dev.225", "msm-dai-q6-dev.241",
+ "msm-dai-q6-dev.240", "msm-dai-q6-dev.32771",
+ "msm-dai-q6-dev.32772", "msm-dai-q6-dev.32773",
+ "msm-dai-q6-dev.32770", "msm-dai-q6-dev.16394",
+ "msm-dai-q6-dev.28672", "msm-dai-q6-dev.28673";
+ asoc-codec = <&stub_codec>;
+ asoc-codec-names = "msm-stub-codec.1";
+ qcom,wsa-max-devs = <2>;
+ qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
+ <&wsa881x_0213>, <&wsa881x_0214>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight",
+ "SpkrLeft", "SpkrRight";
+ };
+
cpe: qcom,msm-cpe-lsm {
compatible = "qcom,msm-cpe-lsm";
};
@@ -141,6 +213,15 @@
#clock-cells = <1>;
};
+ clock_audio_lnbb: audio_ext_clk_lnbb {
+ status = "ok";
+ compatible = "qcom,audio-ref-clk";
+ clock-names = "osr_clk";
+ clocks = <&clock_gcc clk_ln_bb_clk2>;
+ qcom,node_has_rpm_clock;
+ #clock-cells = <1>;
+ };
+
wcd_rst_gpio: msm_cdc_pinctrl@64 {
compatible = "qcom,msm-cdc-pinctrl";
qcom,cdc-rst-n-gpio = <&tlmm 64 0>;
@@ -208,4 +289,56 @@
qcom,cdc-dmic-sample-rate = <4800000>;
qcom,cdc-mad-dmic-rate = <600000>;
};
+
+ tavil_codec {
+ compatible = "qcom,tavil-slim-pgd";
+ elemental-addr = [00 01 50 02 17 02];
+
+ interrupt-parent = <&wcd9xxx_intc>;
+ interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
+ 17 18 19 20 21 22 23 24 25 26 27 28 29
+ 30 31>;
+
+ qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>;
+
+ clock-names = "wcd_clk";
+ clocks = <&clock_audio_lnbb clk_audio_pmi_lnbb_clk>;
+
+ cdc-vdd-buck-supply = <&pmcobalt_s4>;
+ qcom,cdc-vdd-buck-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-buck-current = <650000>;
+
+ cdc-buck-sido-supply = <&pmcobalt_s4>;
+ qcom,cdc-buck-sido-voltage = <1800000 1800000>;
+ qcom,cdc-buck-sido-current = <250000>;
+
+ cdc-vdd-tx-h-supply = <&pmcobalt_s4>;
+ qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-tx-h-current = <25000>;
+
+ cdc-vdd-rx-h-supply = <&pmcobalt_s4>;
+ qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>;
+ qcom,cdc-vdd-rx-h-current = <25000>;
+
+ cdc-vddpx-1-supply = <&pmcobalt_s4>;
+ qcom,cdc-vddpx-1-voltage = <1800000 1800000>;
+ qcom,cdc-vddpx-1-current = <10000>;
+
+ qcom,cdc-static-supplies = "cdc-vdd-buck",
+ "cdc-buck-sido",
+ "cdc-vdd-tx-h",
+ "cdc-vdd-rx-h",
+ "cdc-vddpx-1";
+
+ qcom,cdc-micbias1-mv = <1800>;
+ qcom,cdc-micbias2-mv = <1800>;
+ qcom,cdc-micbias3-mv = <1800>;
+ qcom,cdc-micbias4-mv = <1800>;
+
+ qcom,cdc-mclk-clk-rate = <9600000>;
+ qcom,cdc-slim-ifd = "tavil-slim-ifd";
+ qcom,cdc-slim-ifd-elemental-addr = [00 00 50 02 17 02];
+ qcom,cdc-dmic-sample-rate = <4800000>;
+ qcom,cdc-mad-dmic-rate = <600000>;
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-bus.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-bus.dtsi
index 3c39a61c4328..86decf438430 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-bus.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-bus.dtsi
@@ -131,6 +131,16 @@
clock-names = "bus_clk", "bus_a_clk";
clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_gcc clk_mmssnoc_axi_a_clk>;
+ clk-mdss-axi-no-rate-supply =
+ <&gdsc_mdss>;
+ clk-mdss-ahb-no-rate-supply =
+ <&gdsc_mdss>;
+ clk-camss-ahb-no-rate-supply =
+ <&gdsc_camss_top>;
+ clk-video-ahb-no-rate-supply =
+ <&gdsc_venus>;
+ clk-video-axi-no-rate-supply =
+ <&gdsc_venus>;
qcom,node-qos-clks {
clock-names =
"clk-noc-cfg-ahb-no-rate",
@@ -141,6 +151,7 @@
"clk-video-ahb-no-rate",
"clk-video-axi-no-rate";
clocks =
+ <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_gcc clk_gcc_mmss_noc_cfg_ahb_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_mdss_ahb_clk>,
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
index 0f3fe60465c1..12ee61b34d8c 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
@@ -82,7 +82,7 @@
rpm-regulator-smpa5 {
status = "okay";
pmcobalt_s5: regulator-s5 {
- regulator-min-microvolt = <2040000>;
+ regulator-min-microvolt = <1904000>;
regulator-max-microvolt = <2040000>;
status = "okay";
};
@@ -91,7 +91,7 @@
rpm-regulator-smpa7 {
status = "okay";
pmcobalt_s7: regulator-s7 {
- regulator-min-microvolt = <1028000>;
+ regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1028000>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-wcd.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-wcd.dtsi
new file mode 100644
index 000000000000..7ae084debe06
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msmcobalt-wcd.dtsi
@@ -0,0 +1,96 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&slim_aud {
+ tasha_codec {
+ wsa_spkr_sd1: msm_cdc_pinctrll {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_1_sd_n_active>;
+ pinctrl-1 = <&spkr_1_sd_n_sleep>;
+ };
+
+ wsa_spkr_sd2: msm_cdc_pinctrlr {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_2_sd_n_active>;
+ pinctrl-1 = <&spkr_2_sd_n_sleep>;
+ };
+ };
+
+ tavil_codec {
+ wcd: wcd_pinctrl@5 {
+ compatible = "qcom,wcd-pinctrl";
+ qcom,num-gpios = <5>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ spkr_1_wcd_en_active: spkr_1_wcd_en_active {
+ mux {
+ pins = "gpio2";
+ };
+
+ config {
+ pins = "gpio2";
+ output-high;
+ };
+ };
+
+ spkr_1_wcd_en_sleep: spkr_1_wcd_en_sleep {
+ mux {
+ pins = "gpio2";
+ };
+
+ config {
+ pins = "gpio2";
+ input-enable;
+ };
+ };
+
+ spkr_2_wcd_en_active: spkr_2_sd_n_active {
+ mux {
+ pins = "gpio3";
+ };
+
+ config {
+ pins = "gpio3";
+ output-high;
+ };
+ };
+
+ spkr_2_wcd_en_sleep: spkr_2_sd_n_sleep {
+ mux {
+ pins = "gpio3";
+ };
+
+ config {
+ pins = "gpio3";
+ input-enable;
+ };
+ };
+ };
+
+ wsa_spkr_wcd_sd1: msm_cdc_pinctrll {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_1_wcd_en_active>;
+ pinctrl-1 = <&spkr_1_wcd_en_sleep>;
+ };
+
+ wsa_spkr_wcd_sd2: msm_cdc_pinctrlr {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&spkr_2_wcd_en_active>;
+ pinctrl-1 = <&spkr_2_wcd_en_sleep>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-wsa881x.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-wsa881x.dtsi
index 8f1f699cfc1f..baf05c1c241b 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-wsa881x.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-wsa881x.dtsi
@@ -9,24 +9,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-
-&soc {
- wsa_spkr_sd1: msm_cdc_pinctrl@65 {
- compatible = "qcom,msm-cdc-pinctrl";
- qcom,cdc-rst-n-gpio = <&tlmm 65 0>;
- pinctrl-names = "aud_active", "aud_sleep";
- pinctrl-0 = <&spkr_1_sd_n_active>;
- pinctrl-1 = <&spkr_1_sd_n_sleep>;
- };
-
- wsa_spkr_sd2: msm_cdc_pinctrl@66 {
- compatible = "qcom,msm-cdc-pinctrl";
- qcom,cdc-rst-n-gpio = <&tlmm 66 0>;
- pinctrl-names = "aud_active", "aud_sleep";
- pinctrl-0 = <&spkr_2_sd_n_active>;
- pinctrl-1 = <&spkr_2_sd_n_sleep>;
- };
-};
+#include "msmcobalt-wcd.dtsi"
&slim_aud {
tasha_codec {
@@ -60,4 +43,36 @@
};
};
};
+
+ tavil_codec {
+ swr_master {
+ compatible = "qcom,swr-wcd";
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ wsa881x_0211: wsa881x@20170211 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x20170211>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
+ };
+
+ wsa881x_0212: wsa881x@20170212 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x20170212>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+ };
+
+ wsa881x_0213: wsa881x@21170213 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x21170213>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
+ };
+
+ wsa881x_0214: wsa881x@21170214 {
+ compatible = "qcom,wsa881x";
+ reg = <0x00 0x21170214>;
+ qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index 85cd7bfd4e37..e748783b0c7d 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -1123,34 +1123,6 @@
reg-names = "smem", "irq-reg-base", "aux-mem1",
"smem_targ_info_reg";
qcom,mpu-enabled;
-
- qcom,smd-modem {
- compatible = "qcom,smd";
- qcom,smd-edge = <0>;
- qcom,smd-irq-offset = <0x0>;
- qcom,smd-irq-bitmask = <0x1000>;
- interrupts = <0 449 1>;
- label = "modem";
- qcom,not-loadable;
- };
-
- qcom,smd-adsp {
- compatible = "qcom,smd";
- qcom,smd-edge = <1>;
- qcom,smd-irq-offset = <0x0>;
- qcom,smd-irq-bitmask = <0x100>;
- interrupts = <0 156 1>;
- label = "adsp";
- };
-
- qcom,smd-dsps {
- compatible = "qcom,smd";
- qcom,smd-edge = <3>;
- qcom,smd-irq-offset = <0x0>;
- qcom,smd-irq-bitmask = <0x2000000>;
- interrupts = <0 176 1>;
- label = "dsps";
- };
};
qcom,msm-adsprpc-mem {
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-ion.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-ion.dtsi
new file mode 100644
index 000000000000..f6deef335844
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msmfalcon-ion.dtsi
@@ -0,0 +1,52 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,ion {
+ compatible = "qcom,msm-ion";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ system_heap: qcom,ion-heap@25 {
+ reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
+ };
+
+ system_contig_heap: qcom,ion-heap@21 {
+ reg = <21>;
+ qcom,ion-heap-type = "SYSTEM_CONTIG";
+ };
+
+ qcom,ion-heap@22 { /* ADSP HEAP */
+ reg = <22>;
+ memory-region = <&adsp_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
+ qcom,ion-heap@27 { /* QSEECOM HEAP */
+ reg = <27>;
+ memory-region = <&qseecom_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
+ qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
+ reg = <10>;
+ memory-region = <&secure_display_memory>;
+ qcom,ion-heap-type = "HYP_CMA";
+ };
+
+ qcom,ion-heap@9 {
+ reg = <9>;
+ qcom,ion-heap-type = "SYSTEM_SECURE";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon.dtsi b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
index 0894d116a0db..ea60ed90cf4f 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
@@ -130,6 +130,68 @@
};
soc: soc { };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ removed_regions: removed_regions@85800000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0x0 0x85800000 0x0 0x3700000>;
+ };
+
+ modem_fw_mem: modem_fw_region@8ac00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0x0 0x8ac00000 0x0 0x7800000>;
+ };
+
+ adsp_fw_mem: adsp_fw_region@92400000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0x0 0x92400000 0x0 0x1e00000>;
+ };
+
+ cdsp_fw_mem: cdsp_fw_region@94200000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0x0 0x94200000 0x0 0x600000>;
+ };
+
+ venus_fw_mem: venus_fw_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0x0 0x80000000 0x0 0x20000000>;
+ reusable;
+ alignment = <0x0 0x400000>;
+ size = <0x0 0x800000>;
+ };
+
+ adsp_mem: adsp_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
+ reusable;
+ alignment = <0x0 0x400000>;
+ size = <0x0 0x400000>;
+ };
+
+ qseecom_mem: qseecom_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
+ reusable;
+ alignment = <0x0 0x400000>;
+ size = <0x0 0x1400000>;
+ };
+
+ secure_display_memory: secure_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
+ reusable;
+ alignment = <0x0 0x400000>;
+ size = <0x0 0x5c00000>;
+ };
+ };
};
&soc {
@@ -251,4 +313,59 @@
compatible = "qcom,dummycc";
#clock-cells = <1>;
};
+
+ qcom,ipc-spinlock@1f40000 {
+ compatible = "qcom,ipc-spinlock-sfpb";
+ reg = <0x1f40000 0x8000>;
+ qcom,num-locks = <8>;
+ };
+
+ qcom,smem@86000000 {
+ compatible = "qcom,smem";
+ reg = <0x86000000 0x200000>,
+ <0x17911008 0x4>,
+ <0x778000 0x7000>,
+ <0x1fd4000 0x8>;
+ reg-names = "smem", "irq-reg-base", "aux-mem1",
+ "smem_targ_info_reg";
+ qcom,mpu-enabled;
+ };
+
+ glink_mpss: qcom,glink-ssr-modem {
+ compatible = "qcom,glink_ssr";
+ label = "modem";
+ qcom,edge = "mpss";
+ qcom,notify-edges = <&glink_lpass>, <&glink_rpm>,
+ <&glink_cdsp>;
+ qcom,xprt = "smem";
+ };
+
+ glink_lpass: qcom,glink-ssr-adsp {
+ compatible = "qcom,glink_ssr";
+ label = "adsp";
+ qcom,edge = "lpass";
+ qcom,notify-edges = <&glink_mpss>, <&glink_rpm>,
+ <&glink_cdsp>;
+ qcom,xprt = "smem";
+ };
+
+ glink_rpm: qcom,glink-ssr-rpm {
+ compatible = "qcom,glink_ssr";
+ label = "rpm";
+ qcom,edge = "rpm";
+ qcom,notify-edges = <&glink_lpass>, <&glink_mpss>,
+ <&glink_cdsp>;
+ qcom,xprt = "smem";
+ };
+
+ glink_cdsp: qcom,glink-ssr-cdsp {
+ compatible = "qcom,glink_ssr";
+ label = "cdsp";
+ qcom,edge = "cdsp";
+ qcom,notify-edges = <&glink_lpass>, <&glink_mpss>,
+ <&glink_rpm>;
+ qcom,xprt = "smem";
+ };
};
+
+#include "msmfalcon-ion.dtsi"
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index bf6a92504175..d41957eae6ef 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2284,6 +2284,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
set_dma_ops(dev, dma_ops);
}
+EXPORT_SYMBOL(arch_setup_dma_ops);
void arch_teardown_dma_ops(struct device *dev)
{
diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig
index fc2cce36bff9..f396b0b7f4cc 100644
--- a/arch/arm64/configs/msm-perf_defconfig
+++ b/arch/arm64/configs/msm-perf_defconfig
@@ -408,7 +408,10 @@ CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSM8996=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y
diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig
index 38e489936895..c2902be72848 100644
--- a/arch/arm64/configs/msm_defconfig
+++ b/arch/arm64/configs/msm_defconfig
@@ -397,7 +397,10 @@ CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSM8996=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 72584917f930..be3e4ce1492a 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -390,7 +390,10 @@ CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSMCOBALT=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index e708960cf28b..2e9a7908307b 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -395,7 +395,10 @@ CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSMCOBALT=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
diff --git a/drivers/clk/msm/Makefile b/drivers/clk/msm/Makefile
index 107af19abff2..daf0b81c9663 100644
--- a/drivers/clk/msm/Makefile
+++ b/drivers/clk/msm/Makefile
@@ -27,4 +27,4 @@ ifeq ($(CONFIG_COMMON_CLK_MSM), y)
endif
obj-$(CONFIG_COMMON_CLK_MSM) += gdsc.o
-obj-$(CONFIG_COMMON_CLK_MSM)-y += mdss/
+obj-$(CONFIG_COMMON_CLK_MSM) += mdss/
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 94419695cd2e..dc1b66f84af2 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -12,6 +12,7 @@ clk-qcom-y += clk-regmap-mux.o
clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o
clk-qcom-y += clk-hfpll.o
clk-qcom-y += reset.o
+clk-qcom-y += clk-dummy.o
clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
diff --git a/drivers/clk/qcom/clk-dummy.c b/drivers/clk/qcom/clk-dummy.c
new file mode 100644
index 000000000000..3205fbc6b8ba
--- /dev/null
+++ b/drivers/clk/qcom/clk-dummy.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct clk_dummy {
+ struct clk_hw hw;
+ unsigned long rrate;
+};
+
+#define to_clk_dummy(_hw) container_of(_hw, struct clk_dummy, hw)
+
+static int dummy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_dummy *dummy = to_clk_dummy(hw);
+
+ dummy->rrate = rate;
+
+ pr_debug("set rate: %lu\n", rate);
+
+ return 0;
+}
+
+static long dummy_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
+static unsigned long dummy_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_dummy *dummy = to_clk_dummy(hw);
+
+ pr_debug("clock rate: %lu\n", dummy->rrate);
+
+ return dummy->rrate;
+}
+
+struct clk_ops clk_dummy_ops = {
+ .set_rate = dummy_clk_set_rate,
+ .round_rate = dummy_clk_round_rate,
+ .recalc_rate = dummy_clk_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_dummy_ops);
+
+/**
+ * clk_register_dummy - register dummy clock with the
+ * clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @flags: framework-specific flags
+ */
+static struct clk *clk_register_dummy(struct device *dev, const char *name,
+ unsigned long flags)
+{
+ struct clk_dummy *dummy;
+ struct clk *clk;
+ struct clk_init_data init = {};
+
+ /* allocate dummy clock */
+ dummy = kzalloc(sizeof(*dummy), GFP_KERNEL);
+ if (!dummy)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_dummy_ops;
+ init.flags = flags | CLK_IS_BASIC;
+ init.num_parents = 0;
+ dummy->hw.init = &init;
+
+ /* register the clock */
+ clk = clk_register(dev, &dummy->hw);
+ if (IS_ERR(clk))
+ kfree(dummy);
+
+ return clk;
+}
+
+/**
+ * of_dummy_clk_setup() - Setup function for the dummy clock provider
+ */
+static void of_dummy_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ const char *clk_name = "dummy_clk";
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ clk = clk_register_dummy(NULL, clk_name, 0);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ pr_info("%s: Dummy clock registered\n", clk_name);
+}
+CLK_OF_DECLARE(dummy_clk, "qcom,dummycc", of_dummy_clk_setup);
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index ae9bdeb21f29..10cabca921be 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -48,5 +48,5 @@ extern int qcom_cc_really_probe(struct platform_device *pdev,
struct regmap *regmap);
extern int qcom_cc_probe(struct platform_device *pdev,
const struct qcom_cc_desc *desc);
-
+extern struct clk_ops clk_dummy_ops;
#endif
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index db5a9ca28408..90aee3cad5ad 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -33,6 +33,8 @@ msm_adreno-y += \
adreno_a3xx_snapshot.o \
adreno_a4xx_snapshot.o \
adreno_a5xx_snapshot.o \
+ adreno_a4xx_preempt.o \
+ adreno_a5xx_preempt.o \
adreno_sysfs.o \
adreno.o \
adreno_cp_parser.o \
diff --git a/drivers/gpu/msm/a5xx_reg.h b/drivers/gpu/msm/a5xx_reg.h
index 913cedb885ad..207588844931 100644
--- a/drivers/gpu/msm/a5xx_reg.h
+++ b/drivers/gpu/msm/a5xx_reg.h
@@ -60,6 +60,8 @@
#define A5XX_CP_RB_BASE 0x800
#define A5XX_CP_RB_BASE_HI 0x801
#define A5XX_CP_RB_CNTL 0x802
+#define A5XX_CP_RB_RPTR_ADDR_LO 0x804
+#define A5XX_CP_RB_RPTR_ADDR_HI 0x805
#define A5XX_CP_RB_RPTR 0x806
#define A5XX_CP_RB_WPTR 0x807
#define A5XX_CP_PFP_STAT_ADDR 0x808
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 26e341a876e8..918231b73215 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -171,6 +171,30 @@ void adreno_writereg64(struct adreno_device *adreno_dev,
}
/**
+ * adreno_get_rptr() - Get the current ringbuffer read pointer
+ * @rb: Pointer to the ringbuffer to query
+ *
+ * Get the latest rptr
+ */
+unsigned int adreno_get_rptr(struct adreno_ringbuffer *rb)
+{
+ struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+ unsigned int rptr = 0;
+
+ if (adreno_is_a3xx(adreno_dev))
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
+ &rptr);
+ else {
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ kgsl_sharedmem_readl(&device->scratch, &rptr,
+ SCRATCH_RPTR_OFFSET(rb->id));
+ }
+
+ return rptr;
+}
+
+/**
* adreno_of_read_property() - Adreno read property
* @node: Device node
*
@@ -1290,6 +1314,28 @@ static void _update_threshold_count(struct adreno_device *adreno_dev,
adreno_dev->lm_threshold_cross = adj;
}
+static void _set_secvid(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* Program GPU context protection init values */
+ if (device->mmu.secured) {
+ if (adreno_is_a4xx(adreno_dev))
+ adreno_writereg(adreno_dev,
+ ADRENO_REG_RBBM_SECVID_TRUST_CONFIG, 0x2);
+ adreno_writereg(adreno_dev,
+ ADRENO_REG_RBBM_SECVID_TSB_CONTROL, 0x0);
+
+ adreno_writereg64(adreno_dev,
+ ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
+ ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
+ KGSL_IOMMU_SECURE_BASE);
+ adreno_writereg(adreno_dev,
+ ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
+ KGSL_IOMMU_SECURE_SIZE);
+ }
+}
+
/**
* _adreno_start - Power up the GPU and prepare to accept commands
* @adreno_dev: Pointer to an adreno_device structure
@@ -1332,26 +1378,13 @@ static int _adreno_start(struct adreno_device *adreno_dev)
if (regulator_left_on)
_soft_reset(adreno_dev);
+ adreno_ringbuffer_set_global(adreno_dev, 0);
+
status = kgsl_mmu_start(device);
if (status)
goto error_pwr_off;
- /* Program GPU contect protection init values */
- if (device->mmu.secured) {
- if (adreno_is_a4xx(adreno_dev))
- adreno_writereg(adreno_dev,
- ADRENO_REG_RBBM_SECVID_TRUST_CONFIG, 0x2);
- adreno_writereg(adreno_dev,
- ADRENO_REG_RBBM_SECVID_TSB_CONTROL, 0x0);
-
- adreno_writereg64(adreno_dev,
- ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
- ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
- KGSL_IOMMU_SECURE_BASE);
- adreno_writereg(adreno_dev,
- ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
- KGSL_IOMMU_SECURE_SIZE);
- }
+ _set_secvid(device);
status = adreno_ocmem_malloc(adreno_dev);
if (status) {
@@ -1533,6 +1566,22 @@ static int adreno_vbif_clear_pending_transactions(struct kgsl_device *device)
return ret;
}
+static void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
+{
+ int i;
+ struct adreno_ringbuffer *rb;
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ if (rb->drawctxt_active)
+ kgsl_context_put(&(rb->drawctxt_active->base));
+ rb->drawctxt_active = NULL;
+
+ kgsl_sharedmem_writel(KGSL_DEVICE(adreno_dev),
+ &rb->pagetable_desc, PT_INFO_OFFSET(current_rb_ptname),
+ 0);
+ }
+}
+
static int adreno_stop(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1645,13 +1694,6 @@ int adreno_reset(struct kgsl_device *device, int fault)
else
kgsl_pwrctrl_change_state(device, KGSL_STATE_NAP);
- /* Set the page table back to the default page table */
- kgsl_mmu_set_pt(&device->mmu, device->mmu.defaultpagetable);
- kgsl_sharedmem_writel(device,
- &adreno_dev->ringbuffers[0].pagetable_desc,
- offsetof(struct adreno_ringbuffer_pagetable_info,
- current_global_ptname), 0);
-
return ret;
}
@@ -2094,9 +2136,15 @@ static int adreno_soft_reset(struct kgsl_device *device)
/* Reset the GPU */
_soft_reset(adreno_dev);
+ /* Set the page table back to the default page table */
+ adreno_ringbuffer_set_global(adreno_dev, 0);
+ kgsl_mmu_set_pt(&device->mmu, device->mmu.defaultpagetable);
+
/* start of new CFF after reset */
kgsl_cffdump_open(device);
+ _set_secvid(device);
+
/* Enable 64 bit gpu addr if feature is set */
if (gpudev->enable_64bit &&
adreno_support_64bit(adreno_dev))
@@ -2149,8 +2197,6 @@ bool adreno_isidle(struct kgsl_device *device)
if (!kgsl_state_is_awake(device))
return true;
- adreno_get_rptr(ADRENO_CURRENT_RINGBUFFER(adreno_dev));
-
/*
* wptr is updated when we add commands to ringbuffer, add a barrier
* to make sure updated wptr is compared to rptr
@@ -2161,15 +2207,13 @@ bool adreno_isidle(struct kgsl_device *device)
* ringbuffer is truly idle when all ringbuffers read and write
* pointers are equal
*/
+
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- if (rb->rptr != rb->wptr)
- break;
+ if (!adreno_rb_empty(rb))
+ return false;
}
- if (i == adreno_dev->num_ringbuffers)
- return adreno_hw_isidle(adreno_dev);
-
- return false;
+ return adreno_hw_isidle(adreno_dev);
}
/**
@@ -2267,25 +2311,11 @@ static int adreno_drain(struct kgsl_device *device)
/* Caller must hold the device mutex. */
static int adreno_suspend_context(struct kgsl_device *device)
{
- int status = 0;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
/* process any profiling results that are available */
- adreno_profile_process_results(adreno_dev);
+ adreno_profile_process_results(ADRENO_DEVICE(device));
- status = adreno_idle(device);
- if (status)
- return status;
- /* set the device to default pagetable */
- kgsl_mmu_set_pt(&device->mmu, device->mmu.defaultpagetable);
- kgsl_sharedmem_writel(device,
- &adreno_dev->ringbuffers[0].pagetable_desc,
- offsetof(struct adreno_ringbuffer_pagetable_info,
- current_global_ptname), 0);
- /* set ringbuffers to NULL ctxt */
- adreno_set_active_ctxs_null(adreno_dev);
-
- return status;
+ /* Wait for the device to go idle */
+ return adreno_idle(device);
}
/**
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 7ac91f203a70..9f462bca26ce 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -193,6 +193,47 @@ enum adreno_gpurev {
struct adreno_gpudev;
+/* Time to allow preemption to complete (in ms) */
+#define ADRENO_PREEMPT_TIMEOUT 10000
+
+/**
+ * enum adreno_preempt_states
+ * ADRENO_PREEMPT_NONE: No preemption is scheduled
+ * ADRENO_PREEMPT_START: The S/W has started
+ * ADRENO_PREEMPT_TRIGGERED: A preempt has been triggered in the HW
+ * ADRENO_PREEMPT_FAULTED: The preempt timer has fired
+ * ADRENO_PREEMPT_PENDING: The H/W has signaled preemption complete
+ * ADRENO_PREEMPT_COMPLETE: Preemption could not be finished in the IRQ handler,
+ * worker has been scheduled
+ */
+enum adreno_preempt_states {
+ ADRENO_PREEMPT_NONE = 0,
+ ADRENO_PREEMPT_START,
+ ADRENO_PREEMPT_TRIGGERED,
+ ADRENO_PREEMPT_FAULTED,
+ ADRENO_PREEMPT_PENDING,
+ ADRENO_PREEMPT_COMPLETE,
+};
+
+/**
+ * struct adreno_preemption
+ * @state: The current state of preemption
+ * @counters: Memory descriptor for the memory where the GPU writes the
+ * preemption counters on switch
+ * @timer: A timer to make sure preemption doesn't stall
+ * @work: A work struct for the preemption worker (for 5XX)
+ * @token_submit: Indicates if a preempt token has been submitted in
+ * current ringbuffer (for 4XX)
+ */
+struct adreno_preemption {
+ atomic_t state;
+ struct kgsl_memdesc counters;
+ struct timer_list timer;
+ struct work_struct work;
+ bool token_submit;
+};
+
+
struct adreno_busy_data {
unsigned int gpu_busy;
unsigned int vbif_ram_cycles;
@@ -368,7 +409,7 @@ struct adreno_device {
const struct firmware *lm_fw;
uint32_t *lm_sequence;
uint32_t lm_size;
- struct kgsl_memdesc preemption_counters;
+ struct adreno_preemption preempt;
struct work_struct gpmu_work;
uint32_t lm_leakage;
uint32_t lm_limit;
@@ -458,6 +499,8 @@ enum adreno_regs {
ADRENO_REG_CP_WFI_PEND_CTR,
ADRENO_REG_CP_RB_BASE,
ADRENO_REG_CP_RB_BASE_HI,
+ ADRENO_REG_CP_RB_RPTR_ADDR_LO,
+ ADRENO_REG_CP_RB_RPTR_ADDR_HI,
ADRENO_REG_CP_RB_RPTR,
ADRENO_REG_CP_RB_WPTR,
ADRENO_REG_CP_CNTL,
@@ -709,17 +752,12 @@ struct adreno_gpudev {
void (*pwrlevel_change_settings)(struct adreno_device *,
unsigned int prelevel, unsigned int postlevel,
bool post);
- int (*preemption_pre_ibsubmit)(struct adreno_device *,
- struct adreno_ringbuffer *, unsigned int *,
- struct kgsl_context *, uint64_t cond_addr,
- struct kgsl_memobj_node *);
+ unsigned int (*preemption_pre_ibsubmit)(struct adreno_device *,
+ struct adreno_ringbuffer *rb,
+ unsigned int *, struct kgsl_context *);
int (*preemption_yield_enable)(unsigned int *);
- int (*preemption_post_ibsubmit)(struct adreno_device *,
- struct adreno_ringbuffer *, unsigned int *,
- struct kgsl_context *);
- int (*preemption_token)(struct adreno_device *,
- struct adreno_ringbuffer *, unsigned int *,
- uint64_t gpuaddr);
+ unsigned int (*preemption_post_ibsubmit)(struct adreno_device *,
+ unsigned int *);
int (*preemption_init)(struct adreno_device *);
void (*preemption_schedule)(struct adreno_device *);
void (*enable_64bit)(struct adreno_device *);
@@ -1260,34 +1298,32 @@ static inline int adreno_bootstrap_ucode(struct adreno_device *adreno_dev)
}
/**
- * adreno_preempt_state() - Check if preemption state is equal to given state
+ * adreno_in_preempt_state() - Check if preemption state is equal to given state
* @adreno_dev: Device whose preemption state is checked
* @state: State to compare against
*/
-static inline unsigned int adreno_preempt_state(
- struct adreno_device *adreno_dev,
- enum adreno_dispatcher_preempt_states state)
+static inline bool adreno_in_preempt_state(struct adreno_device *adreno_dev,
+ enum adreno_preempt_states state)
{
- return atomic_read(&adreno_dev->dispatcher.preemption_state) ==
- state;
+ return atomic_read(&adreno_dev->preempt.state) == state;
}
-
/**
- * adreno_get_rptr() - Get the current ringbuffer read pointer
- * @rb: Pointer the ringbuffer to query
- *
- * Get the current read pointer from the GPU register.
+ * adreno_set_preempt_state() - Set the specified preemption state
+ * @adreno_dev: Device to change preemption state
+ * @state: State to set
*/
-static inline unsigned int
-adreno_get_rptr(struct adreno_ringbuffer *rb)
+static inline void adreno_set_preempt_state(struct adreno_device *adreno_dev,
+ enum adreno_preempt_states state)
{
- struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
- if (adreno_dev->cur_rb == rb &&
- adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_CLEAR))
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &(rb->rptr));
+ /*
+ * atomic_set doesn't use barriers, so we need to do it ourselves. One
+ * before...
+ */
+ smp_wmb();
+ atomic_set(&adreno_dev->preempt.state, state);
- return rb->rptr;
+ /* ... and one after */
+ smp_wmb();
}
static inline bool adreno_is_preemption_enabled(
@@ -1295,7 +1331,6 @@ static inline bool adreno_is_preemption_enabled(
{
return test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
}
-
/**
* adreno_ctx_get_rb() - Return the ringbuffer that a context should
* use based on priority
@@ -1332,25 +1367,6 @@ static inline struct adreno_ringbuffer *adreno_ctx_get_rb(
return &(adreno_dev->ringbuffers[
adreno_dev->num_ringbuffers - 1]);
}
-/*
- * adreno_set_active_ctxs_null() - Put back reference to any active context
- * and set the active context to NULL
- * @adreno_dev: The adreno device
- */
-static inline void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
-{
- int i;
- struct adreno_ringbuffer *rb;
- FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- if (rb->drawctxt_active)
- kgsl_context_put(&(rb->drawctxt_active->base));
- rb->drawctxt_active = NULL;
- kgsl_sharedmem_writel(KGSL_DEVICE(adreno_dev),
- &rb->pagetable_desc,
- offsetof(struct adreno_ringbuffer_pagetable_info,
- current_rb_ptname), 0);
- }
-}
/*
* adreno_compare_prio_level() - Compares 2 priority levels based on enum values
@@ -1371,6 +1387,13 @@ void adreno_readreg64(struct adreno_device *adreno_dev,
void adreno_writereg64(struct adreno_device *adreno_dev,
enum adreno_regs lo, enum adreno_regs hi, uint64_t val);
+unsigned int adreno_get_rptr(struct adreno_ringbuffer *rb);
+
+static inline bool adreno_rb_empty(struct adreno_ringbuffer *rb)
+{
+ return (adreno_get_rptr(rb) == rb->wptr);
+}
+
static inline bool adreno_soft_fault_detect(struct adreno_device *adreno_dev)
{
return adreno_dev->fast_hang_detect &&
@@ -1400,4 +1423,36 @@ static inline bool adreno_support_64bit(struct adreno_device *adreno_dev)
}
#endif /*BITS_PER_LONG*/
+static inline void adreno_ringbuffer_set_global(
+ struct adreno_device *adreno_dev, int name)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ kgsl_sharedmem_writel(device,
+ &adreno_dev->ringbuffers[0].pagetable_desc,
+ PT_INFO_OFFSET(current_global_ptname), name);
+}
+
+static inline void adreno_ringbuffer_set_pagetable(struct adreno_ringbuffer *rb,
+ struct kgsl_pagetable *pt)
+{
+ struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rb->preempt_lock, flags);
+
+ kgsl_sharedmem_writel(device, &rb->pagetable_desc,
+ PT_INFO_OFFSET(current_rb_ptname), pt->name);
+
+ kgsl_sharedmem_writeq(device, &rb->pagetable_desc,
+ PT_INFO_OFFSET(ttbr0), kgsl_mmu_pagetable_get_ttbr0(pt));
+
+ kgsl_sharedmem_writel(device, &rb->pagetable_desc,
+ PT_INFO_OFFSET(contextidr),
+ kgsl_mmu_pagetable_get_contextidr(pt));
+
+ spin_unlock_irqrestore(&rb->preempt_lock, flags);
+}
+
#endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index ea8b75f4c83b..2accbe5c5764 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1756,9 +1756,9 @@ static int _ringbuffer_bootstrap_ucode(struct adreno_device *adreno_dev,
*cmds++ = cp_type3_packet(CP_INTERRUPT, 1);
*cmds++ = 0;
- rb->wptr = rb->wptr - 2;
+ rb->_wptr = rb->_wptr - 2;
adreno_ringbuffer_submit(rb, NULL);
- rb->wptr = rb->wptr + 2;
+ rb->_wptr = rb->_wptr + 2;
} else {
for (i = pfp_idx; i < adreno_dev->pfp_fw_size; i++)
*cmds++ = adreno_dev->pfp_fw[i];
diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c
index b1196da0cee1..b15d23cfbe0a 100644
--- a/drivers/gpu/msm/adreno_a4xx.c
+++ b/drivers/gpu/msm/adreno_a4xx.c
@@ -178,111 +178,6 @@ static const struct adreno_vbif_platform a4xx_vbif_platforms[] = {
{ adreno_is_a418, a430_vbif },
};
-/* a4xx_preemption_start() - Setup state to start preemption */
-static void a4xx_preemption_start(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- uint32_t val;
-
- /*
- * Setup scratch registers from which the GPU will program the
- * registers required to start execution of new ringbuffer
- * set ringbuffer address
- */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG8,
- rb->buffer_desc.gpuaddr);
- kgsl_regread(device, A4XX_CP_RB_CNTL, &val);
- /* scratch REG9 corresponds to CP_RB_CNTL register */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG9, val);
- /* scratch REG10 corresponds to rptr address */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG10, 0);
- /* scratch REG11 corresponds to rptr */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG11, rb->rptr);
- /* scratch REG12 corresponds to wptr */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG12, rb->wptr);
- /*
- * scratch REG13 corresponds to IB1_BASE,
- * 0 since we do not do switches in between IB's
- */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG13, 0);
- /* scratch REG14 corresponds to IB1_BUFSZ */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG14, 0);
- /* scratch REG15 corresponds to IB2_BASE */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG15, 0);
- /* scratch REG16 corresponds to IB2_BUFSZ */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG16, 0);
- /* scratch REG17 corresponds to GPR11 */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG17, rb->gpr11);
-}
-
-/* a4xx_preemption_save() - Save the state after preemption is done */
-static void a4xx_preemption_save(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- kgsl_regread(device, A4XX_CP_SCRATCH_REG18, &rb->rptr);
- kgsl_regread(device, A4XX_CP_SCRATCH_REG23, &rb->gpr11);
-}
-
-static int a4xx_preemption_token(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb, unsigned int *cmds,
- uint64_t gpuaddr)
-{
- unsigned int *cmds_orig = cmds;
-
- /* Turn on preemption flag */
- /* preemption token - fill when pt switch command size is known */
- *cmds++ = cp_type3_packet(CP_PREEMPT_TOKEN, 3);
- *cmds++ = (uint)gpuaddr;
- *cmds++ = 1;
- /* generate interrupt on preemption completion */
- *cmds++ = 1 << CP_PREEMPT_ORDINAL_INTERRUPT;
-
- return cmds - cmds_orig;
-
-}
-
-static int a4xx_preemption_pre_ibsubmit(
- struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb, unsigned int *cmds,
- struct kgsl_context *context, uint64_t cond_addr,
- struct kgsl_memobj_node *ib)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int *cmds_orig = cmds;
- int exec_ib = 0;
-
- cmds += a4xx_preemption_token(adreno_dev, rb, cmds,
- device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(context->id, preempted));
-
- if (ib)
- exec_ib = 1;
-
- *cmds++ = cp_type3_packet(CP_COND_EXEC, 4);
- *cmds++ = cond_addr;
- *cmds++ = cond_addr;
- *cmds++ = 1;
- *cmds++ = 7 + exec_ib * 3;
- if (exec_ib) {
- *cmds++ = cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2);
- *cmds++ = ib->gpuaddr;
- *cmds++ = (unsigned int) ib->size >> 2;
- }
- /* clear preemption flag */
- *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
- *cmds++ = cond_addr;
- *cmds++ = 0;
- *cmds++ = cp_type3_packet(CP_WAIT_MEM_WRITES, 1);
- *cmds++ = 0;
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
- *cmds++ = 0;
-
- return cmds - cmds_orig;
-}
-
/*
* a4xx_is_sptp_idle() - A430 SP/TP should be off to be considered idle
* @adreno_dev: The adreno device pointer
@@ -723,6 +618,8 @@ static void a4xx_start(struct adreno_device *adreno_dev)
gpudev->vbif_xin_halt_ctrl0_mask =
A405_VBIF_XIN_HALT_CTRL0_MASK;
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+
a4xx_protect_init(adreno_dev);
}
@@ -839,6 +736,7 @@ static unsigned int a4xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A4XX_CP_WFI_PEND_CTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A4XX_CP_RB_BASE),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, ADRENO_REG_SKIP),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO, A4XX_CP_RB_RPTR_ADDR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A4XX_CP_RB_RPTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A4XX_CP_RB_WPTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A4XX_CP_CNTL),
@@ -1634,8 +1532,15 @@ static int a4xx_rb_start(struct adreno_device *adreno_dev,
unsigned int start_type)
{
struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
+ struct kgsl_device *device = &adreno_dev->dev;
+ uint64_t addr;
int ret;
+ addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);
+
+ adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
+ ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);
+
/*
* The size of the ringbuffer in the hardware is the log2
* representation of the size in quadwords (sizedwords / 2).
@@ -1644,8 +1549,8 @@ static int a4xx_rb_start(struct adreno_device *adreno_dev,
*/
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
- (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F) |
- (1 << 27));
+ ((ilog2(4) << 8) & 0x1F00) |
+ (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F));
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
rb->buffer_desc.gpuaddr);
@@ -1755,6 +1660,19 @@ static struct adreno_coresight a4xx_coresight = {
.groups = a4xx_coresight_groups,
};
+static void a4xx_preempt_callback(struct adreno_device *adreno_dev, int bit)
+{
+ if (atomic_read(&adreno_dev->preempt.state) != ADRENO_PREEMPT_TRIGGERED)
+ return;
+
+ trace_adreno_hw_preempt_trig_to_comp_int(adreno_dev->cur_rb,
+ adreno_dev->next_rb,
+ adreno_get_rptr(adreno_dev->cur_rb),
+ adreno_get_rptr(adreno_dev->next_rb));
+
+ adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
+}
+
#define A4XX_INT_MASK \
((1 << A4XX_INT_RBBM_AHB_ERROR) | \
(1 << A4XX_INT_RBBM_REG_TIMEOUT) | \
@@ -1792,7 +1710,7 @@ static struct adreno_irq_funcs a4xx_irq_funcs[32] = {
/* 6 - RBBM_ATB_ASYNC_OVERFLOW */
ADRENO_IRQ_CALLBACK(a4xx_err_callback),
ADRENO_IRQ_CALLBACK(NULL), /* 7 - RBBM_GPC_ERR */
- ADRENO_IRQ_CALLBACK(adreno_dispatcher_preempt_callback), /* 8 - CP_SW */
+ ADRENO_IRQ_CALLBACK(a4xx_preempt_callback), /* 8 - CP_SW */
ADRENO_IRQ_CALLBACK(a4xx_err_callback), /* 9 - CP_OPCODE_ERROR */
/* 10 - CP_RESERVED_BIT_ERROR */
ADRENO_IRQ_CALLBACK(a4xx_err_callback),
@@ -1833,433 +1751,6 @@ static struct adreno_snapshot_data a4xx_snapshot_data = {
.sect_sizes = &a4xx_snap_sizes,
};
-#define ADRENO_RB_PREEMPT_TOKEN_DWORDS 125
-
-static int a4xx_submit_preempt_token(struct adreno_ringbuffer *rb,
- struct adreno_ringbuffer *incoming_rb)
-{
- struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int *ringcmds, *start;
- struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
- int ptname;
- struct kgsl_pagetable *pt;
- int pt_switch_sizedwords = 0, total_sizedwords = 20;
- unsigned link[ADRENO_RB_PREEMPT_TOKEN_DWORDS];
- uint i;
-
- if (incoming_rb->preempted_midway) {
-
- kgsl_sharedmem_readl(&incoming_rb->pagetable_desc,
- &ptname, offsetof(
- struct adreno_ringbuffer_pagetable_info,
- current_rb_ptname));
- pt = kgsl_mmu_get_pt_from_ptname(&(device->mmu),
- ptname);
- /*
- * always expect a valid pt, else pt refcounting is
- * messed up or current pt tracking has a bug which
- * could lead to eventual disaster
- */
- BUG_ON(!pt);
- /* set the ringbuffer for incoming RB */
- pt_switch_sizedwords =
- adreno_iommu_set_pt_generate_cmds(incoming_rb,
- &link[0], pt);
- total_sizedwords += pt_switch_sizedwords;
- }
-
- /*
- * Allocate total_sizedwords space in RB, this is the max space
- * required.
- */
- ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
-
- if (IS_ERR(ringcmds))
- return PTR_ERR(ringcmds);
-
- start = ringcmds;
-
- *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
- *ringcmds++ = 0;
-
- if (incoming_rb->preempted_midway) {
- for (i = 0; i < pt_switch_sizedwords; i++)
- *ringcmds++ = link[i];
- }
-
- *ringcmds++ = cp_register(adreno_dev, adreno_getreg(adreno_dev,
- ADRENO_REG_CP_PREEMPT_DISABLE), 1);
- *ringcmds++ = 0;
-
- *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
- *ringcmds++ = 1;
-
- ringcmds += gpudev->preemption_token(adreno_dev, rb, ringcmds,
- device->memstore.gpuaddr +
- KGSL_MEMSTORE_RB_OFFSET(rb, preempted));
-
- if ((uint)(ringcmds - start) > total_sizedwords) {
- KGSL_DRV_ERR(device, "Insufficient rb size allocated\n");
- BUG();
- }
-
- /*
- * If we have commands less than the space reserved in RB
- * adjust the wptr accordingly
- */
- rb->wptr = rb->wptr - (total_sizedwords - (uint)(ringcmds - start));
-
- /* submit just the preempt token */
- mb();
- kgsl_pwrscale_busy(device);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->wptr);
- return 0;
-}
-
-/**
- * a4xx_preempt_trig_state() - Schedule preemption in TRIGGERRED
- * state
- * @adreno_dev: Device which is in TRIGGERRED state
- */
-static void a4xx_preempt_trig_state(
- struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int rbbase, val;
-
- /*
- * Hardware not yet idle means that preemption interrupt
- * may still occur, nothing to do here until interrupt signals
- * completion of preemption, just return here
- */
- if (!adreno_hw_isidle(adreno_dev))
- return;
-
- /*
- * We just changed states, reschedule dispatcher to change
- * preemption states
- */
- if (ADRENO_DISPATCHER_PREEMPT_TRIGGERED !=
- atomic_read(&dispatcher->preemption_state)) {
- adreno_dispatcher_schedule(device);
- return;
- }
-
- /*
- * H/W is idle and we did not get a preemption interrupt, may
- * be device went idle w/o encountering any preempt token or
- * we already preempted w/o interrupt
- */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_BASE, &rbbase);
- /* Did preemption occur, if so then change states and return */
- if (rbbase != adreno_dev->cur_rb->buffer_desc.gpuaddr) {
- adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_DEBUG, &val);
- if (val && rbbase == adreno_dev->next_rb->buffer_desc.gpuaddr) {
- KGSL_DRV_INFO(device,
- "Preemption completed without interrupt\n");
- trace_adreno_hw_preempt_trig_to_comp(adreno_dev->cur_rb,
- adreno_dev->next_rb);
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_COMPLETE);
- adreno_dispatcher_schedule(device);
- return;
- }
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- /* reschedule dispatcher to take care of the fault */
- adreno_dispatcher_schedule(device);
- return;
- }
- /*
- * Check if preempt token was submitted after preemption trigger, if so
- * then preemption should have occurred, since device is already idle it
- * means something went wrong - trigger FT
- */
- if (dispatcher->preempt_token_submit) {
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- /* reschedule dispatcher to take care of the fault */
- adreno_dispatcher_schedule(device);
- return;
- }
- /*
- * Preempt token was not submitted after preemption trigger so device
- * may have gone idle before preemption could occur, if there are
- * commands that got submitted to current RB after triggering preemption
- * then submit them as those commands may have a preempt token in them
- */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
- &adreno_dev->cur_rb->rptr);
- if (adreno_dev->cur_rb->rptr != adreno_dev->cur_rb->wptr) {
- /*
- * Memory barrier before informing the
- * hardware of new commands
- */
- mb();
- kgsl_pwrscale_busy(device);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
- adreno_dev->cur_rb->wptr);
- return;
- }
-
- /* Submit preempt token to make preemption happen */
- if (adreno_drawctxt_switch(adreno_dev, adreno_dev->cur_rb, NULL, 0))
- BUG();
- if (a4xx_submit_preempt_token(adreno_dev->cur_rb,
- adreno_dev->next_rb))
- BUG();
- dispatcher->preempt_token_submit = 1;
- adreno_dev->cur_rb->wptr_preempt_end = adreno_dev->cur_rb->wptr;
- trace_adreno_hw_preempt_token_submit(adreno_dev->cur_rb,
- adreno_dev->next_rb);
-}
-
-/**
- * a4xx_preempt_clear_state() - Schedule preemption in
- * CLEAR state. Preemption can be issued in this state.
- * @adreno_dev: Device which is in CLEAR state
- */
-static void a4xx_preempt_clear_state(
- struct adreno_device *adreno_dev)
-
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct adreno_dispatcher_cmdqueue *dispatch_tempq;
- struct kgsl_cmdbatch *cmdbatch;
- struct adreno_ringbuffer *highest_busy_rb;
- int switch_low_to_high;
- int ret;
-
- /* Device not awake means there is nothing to do */
- if (!kgsl_state_is_awake(device))
- return;
-
- /* keep updating the current rptr when preemption is clear */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
- &(adreno_dev->cur_rb->rptr));
-
- highest_busy_rb = adreno_dispatcher_get_highest_busy_rb(adreno_dev);
- if (!highest_busy_rb)
- return;
-
- switch_low_to_high = adreno_compare_prio_level(
- highest_busy_rb->id,
- adreno_dev->cur_rb->id);
-
- /* already current then return */
- if (!switch_low_to_high)
- return;
-
- if (switch_low_to_high < 0) {
- /*
- * if switching to lower priority make sure that the rptr and
- * wptr are equal, when the lower rb is not starved
- */
- if (adreno_dev->cur_rb->rptr != adreno_dev->cur_rb->wptr)
- return;
- /*
- * switch to default context because when we switch back
- * to higher context then its not known which pt will
- * be current, so by making it default here the next
- * commands submitted will set the right pt
- */
- ret = adreno_drawctxt_switch(adreno_dev,
- adreno_dev->cur_rb,
- NULL, 0);
- /*
- * lower priority RB has to wait until space opens up in
- * higher RB
- */
- if (ret)
- return;
-
- adreno_writereg(adreno_dev,
- ADRENO_REG_CP_PREEMPT_DISABLE, 1);
- }
-
- /*
- * setup registers to do the switch to highest priority RB
- * which is not empty or may be starving away(poor thing)
- */
- a4xx_preemption_start(adreno_dev, highest_busy_rb);
-
- /* turn on IOMMU as the preemption may trigger pt switch */
- kgsl_mmu_enable_clk(&device->mmu);
-
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_TRIGGERED);
-
- adreno_dev->next_rb = highest_busy_rb;
- mod_timer(&dispatcher->preempt_timer, jiffies +
- msecs_to_jiffies(ADRENO_DISPATCH_PREEMPT_TIMEOUT));
-
- trace_adreno_hw_preempt_clear_to_trig(adreno_dev->cur_rb,
- adreno_dev->next_rb);
- /* issue PREEMPT trigger */
- adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT, 1);
- /*
- * IOMMU clock can be safely switched off after the timestamp
- * of the first command in the new rb
- */
- dispatch_tempq = &adreno_dev->next_rb->dispatch_q;
- if (dispatch_tempq->head != dispatch_tempq->tail)
- cmdbatch = dispatch_tempq->cmd_q[dispatch_tempq->head];
- else
- cmdbatch = NULL;
- if (cmdbatch)
- adreno_ringbuffer_mmu_disable_clk_on_ts(device,
- adreno_dev->next_rb,
- cmdbatch->global_ts);
- else
- adreno_ringbuffer_mmu_disable_clk_on_ts(device,
- adreno_dev->next_rb, adreno_dev->next_rb->timestamp);
- /* submit preempt token packet to ensure preemption */
- if (switch_low_to_high < 0) {
- ret = a4xx_submit_preempt_token(
- adreno_dev->cur_rb, adreno_dev->next_rb);
- /*
- * unexpected since we are submitting this when rptr = wptr,
- * this was checked above already
- */
- BUG_ON(ret);
- dispatcher->preempt_token_submit = 1;
- adreno_dev->cur_rb->wptr_preempt_end = adreno_dev->cur_rb->wptr;
- } else {
- dispatcher->preempt_token_submit = 0;
- adreno_dispatcher_schedule(device);
- adreno_dev->cur_rb->wptr_preempt_end = 0xFFFFFFFF;
- }
-}
-
-/**
- * a4xx_preempt_complete_state() - Schedule preemption in
- * COMPLETE state
- * @adreno_dev: Device which is in COMPLETE state
- */
-static void a4xx_preempt_complete_state(
- struct adreno_device *adreno_dev)
-
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct adreno_dispatcher_cmdqueue *dispatch_q;
- unsigned int wptr, rbbase;
- unsigned int val, val1;
-
- del_timer_sync(&dispatcher->preempt_timer);
-
- adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &val);
- adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_DEBUG, &val1);
-
- if (val || !val1) {
- KGSL_DRV_ERR(device,
- "Invalid state after preemption CP_PREEMPT: %08x, CP_PREEMPT_DEBUG: %08x\n",
- val, val1);
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- adreno_dispatcher_schedule(device);
- return;
- }
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_BASE, &rbbase);
- if (rbbase != adreno_dev->next_rb->buffer_desc.gpuaddr) {
- KGSL_DRV_ERR(device,
- "RBBASE incorrect after preemption, expected %x got %016llx\b",
- rbbase,
- adreno_dev->next_rb->buffer_desc.gpuaddr);
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- adreno_dispatcher_schedule(device);
- return;
- }
-
- a4xx_preemption_save(adreno_dev, adreno_dev->cur_rb);
-
- dispatch_q = &(adreno_dev->cur_rb->dispatch_q);
- /* new RB is the current RB */
- trace_adreno_hw_preempt_comp_to_clear(adreno_dev->next_rb,
- adreno_dev->cur_rb);
- adreno_dev->prev_rb = adreno_dev->cur_rb;
- adreno_dev->cur_rb = adreno_dev->next_rb;
- adreno_dev->cur_rb->preempted_midway = 0;
- adreno_dev->cur_rb->wptr_preempt_end = 0xFFFFFFFF;
- adreno_dev->next_rb = NULL;
- if (adreno_disp_preempt_fair_sched) {
- /* starved rb is now scheduled so unhalt dispatcher */
- if (ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED ==
- adreno_dev->cur_rb->starve_timer_state)
- adreno_put_gpu_halt(adreno_dev);
- adreno_dev->cur_rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED;
- adreno_dev->cur_rb->sched_timer = jiffies;
- /*
- * If the outgoing RB is has commands then set the
- * busy time for it
- */
- if (adreno_dev->prev_rb->rptr != adreno_dev->prev_rb->wptr) {
- adreno_dev->prev_rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT;
- adreno_dev->prev_rb->sched_timer = jiffies;
- } else {
- adreno_dev->prev_rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
- }
- }
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_CLEAR);
- if (adreno_compare_prio_level(adreno_dev->prev_rb->id,
- adreno_dev->cur_rb->id) < 0) {
- if (adreno_dev->prev_rb->wptr_preempt_end !=
- adreno_dev->prev_rb->rptr)
- adreno_dev->prev_rb->preempted_midway = 1;
- } else if (adreno_dev->prev_rb->wptr_preempt_end !=
- adreno_dev->prev_rb->rptr) {
- BUG();
- }
- /* submit wptr if required for new rb */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
- if (adreno_dev->cur_rb->wptr != wptr) {
- kgsl_pwrscale_busy(device);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
- adreno_dev->cur_rb->wptr);
- }
- /* clear preemption register */
- adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT_DEBUG, 0);
- adreno_preempt_process_dispatch_queue(adreno_dev, dispatch_q);
-}
-
-static void a4xx_preemption_schedule(
- struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- if (!adreno_is_preemption_enabled(adreno_dev))
- return;
-
- mutex_lock(&device->mutex);
-
- switch (atomic_read(&dispatcher->preemption_state)) {
- case ADRENO_DISPATCHER_PREEMPT_CLEAR:
- a4xx_preempt_clear_state(adreno_dev);
- break;
- case ADRENO_DISPATCHER_PREEMPT_TRIGGERED:
- a4xx_preempt_trig_state(adreno_dev);
- /*
- * if we transitioned to next state then fall-through
- * processing to next state
- */
- if (!adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_COMPLETE))
- break;
- case ADRENO_DISPATCHER_PREEMPT_COMPLETE:
- a4xx_preempt_complete_state(adreno_dev);
- break;
- default:
- BUG();
- }
-
- mutex_unlock(&device->mutex);
-}
-
struct adreno_gpudev adreno_a4xx_gpudev = {
.reg_offsets = &a4xx_reg_offsets,
.ft_perf_counters = a4xx_ft_perf_counters,
@@ -2284,6 +1775,6 @@ struct adreno_gpudev adreno_a4xx_gpudev = {
.regulator_enable = a4xx_regulator_enable,
.regulator_disable = a4xx_regulator_disable,
.preemption_pre_ibsubmit = a4xx_preemption_pre_ibsubmit,
- .preemption_token = a4xx_preemption_token,
.preemption_schedule = a4xx_preemption_schedule,
+ .preemption_init = a4xx_preemption_init,
};
diff --git a/drivers/gpu/msm/adreno_a4xx.h b/drivers/gpu/msm/adreno_a4xx.h
index e425dc8e9f7b..5dabc26fd34f 100644
--- a/drivers/gpu/msm/adreno_a4xx.h
+++ b/drivers/gpu/msm/adreno_a4xx.h
@@ -47,6 +47,15 @@
"RBBM_DPM_THERMAL_YELLOW_ERR" }, \
{ BIT(A4XX_INT_RBBM_DPM_THERMAL_RED_ERR), "RBBM_DPM_THERMAL_RED_ERR" }
+unsigned int a4xx_preemption_pre_ibsubmit(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb,
+ unsigned int *cmds,
+ struct kgsl_context *context);
+
+void a4xx_preemption_schedule(struct adreno_device *adreno_dev);
+
+int a4xx_preemption_init(struct adreno_device *adreno_dev);
+
void a4xx_snapshot(struct adreno_device *adreno_dev,
struct kgsl_snapshot *snapshot);
diff --git a/drivers/gpu/msm/adreno_a4xx_preempt.c b/drivers/gpu/msm/adreno_a4xx_preempt.c
new file mode 100644
index 000000000000..4087ac60c89e
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a4xx_preempt.c
@@ -0,0 +1,571 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "adreno.h"
+#include "adreno_a4xx.h"
+#include "adreno_trace.h"
+#include "adreno_pm4types.h"
+
+#define ADRENO_RB_PREEMPT_TOKEN_DWORDS 125
+
+static void a4xx_preemption_timer(unsigned long data)
+{
+ struct adreno_device *adreno_dev = (struct adreno_device *) data;
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int cur_rptr = adreno_get_rptr(adreno_dev->cur_rb);
+ unsigned int next_rptr = adreno_get_rptr(adreno_dev->next_rb);
+
+ KGSL_DRV_ERR(device,
+ "Preemption timed out. cur_rb rptr/wptr %x/%x id %d, next_rb rptr/wptr %x/%x id %d, disp_state: %d\n",
+ cur_rptr, adreno_dev->cur_rb->wptr, adreno_dev->cur_rb->id,
+ next_rptr, adreno_dev->next_rb->wptr, adreno_dev->next_rb->id,
+ atomic_read(&adreno_dev->preempt.state));
+
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ adreno_dispatcher_schedule(device);
+}
+
+static unsigned int a4xx_preemption_token(struct adreno_device *adreno_dev,
+ unsigned int *cmds, uint64_t gpuaddr)
+{
+ unsigned int *cmds_orig = cmds;
+
+ /* Turn on preemption flag */
+ /* preemption token - fill when pt switch command size is known */
+ *cmds++ = cp_type3_packet(CP_PREEMPT_TOKEN, 3);
+ *cmds++ = (uint)gpuaddr;
+ *cmds++ = 1;
+ /* generate interrupt on preemption completion */
+ *cmds++ = 1 << CP_PREEMPT_ORDINAL_INTERRUPT;
+
+ return (unsigned int) (cmds - cmds_orig);
+}
+
+unsigned int a4xx_preemption_pre_ibsubmit(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb, unsigned int *cmds,
+ struct kgsl_context *context)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int *cmds_orig = cmds;
+ unsigned int cond_addr = device->memstore.gpuaddr +
+ MEMSTORE_ID_GPU_ADDR(device, context->id, preempted);
+
+ cmds += a4xx_preemption_token(adreno_dev, cmds, cond_addr);
+
+ *cmds++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmds++ = cond_addr;
+ *cmds++ = cond_addr;
+ *cmds++ = 1;
+ *cmds++ = 7;
+
+ /* clear preemption flag */
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = cond_addr;
+ *cmds++ = 0;
+ *cmds++ = cp_type3_packet(CP_WAIT_MEM_WRITES, 1);
+ *cmds++ = 0;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
+ *cmds++ = 0;
+
+ return (unsigned int) (cmds - cmds_orig);
+}
+
+
+static void a4xx_preemption_start(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ uint32_t val;
+
+ /*
+ * Setup scratch registers from which the GPU will program the
+ * registers required to start execution of new ringbuffer
+ * set ringbuffer address
+ */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG8,
+ rb->buffer_desc.gpuaddr);
+ kgsl_regread(device, A4XX_CP_RB_CNTL, &val);
+ /* scratch REG9 corresponds to CP_RB_CNTL register */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG9, val);
+ /* scratch REG10 corresponds to rptr address */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG10,
+ SCRATCH_RPTR_GPU_ADDR(device, rb->id));
+ /* scratch REG11 corresponds to rptr */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG11, adreno_get_rptr(rb));
+ /* scratch REG12 corresponds to wptr */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG12, rb->wptr);
+ /*
+ * scratch REG13 corresponds to IB1_BASE,
+ * 0 since we do not do switches in between IB's
+ */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG13, 0);
+ /* scratch REG14 corresponds to IB1_BUFSZ */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG14, 0);
+ /* scratch REG15 corresponds to IB2_BASE */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG15, 0);
+ /* scratch REG16 corresponds to IB2_BUFSZ */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG16, 0);
+ /* scratch REG17 corresponds to GPR11 */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG17, rb->gpr11);
+}
+
+static void a4xx_preemption_save(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ kgsl_regread(device, A4XX_CP_SCRATCH_REG23, &rb->gpr11);
+}
+
+
+static int a4xx_submit_preempt_token(struct adreno_ringbuffer *rb,
+ struct adreno_ringbuffer *incoming_rb)
+{
+ struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int *ringcmds, *start;
+ int ptname;
+ struct kgsl_pagetable *pt;
+ int pt_switch_sizedwords = 0, total_sizedwords = 20;
+ unsigned link[ADRENO_RB_PREEMPT_TOKEN_DWORDS];
+ uint i;
+
+ if (incoming_rb->preempted_midway) {
+
+ kgsl_sharedmem_readl(&incoming_rb->pagetable_desc,
+ &ptname, PT_INFO_OFFSET(current_rb_ptname));
+ pt = kgsl_mmu_get_pt_from_ptname(&(device->mmu),
+ ptname);
+ /* set the ringbuffer for incoming RB */
+ pt_switch_sizedwords =
+ adreno_iommu_set_pt_generate_cmds(incoming_rb,
+ &link[0], pt);
+ total_sizedwords += pt_switch_sizedwords;
+ }
+
+ /*
+ * Allocate total_sizedwords space in RB, this is the max space
+ * required.
+ */
+ ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
+
+ if (IS_ERR(ringcmds))
+ return PTR_ERR(ringcmds);
+
+ start = ringcmds;
+
+ *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
+ *ringcmds++ = 0;
+
+ if (incoming_rb->preempted_midway) {
+ for (i = 0; i < pt_switch_sizedwords; i++)
+ *ringcmds++ = link[i];
+ }
+
+ *ringcmds++ = cp_register(adreno_dev, adreno_getreg(adreno_dev,
+ ADRENO_REG_CP_PREEMPT_DISABLE), 1);
+ *ringcmds++ = 0;
+
+ *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
+ *ringcmds++ = 1;
+
+ ringcmds += a4xx_preemption_token(adreno_dev, ringcmds,
+ device->memstore.gpuaddr +
+ MEMSTORE_RB_OFFSET(rb, preempted));
+
+ if ((uint)(ringcmds - start) > total_sizedwords)
+ KGSL_DRV_ERR(device, "Insufficient rb size allocated\n");
+
+ /*
+ * If we have commands less than the space reserved in RB
+ * adjust the wptr accordingly
+ */
+ rb->wptr = rb->wptr - (total_sizedwords - (uint)(ringcmds - start));
+
+ /* submit just the preempt token */
+ mb();
+ kgsl_pwrscale_busy(device);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->wptr);
+ return 0;
+}
+
+static void a4xx_preempt_trig_state(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int rbbase, val;
+ int ret;
+
+ /*
+ * Hardware not yet idle means that preemption interrupt
+ * may still occur, nothing to do here until interrupt signals
+ * completion of preemption, just return here
+ */
+ if (!adreno_hw_isidle(adreno_dev))
+ return;
+
+ /*
+ * We just changed states, reschedule dispatcher to change
+ * preemption states
+ */
+
+ if (atomic_read(&adreno_dev->preempt.state) !=
+ ADRENO_PREEMPT_TRIGGERED) {
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+
+ /*
+ * H/W is idle and we did not get a preemption interrupt, may
+ * be device went idle w/o encountering any preempt token or
+ * we already preempted w/o interrupt
+ */
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_BASE, &rbbase);
+ /* Did preemption occur, if so then change states and return */
+ if (rbbase != adreno_dev->cur_rb->buffer_desc.gpuaddr) {
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_DEBUG, &val);
+ if (val && rbbase == adreno_dev->next_rb->buffer_desc.gpuaddr) {
+ KGSL_DRV_INFO(device,
+ "Preemption completed without interrupt\n");
+ trace_adreno_hw_preempt_trig_to_comp(adreno_dev->cur_rb,
+ adreno_dev->next_rb,
+ adreno_get_rptr(adreno_dev->cur_rb),
+ adreno_get_rptr(adreno_dev->next_rb));
+ adreno_set_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_COMPLETE);
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ /* reschedule dispatcher to take care of the fault */
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+ /*
+ * Check if preempt token was submitted after preemption trigger, if so
+ * then preemption should have occurred, since device is already idle it
+ * means something went wrong - trigger FT
+ */
+ if (adreno_dev->preempt.token_submit) {
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ /* reschedule dispatcher to take care of the fault */
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+ /*
+ * Preempt token was not submitted after preemption trigger so device
+ * may have gone idle before preemption could occur, if there are
+ * commands that got submitted to current RB after triggering preemption
+ * then submit them as those commands may have a preempt token in them
+ */
+ if (!adreno_rb_empty(adreno_dev->cur_rb)) {
+ /*
+ * Memory barrier before informing the
+ * hardware of new commands
+ */
+ mb();
+ kgsl_pwrscale_busy(device);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
+ adreno_dev->cur_rb->wptr);
+ return;
+ }
+
+ /* Submit preempt token to make preemption happen */
+ ret = adreno_drawctxt_switch(adreno_dev, adreno_dev->cur_rb,
+ NULL, 0);
+ if (ret)
+ KGSL_DRV_ERR(device,
+ "Unable to switch context to NULL: %d\n", ret);
+
+ ret = a4xx_submit_preempt_token(adreno_dev->cur_rb,
+ adreno_dev->next_rb);
+ if (ret)
+ KGSL_DRV_ERR(device,
+ "Unable to submit preempt token: %d\n", ret);
+
+ adreno_dev->preempt.token_submit = true;
+ adreno_dev->cur_rb->wptr_preempt_end = adreno_dev->cur_rb->wptr;
+ trace_adreno_hw_preempt_token_submit(adreno_dev->cur_rb,
+ adreno_dev->next_rb,
+ adreno_get_rptr(adreno_dev->cur_rb),
+ adreno_get_rptr(adreno_dev->next_rb));
+}
+
+static struct adreno_ringbuffer *a4xx_next_ringbuffer(
+ struct adreno_device *adreno_dev)
+{
+ struct adreno_ringbuffer *rb, *next = NULL;
+ int i;
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ if (!adreno_rb_empty(rb) && next == NULL) {
+ next = rb;
+ continue;
+ }
+
+ if (!adreno_disp_preempt_fair_sched)
+ continue;
+
+ switch (rb->starve_timer_state) {
+ case ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT:
+ if (!adreno_rb_empty(rb) &&
+ adreno_dev->cur_rb != rb) {
+ rb->starve_timer_state =
+ ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT;
+ rb->sched_timer = jiffies;
+ }
+ break;
+ case ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT:
+ if (time_after(jiffies, rb->sched_timer +
+ msecs_to_jiffies(
+ adreno_dispatch_starvation_time))) {
+ rb->starve_timer_state =
+ ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED;
+ /* halt dispatcher to remove starvation */
+ adreno_get_gpu_halt(adreno_dev);
+ }
+ break;
+ case ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED:
+ /*
+ * If the RB has not been running for the minimum
+ * time slice then allow it to run
+ */
+ if (!adreno_rb_empty(rb) && time_before(jiffies,
+ adreno_dev->cur_rb->sched_timer +
+ msecs_to_jiffies(adreno_dispatch_time_slice)))
+ next = rb;
+ else
+ rb->starve_timer_state =
+ ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
+ break;
+ case ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED:
+ default:
+ break;
+ }
+ }
+
+ return next;
+}
+
+static void a4xx_preempt_clear_state(struct adreno_device *adreno_dev)
+
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_ringbuffer *highest_busy_rb;
+ int switch_low_to_high;
+ int ret;
+
+ /* Device not awake means there is nothing to do */
+ if (!kgsl_state_is_awake(device))
+ return;
+
+ highest_busy_rb = a4xx_next_ringbuffer(adreno_dev);
+ if (!highest_busy_rb || highest_busy_rb == adreno_dev->cur_rb)
+ return;
+
+ switch_low_to_high = adreno_compare_prio_level(
+ highest_busy_rb->id,
+ adreno_dev->cur_rb->id);
+
+ if (switch_low_to_high < 0) {
+ /*
+ * if switching to lower priority make sure that the rptr and
+ * wptr are equal, when the lower rb is not starved
+ */
+ if (!adreno_rb_empty(adreno_dev->cur_rb))
+ return;
+ /*
+ * switch to default context because when we switch back
+ * to higher context then its not known which pt will
+ * be current, so by making it default here the next
+ * commands submitted will set the right pt
+ */
+ ret = adreno_drawctxt_switch(adreno_dev,
+ adreno_dev->cur_rb,
+ NULL, 0);
+ /*
+ * lower priority RB has to wait until space opens up in
+ * higher RB
+ */
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Unable to switch context to NULL: %d",
+ ret);
+
+ return;
+ }
+
+ adreno_writereg(adreno_dev,
+ ADRENO_REG_CP_PREEMPT_DISABLE, 1);
+ }
+
+ /*
+ * setup registers to do the switch to highest priority RB
+ * which is not empty or may be starving away(poor thing)
+ */
+ a4xx_preemption_start(adreno_dev, highest_busy_rb);
+
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);
+
+ adreno_dev->next_rb = highest_busy_rb;
+ mod_timer(&adreno_dev->preempt.timer, jiffies +
+ msecs_to_jiffies(ADRENO_PREEMPT_TIMEOUT));
+
+ trace_adreno_hw_preempt_clear_to_trig(adreno_dev->cur_rb,
+ adreno_dev->next_rb,
+ adreno_get_rptr(adreno_dev->cur_rb),
+ adreno_get_rptr(adreno_dev->next_rb));
+ /* issue PREEMPT trigger */
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT, 1);
+
+ /* submit preempt token packet to ensure preemption */
+ if (switch_low_to_high < 0) {
+ ret = a4xx_submit_preempt_token(
+ adreno_dev->cur_rb, adreno_dev->next_rb);
+ KGSL_DRV_ERR(device,
+ "Unable to submit preempt token: %d\n", ret);
+ adreno_dev->preempt.token_submit = true;
+ adreno_dev->cur_rb->wptr_preempt_end = adreno_dev->cur_rb->wptr;
+ } else {
+ adreno_dev->preempt.token_submit = false;
+ adreno_dispatcher_schedule(device);
+ adreno_dev->cur_rb->wptr_preempt_end = 0xFFFFFFFF;
+ }
+}
+
+static void a4xx_preempt_complete_state(struct adreno_device *adreno_dev)
+
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int wptr, rbbase;
+ unsigned int val, val1;
+ unsigned int prevrptr;
+
+ del_timer_sync(&adreno_dev->preempt.timer);
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &val);
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_DEBUG, &val1);
+
+ if (val || !val1) {
+ KGSL_DRV_ERR(device,
+ "Invalid state after preemption CP_PREEMPT: %08x, CP_PREEMPT_DEBUG: %08x\n",
+ val, val1);
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_BASE, &rbbase);
+ if (rbbase != adreno_dev->next_rb->buffer_desc.gpuaddr) {
+ KGSL_DRV_ERR(device,
+ "RBBASE incorrect after preemption, expected %x got %016llx\b",
+ rbbase,
+ adreno_dev->next_rb->buffer_desc.gpuaddr);
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+
+ a4xx_preemption_save(adreno_dev, adreno_dev->cur_rb);
+
+ /* new RB is the current RB */
+ trace_adreno_hw_preempt_comp_to_clear(adreno_dev->next_rb,
+ adreno_dev->cur_rb,
+ adreno_get_rptr(adreno_dev->next_rb),
+ adreno_get_rptr(adreno_dev->cur_rb));
+
+ adreno_dev->prev_rb = adreno_dev->cur_rb;
+ adreno_dev->cur_rb = adreno_dev->next_rb;
+ adreno_dev->cur_rb->preempted_midway = 0;
+ adreno_dev->cur_rb->wptr_preempt_end = 0xFFFFFFFF;
+ adreno_dev->next_rb = NULL;
+
+ if (adreno_disp_preempt_fair_sched) {
+ /* starved rb is now scheduled so unhalt dispatcher */
+ if (ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED ==
+ adreno_dev->cur_rb->starve_timer_state)
+ adreno_put_gpu_halt(adreno_dev);
+ adreno_dev->cur_rb->starve_timer_state =
+ ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED;
+ adreno_dev->cur_rb->sched_timer = jiffies;
+ /*
+ * If the outgoing RB is has commands then set the
+ * busy time for it
+ */
+ if (!adreno_rb_empty(adreno_dev->prev_rb)) {
+ adreno_dev->prev_rb->starve_timer_state =
+ ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT;
+ adreno_dev->prev_rb->sched_timer = jiffies;
+ } else {
+ adreno_dev->prev_rb->starve_timer_state =
+ ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
+ }
+ }
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+
+ prevrptr = adreno_get_rptr(adreno_dev->prev_rb);
+
+ if (adreno_compare_prio_level(adreno_dev->prev_rb->id,
+ adreno_dev->cur_rb->id) < 0) {
+ if (adreno_dev->prev_rb->wptr_preempt_end != prevrptr)
+ adreno_dev->prev_rb->preempted_midway = 1;
+ }
+
+ /* submit wptr if required for new rb */
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
+ if (adreno_dev->cur_rb->wptr != wptr) {
+ kgsl_pwrscale_busy(device);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
+ adreno_dev->cur_rb->wptr);
+ }
+ /* clear preemption register */
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT_DEBUG, 0);
+}
+
+void a4xx_preemption_schedule(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ if (!adreno_is_preemption_enabled(adreno_dev))
+ return;
+
+ mutex_lock(&device->mutex);
+
+ switch (atomic_read(&adreno_dev->preempt.state)) {
+ case ADRENO_PREEMPT_NONE:
+ a4xx_preempt_clear_state(adreno_dev);
+ break;
+ case ADRENO_PREEMPT_TRIGGERED:
+ a4xx_preempt_trig_state(adreno_dev);
+ /*
+ * if we transitioned to next state then fall-through
+ * processing to next state
+ */
+ if (!adreno_in_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_COMPLETE))
+ break;
+ case ADRENO_PREEMPT_COMPLETE:
+ a4xx_preempt_complete_state(adreno_dev);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&device->mutex);
+}
+
+int a4xx_preemption_init(struct adreno_device *adreno_dev)
+{
+ setup_timer(&adreno_dev->preempt.timer, a4xx_preemption_timer,
+ (unsigned long) adreno_dev);
+
+ return 0;
+}
diff --git a/drivers/gpu/msm/adreno_a4xx_snapshot.c b/drivers/gpu/msm/adreno_a4xx_snapshot.c
index b07e970aae32..6921af5c0ab5 100644
--- a/drivers/gpu/msm/adreno_a4xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a4xx_snapshot.c
@@ -534,9 +534,6 @@ void a4xx_snapshot(struct adreno_device *adreno_dev,
kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL, 0);
kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2, 0);
- /* Turn on MMU clocks since we read MMU registers */
- kgsl_mmu_enable_clk(&device->mmu);
-
/* Master set of (non debug) registers */
SNAPSHOT_REGISTERS(device, snapshot, a4xx_registers);
@@ -554,8 +551,6 @@ void a4xx_snapshot(struct adreno_device *adreno_dev,
a4xx_vbif_snapshot_registers,
ARRAY_SIZE(a4xx_vbif_snapshot_registers));
- kgsl_mmu_disable_clk(&device->mmu);
-
kgsl_snapshot_indexed_registers(device, snapshot,
A4XX_CP_STATE_DEBUG_INDEX, A4XX_CP_STATE_DEBUG_DATA,
0, snap_data->sect_sizes->cp_pfp);
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 512dcd483f45..96f72c59e4cd 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -60,19 +60,12 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = {
{ adreno_is_a506, a530_vbif },
};
-#define PREEMPT_RECORD(_field) \
- offsetof(struct a5xx_cp_preemption_record, _field)
-
-#define PREEMPT_SMMU_RECORD(_field) \
- offsetof(struct a5xx_cp_smmu_info, _field)
-
static void a5xx_irq_storm_worker(struct work_struct *work);
static int _read_fw2_block_header(uint32_t *header, uint32_t id,
uint32_t major, uint32_t minor);
static void a5xx_gpmu_reset(struct work_struct *work);
static int a5xx_gpmu_init(struct adreno_device *adreno_dev);
-
/**
* Number of times to check if the regulator enabled before
* giving up and returning failure.
@@ -108,8 +101,9 @@ static void spin_idle_debug(struct kgsl_device *device,
kgsl_regread(device, A5XX_CP_HW_FAULT, &hwfault);
dev_err(device->dev,
- " rb=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
- rptr, wptr, status, status3, intstatus);
+ "rb=%d pos=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
+ adreno_dev->cur_rb->id, rptr, wptr, status, status3, intstatus);
+
dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
kgsl_device_snapshot(device, NULL);
@@ -179,277 +173,6 @@ static void a5xx_check_features(struct adreno_device *adreno_dev)
adreno_efuse_unmap(adreno_dev);
}
-/*
- * a5xx_preemption_start() - Setup state to start preemption
- */
-static void a5xx_preemption_start(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
- uint64_t ttbr0;
- uint32_t contextidr;
- struct kgsl_pagetable *pt;
- bool switch_default_pt = true;
-
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(wptr), rb->wptr);
- kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
- lower_32_bits(rb->preemption_desc.gpuaddr));
- kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
- upper_32_bits(rb->preemption_desc.gpuaddr));
- kgsl_sharedmem_readq(&rb->pagetable_desc, &ttbr0,
- offsetof(struct adreno_ringbuffer_pagetable_info, ttbr0));
- kgsl_sharedmem_readl(&rb->pagetable_desc, &contextidr,
- offsetof(struct adreno_ringbuffer_pagetable_info, contextidr));
-
- spin_lock(&kgsl_driver.ptlock);
- list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
- if (kgsl_mmu_pagetable_get_ttbr0(pt) == ttbr0) {
- switch_default_pt = false;
- break;
- }
- }
- spin_unlock(&kgsl_driver.ptlock);
-
- if (switch_default_pt) {
- ttbr0 = kgsl_mmu_pagetable_get_ttbr0(
- device->mmu.defaultpagetable);
- contextidr = kgsl_mmu_pagetable_get_contextidr(
- device->mmu.defaultpagetable);
- }
-
- kgsl_sharedmem_writeq(device, &iommu->smmu_info,
- offsetof(struct a5xx_cp_smmu_info, ttbr0), ttbr0);
- kgsl_sharedmem_writel(device, &iommu->smmu_info,
- offsetof(struct a5xx_cp_smmu_info, context_idr), contextidr);
-}
-
-/*
- * a5xx_preemption_save() - Save the state after preemption is done
- */
-static void a5xx_preemption_save(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb)
-{
- /* save the rptr from ctxrecord here */
- kgsl_sharedmem_readl(&rb->preemption_desc, &rb->rptr,
- PREEMPT_RECORD(rptr));
-}
-
-#ifdef CONFIG_QCOM_KGSL_IOMMU
-static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
-
- /* Allocate mem for storing preemption smmu record */
- return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE,
- KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED);
-}
-#else
-static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
-{
- return -ENODEV;
-}
-#endif
-
-static int a5xx_preemption_init(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct adreno_ringbuffer *rb;
- int ret;
- unsigned int i;
- uint64_t addr;
-
- /* We are dependent on IOMMU to make preemption go on the CP side */
- if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
- return -ENODEV;
-
- /* Allocate mem for storing preemption counters */
- ret = kgsl_allocate_global(device, &adreno_dev->preemption_counters,
- adreno_dev->num_ringbuffers *
- A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0);
- if (ret)
- return ret;
-
- addr = adreno_dev->preemption_counters.gpuaddr;
-
- /* Allocate mem for storing preemption switch record */
- FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- ret = kgsl_allocate_global(device,
- &rb->preemption_desc, A5XX_CP_CTXRECORD_SIZE_IN_BYTES,
- 0, KGSL_MEMDESC_PRIVILEGED);
- if (ret)
- return ret;
-
- /* Initialize the context switch record here */
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(magic), A5XX_CP_CTXRECORD_MAGIC_REF);
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(info), 0);
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(data), 0);
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(cntl), 0x0800000C);
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(rptr), 0);
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(wptr), 0);
- kgsl_sharedmem_writeq(device, &rb->preemption_desc,
- PREEMPT_RECORD(rbase),
- adreno_dev->ringbuffers[i].buffer_desc.gpuaddr);
- kgsl_sharedmem_writeq(device, &rb->preemption_desc,
- PREEMPT_RECORD(counter), addr);
-
- addr += A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
- }
-
- return a5xx_preemption_iommu_init(adreno_dev);
-}
-
-/*
- * a5xx_preemption_token() - Preempt token on a5xx
- * PM4 commands for preempt token on a5xx. These commands are
- * submitted to ringbuffer to trigger preemption.
- */
-static int a5xx_preemption_token(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb, unsigned int *cmds,
- uint64_t gpuaddr)
-{
- unsigned int *cmds_orig = cmds;
-
- *cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
- cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr);
- *cmds++ = 1;
- /* generate interrupt on preemption completion */
- *cmds++ = 1;
-
- return cmds - cmds_orig;
-
-}
-
-/*
- * a5xx_preemption_pre_ibsubmit() - Below PM4 commands are
- * added at the beginning of every cmdbatch submission.
- */
-static int a5xx_preemption_pre_ibsubmit(
- struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb, unsigned int *cmds,
- struct kgsl_context *context, uint64_t cond_addr,
- struct kgsl_memobj_node *ib)
-{
- unsigned int *cmds_orig = cmds;
- uint64_t gpuaddr = rb->preemption_desc.gpuaddr;
- unsigned int preempt_style = 0;
-
- if (context) {
- /*
- * Preemption from secure to unsecure needs Zap shader to be
- * run to clear all secure content. CP does not know during
- * preemption if it is switching between secure and unsecure
- * contexts so restrict Secure contexts to be preempted at
- * ringbuffer level.
- */
- if (context->flags & KGSL_CONTEXT_SECURE)
- preempt_style = KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER;
- else
- preempt_style = ADRENO_PREEMPT_STYLE(context->flags);
- }
-
- /*
- * CP_PREEMPT_ENABLE_GLOBAL(global preemption) can only be set by KMD
- * in ringbuffer.
- * 1) set global preemption to 0x0 to disable global preemption.
- * Only RB level preemption is allowed in this mode
- * 2) Set global preemption to defer(0x2) for finegrain preemption.
- * when global preemption is set to defer(0x2),
- * CP_PREEMPT_ENABLE_LOCAL(local preemption) determines the
- * preemption point. Local preemption
- * can be enabled by both UMD(within IB) and KMD.
- */
- *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_GLOBAL, 1);
- *cmds++ = ((preempt_style == KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN)
- ? 2 : 0);
-
- /* Turn CP protection OFF */
- *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
- *cmds++ = 0;
-
- /*
- * CP during context switch will save context switch info to
- * a5xx_cp_preemption_record pointed by CONTEXT_SWITCH_SAVE_ADDR
- */
- *cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 1);
- *cmds++ = lower_32_bits(gpuaddr);
- *cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI, 1);
- *cmds++ = upper_32_bits(gpuaddr);
-
- /* Turn CP protection ON */
- *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
- *cmds++ = 1;
-
- /*
- * Enable local preemption for finegrain preemption in case of
- * a misbehaving IB
- */
- if (preempt_style == KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN) {
- *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_LOCAL, 1);
- *cmds++ = 1;
- } else {
- *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_LOCAL, 1);
- *cmds++ = 0;
- }
-
- /* Enable CP_CONTEXT_SWITCH_YIELD packets in the IB2s */
- *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
- *cmds++ = 2;
-
- return cmds - cmds_orig;
-}
-
-/*
- * a5xx_preemption_yield_enable() - Below PM4 commands are
- * added after every cmdbatch submission.
- */
-static int a5xx_preemption_yield_enable(unsigned int *cmds)
-{
- /*
- * SRM -- set render mode (ex binning, direct render etc)
- * SRM is set by UMD usually at start of IB to tell CP the type of
- * preemption.
- * KMD needs to set SRM to NULL to indicate CP that rendering is
- * done by IB.
- */
- *cmds++ = cp_type7_packet(CP_SET_RENDER_MODE, 5);
- *cmds++ = 0;
- *cmds++ = 0;
- *cmds++ = 0;
- *cmds++ = 0;
- *cmds++ = 0;
-
- *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
- *cmds++ = 1;
-
- return 8;
-}
-
-/*
- * a5xx_preemption_post_ibsubmit() - Below PM4 commands are
- * added after every cmdbatch submission.
- */
-static int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb, unsigned int *cmds,
- struct kgsl_context *context)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int ctx_id = context ? context->id : 0;
-
- return a5xx_preemption_token(adreno_dev, rb, cmds,
- device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(ctx_id, preempted));
-
-}
-
static void a5xx_platform_setup(struct adreno_device *adreno_dev)
{
uint64_t addr;
@@ -1972,12 +1695,8 @@ out:
static void a5xx_start(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
- unsigned int i, bit;
- struct adreno_ringbuffer *rb;
- uint64_t def_ttbr0;
- uint32_t contextidr;
+ unsigned int bit;
adreno_vbif_start(adreno_dev, a5xx_vbif_platforms,
ARRAY_SIZE(a5xx_vbif_platforms));
@@ -2178,58 +1897,21 @@ static void a5xx_start(struct adreno_device *adreno_dev)
}
- if (adreno_is_preemption_enabled(adreno_dev)) {
- struct kgsl_pagetable *pt = device->mmu.defaultpagetable;
-
- def_ttbr0 = kgsl_mmu_pagetable_get_ttbr0(pt);
- contextidr = kgsl_mmu_pagetable_get_contextidr(pt);
-
- /* Initialize the context switch record here */
- kgsl_sharedmem_writel(device, &iommu->smmu_info,
- PREEMPT_SMMU_RECORD(magic),
- A5XX_CP_SMMU_INFO_MAGIC_REF);
- kgsl_sharedmem_writeq(device, &iommu->smmu_info,
- PREEMPT_SMMU_RECORD(ttbr0), def_ttbr0);
- /*
- * The CP doesn't actually use the asid field, so
- * put a bad value into it until it is removed from
- * the preemption record.
- */
- kgsl_sharedmem_writeq(device, &iommu->smmu_info,
- PREEMPT_SMMU_RECORD(asid),
- 0xdecafbad);
- kgsl_sharedmem_writeq(device, &iommu->smmu_info,
- PREEMPT_SMMU_RECORD(context_idr),
- contextidr);
- adreno_writereg64(adreno_dev,
- ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
- ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
- iommu->smmu_info.gpuaddr);
-
- FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(rptr), 0);
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(wptr), 0);
- kgsl_sharedmem_writeq(device, &rb->pagetable_desc,
- offsetof(struct adreno_ringbuffer_pagetable_info,
- ttbr0), def_ttbr0);
- }
- }
-
+ a5xx_preemption_start(adreno_dev);
a5xx_protect_init(adreno_dev);
}
+/*
+ * Follow the ME_INIT sequence with a preemption yield to allow the GPU to move
+ * to a different ringbuffer, if desired
+ */
static int _preemption_init(
struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb, unsigned int *cmds,
struct kgsl_context *context)
{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
unsigned int *cmds_orig = cmds;
uint64_t gpuaddr = rb->preemption_desc.gpuaddr;
- uint64_t gpuaddr_token = device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(0, preempted);
/* Turn CP protection OFF */
*cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
@@ -2258,8 +1940,8 @@ static int _preemption_init(
*cmds++ = 1;
*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
- cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr_token);
- *cmds++ = 1;
+ cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
+ *cmds++ = 0;
/* generate interrupt on preemption completion */
*cmds++ = 1;
@@ -2297,7 +1979,7 @@ static int a5xx_post_start(struct adreno_device *adreno_dev)
if (adreno_is_preemption_enabled(adreno_dev))
cmds += _preemption_init(adreno_dev, rb, cmds, NULL);
- rb->wptr = rb->wptr - (42 - (cmds - start));
+ rb->_wptr = rb->_wptr - (42 - (cmds - start));
ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
if (ret)
@@ -2595,8 +2277,15 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev,
unsigned int start_type)
{
struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
+ struct kgsl_device *device = &adreno_dev->dev;
+ uint64_t addr;
int ret;
+ addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);
+
+ adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
+ ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);
+
/*
* The size of the ringbuffer in the hardware is the log2
* representation of the size in quadwords (sizedwords / 2).
@@ -2605,8 +2294,7 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev,
*/
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
- (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F) |
- (1 << 27));
+ A5XX_CP_RB_CNTL_DEFAULT);
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
rb->buffer_desc.gpuaddr);
@@ -3147,6 +2835,10 @@ static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A5XX_CP_WFI_PEND_CTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A5XX_CP_RB_BASE),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A5XX_CP_RB_BASE_HI),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
+ A5XX_CP_RB_RPTR_ADDR_LO),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_HI,
+ A5XX_CP_RB_RPTR_ADDR_HI),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A5XX_CP_RB_RPTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A5XX_CP_RB_WPTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A5XX_CP_CNTL),
@@ -3416,6 +3108,8 @@ static void a5xx_cp_callback(struct adreno_device *adreno_dev, int bit)
prev = cur;
}
+ a5xx_preemption_trigger(adreno_dev);
+
kgsl_schedule_work(&device->event_work);
adreno_dispatcher_schedule(device);
}
@@ -3500,9 +3194,6 @@ void a5x_gpc_err_int_callback(struct adreno_device *adreno_dev, int bit)
(1 << A5XX_INT_RBBM_ATB_ASYNC_OVERFLOW) | \
(1 << A5XX_INT_RBBM_GPC_ERROR) | \
(1 << A5XX_INT_CP_HW_ERROR) | \
- (1 << A5XX_INT_CP_IB1) | \
- (1 << A5XX_INT_CP_IB2) | \
- (1 << A5XX_INT_CP_RB) | \
(1 << A5XX_INT_CP_CACHE_FLUSH_TS) | \
(1 << A5XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
(1 << A5XX_INT_UCHE_OOB_ACCESS) | \
@@ -3525,7 +3216,7 @@ static struct adreno_irq_funcs a5xx_irq_funcs[32] = {
/* 6 - RBBM_ATB_ASYNC_OVERFLOW */
ADRENO_IRQ_CALLBACK(a5xx_err_callback),
ADRENO_IRQ_CALLBACK(a5x_gpc_err_int_callback), /* 7 - GPC_ERR */
- ADRENO_IRQ_CALLBACK(adreno_dispatcher_preempt_callback),/* 8 - CP_SW */
+ ADRENO_IRQ_CALLBACK(a5xx_preempt_callback),/* 8 - CP_SW */
ADRENO_IRQ_CALLBACK(a5xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */
/* 10 - CP_CCU_FLUSH_DEPTH_TS */
ADRENO_IRQ_CALLBACK(NULL),
@@ -3533,9 +3224,9 @@ static struct adreno_irq_funcs a5xx_irq_funcs[32] = {
ADRENO_IRQ_CALLBACK(NULL),
/* 12 - CP_CCU_RESOLVE_TS */
ADRENO_IRQ_CALLBACK(NULL),
- ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 13 - CP_IB2_INT */
- ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 14 - CP_IB1_INT */
- ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 15 - CP_RB_INT */
+ ADRENO_IRQ_CALLBACK(NULL), /* 13 - CP_IB2_INT */
+ ADRENO_IRQ_CALLBACK(NULL), /* 14 - CP_IB1_INT */
+ ADRENO_IRQ_CALLBACK(NULL), /* 15 - CP_RB_INT */
/* 16 - CCP_UNUSED_1 */
ADRENO_IRQ_CALLBACK(NULL),
ADRENO_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
@@ -3772,323 +3463,6 @@ static struct adreno_coresight a5xx_coresight = {
.groups = a5xx_coresight_groups,
};
-/**
- * a5xx_preempt_trig_state() - Schedule preemption in TRIGGERRED
- * state
- * @adreno_dev: Device which is in TRIGGERRED state
- */
-static void a5xx_preempt_trig_state(
- struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int preempt_busy;
- uint64_t rbbase;
-
- /*
- * triggered preemption, check for busy bits, if not set go to complete
- * bit 0: When high indicates CP is not done with preemption.
- * bit 4: When high indicates that the CP is actively switching between
- * application contexts.
- * Check both the bits to make sure CP is done with preemption.
- */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &preempt_busy);
- if (!(preempt_busy & 0x11)) {
-
- adreno_readreg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
- ADRENO_REG_CP_RB_BASE_HI, &rbbase);
- /* Did preemption occur, if so then change states and return */
- if (rbbase != adreno_dev->cur_rb->buffer_desc.gpuaddr) {
- if (rbbase ==
- adreno_dev->next_rb->buffer_desc.gpuaddr) {
- KGSL_DRV_INFO(device,
- "Preemption completed without interrupt\n");
- trace_adreno_hw_preempt_trig_to_comp(
- adreno_dev->cur_rb,
- adreno_dev->next_rb);
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_COMPLETE);
- } else {
- /*
- * Something wrong with preemption.
- * Set fault and reschedule dispatcher to take
- * care of fault.
- */
- adreno_set_gpu_fault(adreno_dev,
- ADRENO_PREEMPT_FAULT);
- }
- adreno_dispatcher_schedule(device);
- return;
- }
- }
-
- /*
- * Preemption is still happening.
- * Hardware not yet idle means that preemption interrupt
- * may still occur, nothing to do here until interrupt signals
- * completion of preemption, just return here
- */
- if (!adreno_hw_isidle(adreno_dev))
- return;
-
- /*
- * We just changed states, reschedule dispatcher to change
- * preemption states
- */
- if (ADRENO_DISPATCHER_PREEMPT_TRIGGERED !=
- atomic_read(&dispatcher->preemption_state)) {
- adreno_dispatcher_schedule(device);
- return;
- }
-
-
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
-
- /* reschedule dispatcher to take care of the fault */
- adreno_dispatcher_schedule(device);
-}
-
-/**
- * a5xx_preempt_clear_state() - Schedule preemption in CLEAR
- * state. Preemption can be issued in this state.
- * @adreno_dev: Device which is in CLEAR state
- */
-static void a5xx_preempt_clear_state(
- struct adreno_device *adreno_dev)
-
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct adreno_ringbuffer *highest_busy_rb;
- int switch_low_to_high;
- int ret;
-
- /* Device not awake means there is nothing to do */
- if (!kgsl_state_is_awake(device))
- return;
-
- /* keep updating the current rptr when preemption is clear */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
- &(adreno_dev->cur_rb->rptr));
-
- highest_busy_rb = adreno_dispatcher_get_highest_busy_rb(adreno_dev);
- if (!highest_busy_rb)
- return;
-
- switch_low_to_high = adreno_compare_prio_level(
- highest_busy_rb->id, adreno_dev->cur_rb->id);
-
- /* already current then return */
- if (!switch_low_to_high)
- return;
-
- if (switch_low_to_high < 0) {
-
- if (!adreno_hw_isidle(adreno_dev)) {
- adreno_dispatcher_schedule(device);
- return;
- }
-
- /*
- * if switching to lower priority make sure that the rptr and
- * wptr are equal, when the lower rb is not starved
- */
- if (adreno_dev->cur_rb->rptr != adreno_dev->cur_rb->wptr)
- return;
- /*
- * switch to default context because when we switch back
- * to higher context then its not known which pt will
- * be current, so by making it default here the next
- * commands submitted will set the right pt
- */
- ret = adreno_drawctxt_switch(adreno_dev,
- adreno_dev->cur_rb,
- NULL, 0);
- /*
- * lower priority RB has to wait until space opens up in
- * higher RB
- */
- if (ret)
- return;
- }
-
- /* rptr could be updated in drawctxt switch above, update it here */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
- &(adreno_dev->cur_rb->rptr));
-
- /* turn on IOMMU as the preemption may trigger pt switch */
- kgsl_mmu_enable_clk(&device->mmu);
-
- /*
- * setup memory to do the switch to highest priority RB
- * which is not empty or may be starving away(poor thing)
- */
- a5xx_preemption_start(adreno_dev, highest_busy_rb);
-
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_TRIGGERED);
-
- adreno_dev->next_rb = highest_busy_rb;
- mod_timer(&dispatcher->preempt_timer, jiffies +
- msecs_to_jiffies(ADRENO_DISPATCH_PREEMPT_TIMEOUT));
-
- trace_adreno_hw_preempt_clear_to_trig(adreno_dev->cur_rb,
- adreno_dev->next_rb);
- /* issue PREEMPT trigger */
- adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT, 1);
-
- adreno_dispatcher_schedule(device);
-}
-
-/**
- * a5xx_preempt_complete_state() - Schedule preemption in
- * COMPLETE state
- * @adreno_dev: Device which is in COMPLETE state
- */
-static void a5xx_preempt_complete_state(
- struct adreno_device *adreno_dev)
-
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct adreno_dispatcher_cmdqueue *dispatch_q;
- uint64_t rbbase;
- unsigned int wptr;
- unsigned int val;
- static unsigned long wait_for_preemption_complete;
-
- del_timer_sync(&dispatcher->preempt_timer);
-
- adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &val);
-
- if (val) {
- /*
- * Wait for 50ms for preemption state to be updated by CP
- * before triggering hang
- */
- if (wait_for_preemption_complete == 0)
- wait_for_preemption_complete = jiffies +
- msecs_to_jiffies(50);
- if (time_after(jiffies, wait_for_preemption_complete)) {
- wait_for_preemption_complete = 0;
- KGSL_DRV_ERR(device,
- "Invalid state after preemption CP_PREEMPT:%08x STOP:%1x BUSY:%1x\n",
- val, (val & 0x1), (val & 0x10)>>4);
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- }
- adreno_dispatcher_schedule(device);
- return;
- }
-
- wait_for_preemption_complete = 0;
- adreno_readreg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
- ADRENO_REG_CP_RB_BASE_HI, &rbbase);
- if (rbbase != adreno_dev->next_rb->buffer_desc.gpuaddr) {
- KGSL_DRV_ERR(device,
- "RBBASE incorrect after preemption, expected %016llx got %016llx\b",
- rbbase,
- adreno_dev->next_rb->buffer_desc.gpuaddr);
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- adreno_dispatcher_schedule(device);
- return;
- }
-
- a5xx_preemption_save(adreno_dev, adreno_dev->cur_rb);
-
- dispatch_q = &(adreno_dev->cur_rb->dispatch_q);
- /* new RB is the current RB */
- trace_adreno_hw_preempt_comp_to_clear(adreno_dev->next_rb,
- adreno_dev->cur_rb);
- adreno_dev->prev_rb = adreno_dev->cur_rb;
- adreno_dev->cur_rb = adreno_dev->next_rb;
- adreno_dev->cur_rb->preempted_midway = 0;
- adreno_dev->cur_rb->wptr_preempt_end = 0xFFFFFFFF;
- adreno_dev->next_rb = NULL;
-
- if (adreno_disp_preempt_fair_sched) {
- /* starved rb is now scheduled so unhalt dispatcher */
- if (ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED ==
- adreno_dev->cur_rb->starve_timer_state)
- adreno_put_gpu_halt(adreno_dev);
- adreno_dev->cur_rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED;
- adreno_dev->cur_rb->sched_timer = jiffies;
- /*
- * If the outgoing RB is has commands then set the
- * busy time for it
- */
- if (adreno_dev->prev_rb->rptr != adreno_dev->prev_rb->wptr) {
- adreno_dev->prev_rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT;
- adreno_dev->prev_rb->sched_timer = jiffies;
- } else {
- adreno_dev->prev_rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
- }
- }
- adreno_ringbuffer_mmu_disable_clk_on_ts(device, adreno_dev->cur_rb,
- adreno_dev->cur_rb->timestamp);
-
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_CLEAR);
-
- /* submit wptr if required for new rb */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
- if (adreno_dev->cur_rb->wptr != wptr) {
- kgsl_pwrscale_busy(device);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
- adreno_dev->cur_rb->wptr);
- }
-
- adreno_preempt_process_dispatch_queue(adreno_dev, dispatch_q);
-}
-
-static void a5xx_preemption_schedule(
- struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct adreno_ringbuffer *rb;
- int i = 0;
-
- if (!adreno_is_preemption_enabled(adreno_dev))
- return;
-
- mutex_lock(&device->mutex);
-
- /*
- * This barrier is needed for most updated preemption_state
- * to be read.
- */
- smp_mb();
-
- if (KGSL_STATE_ACTIVE == device->state)
- FOR_EACH_RINGBUFFER(adreno_dev, rb, i)
- rb->rptr = adreno_get_rptr(rb);
-
- switch (atomic_read(&dispatcher->preemption_state)) {
- case ADRENO_DISPATCHER_PREEMPT_CLEAR:
- a5xx_preempt_clear_state(adreno_dev);
- break;
- case ADRENO_DISPATCHER_PREEMPT_TRIGGERED:
- a5xx_preempt_trig_state(adreno_dev);
- /*
- * if we transitioned to next state then fall-through
- * processing to next state
- */
- if (!adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_COMPLETE))
- break;
- case ADRENO_DISPATCHER_PREEMPT_COMPLETE:
- a5xx_preempt_complete_state(adreno_dev);
- break;
- default:
- BUG();
- }
-
- mutex_unlock(&device->mutex);
-}
-
struct adreno_gpudev adreno_a5xx_gpudev = {
.reg_offsets = &a5xx_reg_offsets,
.ft_perf_counters = a5xx_ft_perf_counters,
@@ -4116,7 +3490,6 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
a5xx_preemption_yield_enable,
.preemption_post_ibsubmit =
a5xx_preemption_post_ibsubmit,
- .preemption_token = a5xx_preemption_token,
.preemption_init = a5xx_preemption_init,
.preemption_schedule = a5xx_preemption_schedule,
.enable_64bit = a5xx_enable_64bit,
diff --git a/drivers/gpu/msm/adreno_a5xx.h b/drivers/gpu/msm/adreno_a5xx.h
index 6ce95ff7bdbf..7965bb7b5440 100644
--- a/drivers/gpu/msm/adreno_a5xx.h
+++ b/drivers/gpu/msm/adreno_a5xx.h
@@ -112,6 +112,8 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev);
void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on);
+#define A5XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
+ (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))
/* GPMU interrupt multiplexor */
#define FW_INTR_INFO (0)
#define LLM_ACK_ERR_INTR (1)
@@ -232,4 +234,22 @@ static inline bool lm_on(struct adreno_device *adreno_dev)
return ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag);
}
+
+/* Preemption functions */
+void a5xx_preemption_trigger(struct adreno_device *adreno_dev);
+void a5xx_preemption_schedule(struct adreno_device *adreno_dev);
+void a5xx_preemption_start(struct adreno_device *adreno_dev);
+int a5xx_preemption_init(struct adreno_device *adreno_dev);
+int a5xx_preemption_yield_enable(unsigned int *cmds);
+
+unsigned int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
+ unsigned int *cmds);
+unsigned int a5xx_preemption_pre_ibsubmit(
+ struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb,
+ unsigned int *cmds, struct kgsl_context *context);
+
+
+void a5xx_preempt_callback(struct adreno_device *adreno_dev, int bit);
+
#endif
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
new file mode 100644
index 000000000000..c1463b824c67
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -0,0 +1,574 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "adreno.h"
+#include "adreno_a5xx.h"
+#include "a5xx_reg.h"
+#include "adreno_trace.h"
+#include "adreno_pm4types.h"
+
+#define PREEMPT_RECORD(_field) \
+ offsetof(struct a5xx_cp_preemption_record, _field)
+
+#define PREEMPT_SMMU_RECORD(_field) \
+ offsetof(struct a5xx_cp_smmu_info, _field)
+
+static void _update_wptr(struct adreno_device *adreno_dev)
+{
+ struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
+ unsigned int wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rb->preempt_lock, flags);
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
+
+ if (wptr != rb->wptr) {
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
+ rb->wptr);
+
+ rb->dispatch_q.expires = jiffies +
+ msecs_to_jiffies(adreno_cmdbatch_timeout);
+ }
+
+ spin_unlock_irqrestore(&rb->preempt_lock, flags);
+}
+
+static inline bool adreno_move_preempt_state(struct adreno_device *adreno_dev,
+ enum adreno_preempt_states old, enum adreno_preempt_states new)
+{
+ return (atomic_cmpxchg(&adreno_dev->preempt.state, old, new) == old);
+}
+
+static void _a5xx_preemption_done(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int status;
+
+ /*
+ * In the very unlikely case that the power is off, do nothing - the
+ * state will be reset on power up and everybody will be happy
+ */
+
+ if (!kgsl_state_is_awake(device))
+ return;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);
+
+ if (status != 0) {
+ KGSL_DRV_ERR(device,
+ "Preemption not complete: status=%X cur=%d R/W=%X/%X next=%d R/W=%X/%X\n",
+ status, adreno_dev->cur_rb->id,
+ adreno_get_rptr(adreno_dev->cur_rb),
+ adreno_dev->cur_rb->wptr, adreno_dev->next_rb->id,
+ adreno_get_rptr(adreno_dev->next_rb),
+ adreno_dev->next_rb->wptr);
+
+ /* Set a fault and restart */
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ adreno_dispatcher_schedule(device);
+
+ return;
+ }
+
+ del_timer_sync(&adreno_dev->preempt.timer);
+
+ trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb);
+
+ /* Clean up all the bits */
+ adreno_dev->prev_rb = adreno_dev->cur_rb;
+ adreno_dev->cur_rb = adreno_dev->next_rb;
+ adreno_dev->next_rb = NULL;
+
+ /* Update the wptr for the new command queue */
+ _update_wptr(adreno_dev);
+
+ /* Update the dispatcher timer for the new command queue */
+ mod_timer(&adreno_dev->dispatcher.timer,
+ adreno_dev->cur_rb->dispatch_q.expires);
+
+ /* Clear the preempt state */
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+}
+
+static void _a5xx_preemption_fault(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int status;
+
+ /*
+ * If the power is on check the preemption status one more time - if it
+ * was successful then just transition to the complete state
+ */
+ if (kgsl_state_is_awake(device)) {
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);
+
+ if (status == 0) {
+ adreno_set_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_COMPLETE);
+
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+ }
+
+ KGSL_DRV_ERR(device,
+ "Preemption timed out: cur=%d R/W=%X/%X, next=%d R/W=%X/%X\n",
+ adreno_dev->cur_rb->id,
+ adreno_get_rptr(adreno_dev->cur_rb), adreno_dev->cur_rb->wptr,
+ adreno_dev->next_rb->id,
+ adreno_get_rptr(adreno_dev->next_rb),
+ adreno_dev->next_rb->wptr);
+
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ adreno_dispatcher_schedule(device);
+}
+
+static void _a5xx_preemption_worker(struct work_struct *work)
+{
+ struct adreno_preemption *preempt = container_of(work,
+ struct adreno_preemption, work);
+ struct adreno_device *adreno_dev = container_of(preempt,
+ struct adreno_device, preempt);
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ /* Need to take the mutex to make sure that the power stays on */
+ mutex_lock(&device->mutex);
+
+ if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_FAULTED))
+ _a5xx_preemption_fault(adreno_dev);
+
+ mutex_unlock(&device->mutex);
+}
+
+static void _a5xx_preemption_timer(unsigned long data)
+{
+ struct adreno_device *adreno_dev = (struct adreno_device *) data;
+
+ /* We should only be here from a triggered state */
+ if (!adreno_move_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_FAULTED))
+ return;
+
+ /* Schedule the worker to take care of the details */
+ queue_work(system_unbound_wq, &adreno_dev->preempt.work);
+}
+
+/* Find the highest priority active ringbuffer */
+static struct adreno_ringbuffer *a5xx_next_ringbuffer(
+ struct adreno_device *adreno_dev)
+{
+ struct adreno_ringbuffer *rb;
+ unsigned long flags;
+ unsigned int i;
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ bool empty;
+
+ spin_lock_irqsave(&rb->preempt_lock, flags);
+ empty = adreno_rb_empty(rb);
+ spin_unlock_irqrestore(&rb->preempt_lock, flags);
+
+ if (empty == false)
+ return rb;
+ }
+
+ return NULL;
+}
+
+void a5xx_preemption_trigger(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+ struct adreno_ringbuffer *next;
+ uint64_t ttbr0;
+ unsigned int contextidr;
+ unsigned long flags;
+
+ /* Put ourselves into a possible trigger state */
+ if (!adreno_move_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_NONE, ADRENO_PREEMPT_START))
+ return;
+
+ /* Get the next ringbuffer to preempt in */
+ next = a5xx_next_ringbuffer(adreno_dev);
+
+ /*
+ * Nothing to do if every ringbuffer is empty or if the current
+ * ringbuffer is the only active one
+ */
+ if (next == NULL || next == adreno_dev->cur_rb) {
+ /*
+ * Update any critical things that might have been skipped while
+ * we were looking for a new ringbuffer
+ */
+
+ if (next != NULL) {
+ _update_wptr(adreno_dev);
+
+ mod_timer(&adreno_dev->dispatcher.timer,
+ adreno_dev->cur_rb->dispatch_q.expires);
+ }
+
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+ return;
+ }
+
+ /* Turn off the dispatcher timer */
+ del_timer(&adreno_dev->dispatcher.timer);
+
+ /*
+ * This is the most critical section - we need to take care not to race
+ * until we have programmed the CP for the switch
+ */
+
+ spin_lock_irqsave(&next->preempt_lock, flags);
+
+ /* Get the pagetable from the pagetable info */
+ kgsl_sharedmem_readq(&next->pagetable_desc, &ttbr0,
+ PT_INFO_OFFSET(ttbr0));
+ kgsl_sharedmem_readl(&next->pagetable_desc, &contextidr,
+ PT_INFO_OFFSET(contextidr));
+
+ kgsl_sharedmem_writel(device, &next->preemption_desc,
+ PREEMPT_RECORD(wptr), next->wptr);
+
+ spin_unlock_irqrestore(&next->preempt_lock, flags);
+
+ /* And write it to the smmu info */
+ kgsl_sharedmem_writeq(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(ttbr0), ttbr0);
+ kgsl_sharedmem_writel(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(context_idr), contextidr);
+
+ kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
+ lower_32_bits(next->preemption_desc.gpuaddr));
+ kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
+ upper_32_bits(next->preemption_desc.gpuaddr));
+
+ adreno_dev->next_rb = next;
+
+ /* Start the timer to detect a stuck preemption */
+ mod_timer(&adreno_dev->preempt.timer,
+ jiffies + msecs_to_jiffies(ADRENO_PREEMPT_TIMEOUT));
+
+ trace_adreno_preempt_trigger(adreno_dev->cur_rb, adreno_dev->next_rb);
+
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);
+
+ /* Trigger the preemption */
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT, 1);
+}
+
+void a5xx_preempt_callback(struct adreno_device *adreno_dev, int bit)
+{
+ unsigned int status;
+
+ if (!adreno_move_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_PENDING))
+ return;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);
+
+ if (status != 0) {
+ KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
+ "preempt interrupt with non-zero status: %X\n", status);
+
+ /*
+ * Under the assumption that this is a race between the
+ * interrupt and the register, schedule the worker to clean up.
+ * If the status still hasn't resolved itself by the time we get
+ * there then we have to assume something bad happened
+ */
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE);
+ adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
+ return;
+ }
+
+ del_timer(&adreno_dev->preempt.timer);
+
+ trace_adreno_preempt_done(adreno_dev->cur_rb,
+ adreno_dev->next_rb);
+
+ adreno_dev->prev_rb = adreno_dev->cur_rb;
+ adreno_dev->cur_rb = adreno_dev->next_rb;
+ adreno_dev->next_rb = NULL;
+
+ /* Update the wptr if it changed while preemption was ongoing */
+ _update_wptr(adreno_dev);
+
+ /* Update the dispatcher timer for the new command queue */
+ mod_timer(&adreno_dev->dispatcher.timer,
+ adreno_dev->cur_rb->dispatch_q.expires);
+
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+}
+
+void a5xx_preemption_schedule(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ if (!adreno_is_preemption_enabled(adreno_dev))
+ return;
+
+ mutex_lock(&device->mutex);
+
+ if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE))
+ _a5xx_preemption_done(adreno_dev);
+
+ a5xx_preemption_trigger(adreno_dev);
+
+ mutex_unlock(&device->mutex);
+}
+
+unsigned int a5xx_preemption_pre_ibsubmit(
+ struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb,
+ unsigned int *cmds, struct kgsl_context *context)
+{
+ unsigned int *cmds_orig = cmds;
+ uint64_t gpuaddr = rb->preemption_desc.gpuaddr;
+ unsigned int preempt_style = 0;
+
+ if (context) {
+ /*
+ * Preemption from secure to unsecure needs Zap shader to be
+ * run to clear all secure content. CP does not know during
+ * preemption if it is switching between secure and unsecure
+ * contexts so restrict Secure contexts to be preempted at
+ * ringbuffer level.
+ */
+ if (context->flags & KGSL_CONTEXT_SECURE)
+ preempt_style = KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER;
+ else
+ preempt_style = ADRENO_PREEMPT_STYLE(context->flags);
+ }
+
+ /*
+ * CP_PREEMPT_ENABLE_GLOBAL(global preemption) can only be set by KMD
+ * in ringbuffer.
+ * 1) set global preemption to 0x0 to disable global preemption.
+ * Only RB level preemption is allowed in this mode
+ * 2) Set global preemption to defer(0x2) for finegrain preemption.
+ * when global preemption is set to defer(0x2),
+ * CP_PREEMPT_ENABLE_LOCAL(local preemption) determines the
+ * preemption point. Local preemption
+ * can be enabled by both UMD(within IB) and KMD.
+ */
+ *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_GLOBAL, 1);
+ *cmds++ = ((preempt_style == KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN)
+ ? 2 : 0);
+
+ /* Turn CP protection OFF */
+ *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
+ *cmds++ = 0;
+
+ /*
+ * CP during context switch will save context switch info to
+ * a5xx_cp_preemption_record pointed by CONTEXT_SWITCH_SAVE_ADDR
+ */
+ *cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 1);
+ *cmds++ = lower_32_bits(gpuaddr);
+ *cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI, 1);
+ *cmds++ = upper_32_bits(gpuaddr);
+
+ /* Turn CP protection ON */
+ *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
+ *cmds++ = 1;
+
+ /*
+ * Enable local preemption for finegrain preemption in case of
+ * a misbehaving IB
+ */
+ if (preempt_style == KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN) {
+ *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_LOCAL, 1);
+ *cmds++ = 1;
+ } else {
+ *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_LOCAL, 1);
+ *cmds++ = 0;
+ }
+
+ /* Enable CP_CONTEXT_SWITCH_YIELD packets in the IB2s */
+ *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
+ *cmds++ = 2;
+
+ return (unsigned int) (cmds - cmds_orig);
+}
+
+int a5xx_preemption_yield_enable(unsigned int *cmds)
+{
+ /*
+ * SRM -- set render mode (ex binning, direct render etc)
+ * SRM is set by UMD usually at start of IB to tell CP the type of
+ * preemption.
+ * KMD needs to set SRM to NULL to indicate CP that rendering is
+ * done by IB.
+ */
+ *cmds++ = cp_type7_packet(CP_SET_RENDER_MODE, 5);
+ *cmds++ = 0;
+ *cmds++ = 0;
+ *cmds++ = 0;
+ *cmds++ = 0;
+ *cmds++ = 0;
+
+ *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
+ *cmds++ = 1;
+
+ return 8;
+}
+
+unsigned int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
+ unsigned int *cmds)
+{
+ int dwords = 0;
+
+ cmds[dwords++] = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
+ /* Write NULL to the address to skip the data write */
+ dwords += cp_gpuaddr(adreno_dev, &cmds[dwords], 0x0);
+ cmds[dwords++] = 1;
+ /* generate interrupt on preemption completion */
+ cmds[dwords++] = 1;
+
+ return dwords;
+}
+
+void a5xx_preemption_start(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+ struct adreno_ringbuffer *rb;
+ unsigned int i;
+
+ if (!adreno_is_preemption_enabled(adreno_dev))
+ return;
+
+ /* Force the state to be clear */
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+
+ kgsl_sharedmem_writel(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(magic), A5XX_CP_SMMU_INFO_MAGIC_REF);
+ kgsl_sharedmem_writeq(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(ttbr0), MMU_DEFAULT_TTBR0(device));
+
+ /* The CP doesn't use the asid record, so poison it */
+ kgsl_sharedmem_writel(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(asid), 0xDECAFBAD);
+ kgsl_sharedmem_writel(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(context_idr),
+ MMU_DEFAULT_CONTEXTIDR(device));
+
+ adreno_writereg64(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
+ ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
+ iommu->smmu_info.gpuaddr);
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(rptr), 0);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(wptr), 0);
+
+ adreno_ringbuffer_set_pagetable(rb,
+ device->mmu.defaultpagetable);
+ }
+
+}
+
+static int a5xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb, uint64_t counteraddr)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ int ret;
+
+ ret = kgsl_allocate_global(device, &rb->preemption_desc,
+ A5XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED);
+ if (ret)
+ return ret;
+
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(magic), A5XX_CP_CTXRECORD_MAGIC_REF);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(info), 0);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(data), 0);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(cntl), A5XX_CP_RB_CNTL_DEFAULT);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(rptr), 0);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(wptr), 0);
+ kgsl_sharedmem_writeq(device, &rb->preemption_desc,
+ PREEMPT_RECORD(rptr_addr), SCRATCH_RPTR_GPU_ADDR(device,
+ rb->id));
+ kgsl_sharedmem_writeq(device, &rb->preemption_desc,
+ PREEMPT_RECORD(rbase), rb->buffer_desc.gpuaddr);
+ kgsl_sharedmem_writeq(device, &rb->preemption_desc,
+ PREEMPT_RECORD(counter), counteraddr);
+
+ return 0;
+}
+
+#ifdef CONFIG_QCOM_KGSL_IOMMU
+static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+
+ /* Allocate mem for storing preemption smmu record */
+ return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE,
+ KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED);
+}
+#else
+static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
+{
+ return -ENODEV;
+}
+#endif
+
+int a5xx_preemption_init(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_preemption *preempt = &adreno_dev->preempt;
+ struct adreno_ringbuffer *rb;
+ int ret;
+ unsigned int i;
+ uint64_t addr;
+
+ /* We are dependent on IOMMU to make preemption go on the CP side */
+ if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
+ return -ENODEV;
+
+ INIT_WORK(&preempt->work, _a5xx_preemption_worker);
+
+ setup_timer(&preempt->timer, _a5xx_preemption_timer,
+ (unsigned long) adreno_dev);
+
+ /* Allocate mem for storing preemption counters */
+ ret = kgsl_allocate_global(device, &preempt->counters,
+ adreno_dev->num_ringbuffers *
+ A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0);
+ if (ret)
+ return ret;
+
+ addr = preempt->counters.gpuaddr;
+
+ /* Allocate mem for storing preemption switch record */
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ ret = a5xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
+ if (ret)
+ return ret;
+
+ addr += A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
+ }
+
+ return a5xx_preemption_iommu_init(adreno_dev);
+}
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 1a1db3ab3dc9..9cbcd06d7658 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -226,8 +226,7 @@ static void cmdbatch_print(struct seq_file *s, struct kgsl_cmdbatch *cmdbatch)
if (cmdbatch->flags & KGSL_CONTEXT_SYNC)
return;
- seq_printf(s, "\t%d: ib: expires: %lu",
- cmdbatch->timestamp, cmdbatch->expires);
+ seq_printf(s, "\t%d: ", cmdbatch->timestamp);
seq_puts(s, " flags: ");
print_flags(s, cmdbatch_flags, ARRAY_SIZE(cmdbatch_flags),
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 3f36a93ea110..ac3805800691 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -28,10 +28,10 @@
#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
/* Time in ms after which the dispatcher tries to schedule an unscheduled RB */
-static unsigned int _dispatch_starvation_time = 2000;
+unsigned int adreno_dispatch_starvation_time = 2000;
/* Amount of time in ms that a starved RB is permitted to execute for */
-static unsigned int _dispatch_time_slice = 25;
+unsigned int adreno_dispatch_time_slice = 25;
/*
* If set then dispatcher tries to schedule lower priority RB's after if they
@@ -78,6 +78,24 @@ unsigned int adreno_cmdbatch_timeout = 2000;
/* Interval for reading and comparing fault detection registers */
static unsigned int _fault_timer_interval = 200;
+#define CMDQUEUE_RB(_cmdqueue) \
+ ((struct adreno_ringbuffer *) \
+ container_of((_cmdqueue), struct adreno_ringbuffer, dispatch_q))
+
+#define CMDQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
+
+static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_cmdqueue *cmdqueue);
+
+static inline bool cmdqueue_is_current(
+ struct adreno_dispatcher_cmdqueue *cmdqueue)
+{
+ struct adreno_ringbuffer *rb = CMDQUEUE_RB(cmdqueue);
+ struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+
+ return (adreno_dev->cur_rb == rb);
+}
+
static void _add_context(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt)
{
@@ -283,7 +301,8 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
/* Retire pending GPU events for the object */
kgsl_process_event_group(device, &context->events);
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb);
+ trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
+ adreno_get_rptr(drawctxt->rb));
kgsl_cmdbatch_destroy(cmdbatch);
}
@@ -576,8 +595,15 @@ static int sendcmd(struct adreno_device *adreno_dev,
if (dispatcher->inflight == 1) {
if (ret == 0) {
+
+ /* Stop fault timer before reading fault registers */
+ del_timer_sync(&dispatcher->fault_timer);
+
fault_detect_read(adreno_dev);
+ /* Start the fault timer on first submission */
+ start_fault_timer(adreno_dev);
+
if (!test_and_set_bit(ADRENO_DISPATCHER_ACTIVE,
&dispatcher->priv))
reinit_completion(&dispatcher->idle_gate);
@@ -594,11 +620,15 @@ static int sendcmd(struct adreno_device *adreno_dev,
dispatch_q->inflight--;
/*
+ * Don't log a message in case of:
* -ENOENT means that the context was detached before the
- * command was submitted - don't log a message in that case
+ * command was submitted
+ * -ENOSPC means that there temporarily isn't any room in the
+ * ringbuffer
+ * -PROTO means that a fault is currently being worked
*/
- if (ret != -ENOENT)
+ if (ret != -ENOENT && ret != -ENOSPC && ret != -EPROTO)
KGSL_DRV_ERR(device,
"Unable to submit command to the ringbuffer %d\n",
ret);
@@ -609,7 +639,8 @@ static int sendcmd(struct adreno_device *adreno_dev,
nsecs = do_div(secs, 1000000000);
trace_adreno_cmdbatch_submitted(cmdbatch, (int) dispatcher->inflight,
- time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb);
+ time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb,
+ adreno_get_rptr(drawctxt->rb));
cmdbatch->submit_ticks = time.ticks;
@@ -618,28 +649,26 @@ static int sendcmd(struct adreno_device *adreno_dev,
ADRENO_DISPATCH_CMDQUEUE_SIZE;
/*
- * If this is the first command in the pipe then the GPU will
- * immediately start executing it so we can start the expiry timeout on
- * the command batch here. Subsequent command batches will have their
- * timer started when the previous command batch is retired.
- * Set the timer if the cmdbatch was submitted to current
- * active RB else this timer will need to be set when the
- * RB becomes active, also if dispatcher is not is CLEAR
- * state then the cmdbatch it is currently executing is
- * unclear so do not set timer in that case either.
+ * For the first submission in any given command queue update the
+ * expected expire time - this won't actually be used / updated until
+ * the command queue in question goes current, but universally setting
+ * it here avoids the possibilty of some race conditions with preempt
*/
- if (1 == dispatch_q->inflight &&
- (&(adreno_dev->cur_rb->dispatch_q)) == dispatch_q &&
- adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_CLEAR)) {
- cmdbatch->expires = jiffies +
+
+ if (dispatch_q->inflight == 1)
+ dispatch_q->expires = jiffies +
msecs_to_jiffies(adreno_cmdbatch_timeout);
- mod_timer(&dispatcher->timer, cmdbatch->expires);
+
+ /*
+ * If we believe ourselves to be current and preemption isn't a thing,
+ * then set up the timer. If this misses, then preemption is indeed a
+ * thing and the timer will be set up in due time
+ */
+ if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
+ if (cmdqueue_is_current(dispatch_q))
+ mod_timer(&dispatcher->timer, dispatch_q->expires);
}
- /* Start the fault detection timer on the first submission */
- if (dispatcher->inflight == 1)
- start_fault_timer(adreno_dev);
/*
* we just submitted something, readjust ringbuffer
@@ -924,87 +953,6 @@ static int get_timestamp(struct adreno_context *drawctxt,
}
/**
- * adreno_dispatcher_preempt_timer() - Timer that triggers when preemption has
- * not completed
- * @data: Pointer to adreno device that did not preempt in timely manner
- */
-static void adreno_dispatcher_preempt_timer(unsigned long data)
-{
- struct adreno_device *adreno_dev = (struct adreno_device *) data;
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
- "Preemption timed out. cur_rb rptr/wptr %x/%x id %d, next_rb rptr/wptr %x/%x id %d, disp_state: %d\n",
- adreno_dev->cur_rb->rptr, adreno_dev->cur_rb->wptr,
- adreno_dev->cur_rb->id, adreno_dev->next_rb->rptr,
- adreno_dev->next_rb->wptr, adreno_dev->next_rb->id,
- atomic_read(&dispatcher->preemption_state));
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
-}
-
-/**
- * adreno_dispatcher_get_highest_busy_rb() - Returns the highest priority RB
- * which is busy
- * @adreno_dev: Device whose RB is returned
- */
-struct adreno_ringbuffer *adreno_dispatcher_get_highest_busy_rb(
- struct adreno_device *adreno_dev)
-{
- struct adreno_ringbuffer *rb, *highest_busy_rb = NULL;
- int i;
-
- FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- if (rb->rptr != rb->wptr && !highest_busy_rb) {
- highest_busy_rb = rb;
- goto done;
- }
-
- if (!adreno_disp_preempt_fair_sched)
- continue;
-
- switch (rb->starve_timer_state) {
- case ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT:
- if (rb->rptr != rb->wptr &&
- adreno_dev->cur_rb != rb) {
- rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT;
- rb->sched_timer = jiffies;
- }
- break;
- case ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT:
- if (time_after(jiffies, rb->sched_timer +
- msecs_to_jiffies(_dispatch_starvation_time))) {
- rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED;
- /* halt dispatcher to remove starvation */
- adreno_get_gpu_halt(adreno_dev);
- }
- break;
- case ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED:
- BUG_ON(adreno_dev->cur_rb != rb);
- /*
- * If the RB has not been running for the minimum
- * time slice then allow it to run
- */
- if ((rb->rptr != rb->wptr) && time_before(jiffies,
- adreno_dev->cur_rb->sched_timer +
- msecs_to_jiffies(_dispatch_time_slice)))
- highest_busy_rb = rb;
- else
- rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
- break;
- case ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED:
- default:
- break;
- }
- }
-done:
- return highest_busy_rb;
-}
-
-/**
* adreno_dispactcher_queue_cmd() - Queue a new command in the context
* @adreno_dev: Pointer to the adreno device struct
* @drawctxt: Pointer to the adreno draw context
@@ -1433,7 +1381,7 @@ static void adreno_fault_header(struct kgsl_device *device,
if (rb != NULL)
pr_fault(device, cmdbatch,
"gpu fault rb %d rb sw r/w %4.4x/%4.4x\n",
- rb->id, rb->rptr, rb->wptr);
+ rb->id, rptr, rb->wptr);
} else {
int id = (rb != NULL) ? rb->id : -1;
@@ -1444,7 +1392,7 @@ static void adreno_fault_header(struct kgsl_device *device,
if (rb != NULL)
dev_err(device->dev,
"RB[%d] gpu fault rb sw r/w %4.4x/%4.4x\n",
- rb->id, rb->rptr, rb->wptr);
+ rb->id, rptr, rb->wptr);
}
}
@@ -1751,6 +1699,27 @@ replay:
kfree(replay);
}
+static void do_header_and_snapshot(struct kgsl_device *device,
+ struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch)
+{
+ /* Always dump the snapshot on a non-cmdbatch failure */
+ if (cmdbatch == NULL) {
+ adreno_fault_header(device, rb, NULL);
+ kgsl_device_snapshot(device, NULL);
+ return;
+ }
+
+ /* Skip everything if the PMDUMP flag is set */
+ if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy))
+ return;
+
+ /* Print the fault header */
+ adreno_fault_header(device, rb, cmdbatch);
+
+ if (!(cmdbatch->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
+ kgsl_device_snapshot(device, cmdbatch->context);
+}
+
static int dispatcher_do_fault(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1787,7 +1756,7 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
/* Turn off all the timers */
del_timer_sync(&dispatcher->timer);
del_timer_sync(&dispatcher->fault_timer);
- del_timer_sync(&dispatcher->preempt_timer);
+ del_timer_sync(&adreno_dev->preempt.timer);
mutex_lock(&device->mutex);
@@ -1813,14 +1782,12 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
* retire cmdbatches from all the dispatch_q's before starting recovery
*/
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- adreno_dispatch_process_cmdqueue(adreno_dev,
- &(rb->dispatch_q), 0);
+ adreno_dispatch_retire_cmdqueue(adreno_dev,
+ &(rb->dispatch_q));
/* Select the active dispatch_q */
if (base == rb->buffer_desc.gpuaddr) {
dispatch_q = &(rb->dispatch_q);
hung_rb = rb;
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
- &hung_rb->rptr);
if (adreno_dev->cur_rb != hung_rb) {
adreno_dev->prev_rb = adreno_dev->cur_rb;
adreno_dev->cur_rb = hung_rb;
@@ -1834,7 +1801,7 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
}
}
- if (dispatch_q && (dispatch_q->tail != dispatch_q->head)) {
+ if (!adreno_cmdqueue_is_empty(dispatch_q)) {
cmdbatch = dispatch_q->cmd_q[dispatch_q->head];
trace_adreno_cmdbatch_fault(cmdbatch, fault);
}
@@ -1842,17 +1809,7 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
ADRENO_REG_CP_IB1_BASE_HI, &base);
- /*
- * Dump the snapshot information if this is the first
- * detected fault for the oldest active command batch
- */
-
- if (cmdbatch == NULL ||
- !test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy)) {
- adreno_fault_header(device, hung_rb, cmdbatch);
- kgsl_device_snapshot(device,
- cmdbatch ? cmdbatch->context : NULL);
- }
+ do_header_and_snapshot(device, hung_rb, cmdbatch);
/* Terminate the stalled transaction and resume the IOMMU */
if (fault & ADRENO_IOMMU_PAGE_FAULT)
@@ -1860,8 +1817,6 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
/* Reset the dispatcher queue */
dispatcher->inflight = 0;
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_CLEAR);
/* Reset the GPU and make sure halt is not set during recovery */
halt = adreno_gpu_halt(adreno_dev);
@@ -1875,12 +1830,12 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
if (hung_rb != NULL) {
kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_MAX + hung_rb->id,
- soptimestamp), hung_rb->timestamp);
+ MEMSTORE_RB_OFFSET(hung_rb, soptimestamp),
+ hung_rb->timestamp);
kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_MAX + hung_rb->id,
- eoptimestamp), hung_rb->timestamp);
+ MEMSTORE_RB_OFFSET(hung_rb, eoptimestamp),
+ hung_rb->timestamp);
/* Schedule any pending events to be run */
kgsl_process_event_group(device, &hung_rb->events);
@@ -1953,139 +1908,170 @@ static void cmdbatch_profile_ticks(struct adreno_device *adreno_dev,
*retire = entry->retired;
}
-int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *dispatch_q,
- int long_ib_detect)
+static void retire_cmdbatch(struct adreno_device *adreno_dev,
+ struct kgsl_cmdbatch *cmdbatch)
{
- struct adreno_dispatcher *dispatcher = &(adreno_dev->dispatcher);
- uint64_t start_ticks = 0, retire_ticks = 0;
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ uint64_t start = 0, end = 0;
- struct adreno_dispatcher_cmdqueue *active_q =
- &(adreno_dev->cur_rb->dispatch_q);
+ if (cmdbatch->fault_recovery != 0) {
+ set_bit(ADRENO_CONTEXT_FAULT, &cmdbatch->context->priv);
+ _print_recovery(KGSL_DEVICE(adreno_dev), cmdbatch);
+ }
+
+ if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv))
+ cmdbatch_profile_ticks(adreno_dev, cmdbatch, &start, &end);
+
+ trace_adreno_cmdbatch_retired(cmdbatch, (int) dispatcher->inflight,
+ start, end, ADRENO_CMDBATCH_RB(cmdbatch),
+ adreno_get_rptr(drawctxt->rb));
+
+ drawctxt->submit_retire_ticks[drawctxt->ticks_index] =
+ end - cmdbatch->submit_ticks;
+
+ drawctxt->ticks_index = (drawctxt->ticks_index + 1) %
+ SUBMIT_RETIRE_TICKS_SIZE;
+
+ kgsl_cmdbatch_destroy(cmdbatch);
+}
+
+static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_cmdqueue *cmdqueue)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
int count = 0;
- while (dispatch_q->head != dispatch_q->tail) {
+ while (!adreno_cmdqueue_is_empty(cmdqueue)) {
struct kgsl_cmdbatch *cmdbatch =
- dispatch_q->cmd_q[dispatch_q->head];
- struct adreno_context *drawctxt;
- BUG_ON(cmdbatch == NULL);
+ cmdqueue->cmd_q[cmdqueue->head];
- drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ if (!kgsl_check_timestamp(device, cmdbatch->context,
+ cmdbatch->timestamp))
+ break;
- /*
- * First try to expire the timestamp. This happens if the
- * context is valid and the timestamp expired normally or if the
- * context was destroyed before the command batch was finished
- * in the GPU. Either way retire the command batch advance the
- * pointers and continue processing the queue
- */
+ retire_cmdbatch(adreno_dev, cmdbatch);
- if (kgsl_check_timestamp(KGSL_DEVICE(adreno_dev),
- cmdbatch->context, cmdbatch->timestamp)) {
+ dispatcher->inflight--;
+ cmdqueue->inflight--;
- /*
- * If the cmdbatch in question had faulted announce its
- * successful completion to the world
- */
+ cmdqueue->cmd_q[cmdqueue->head] = NULL;
- if (cmdbatch->fault_recovery != 0) {
- /* Mark the context as faulted and recovered */
- set_bit(ADRENO_CONTEXT_FAULT,
- &cmdbatch->context->priv);
+ cmdqueue->head = CMDQUEUE_NEXT(cmdqueue->head,
+ ADRENO_DISPATCH_CMDQUEUE_SIZE);
- _print_recovery(KGSL_DEVICE(adreno_dev),
- cmdbatch);
- }
+ count++;
+ }
- /* Reduce the number of inflight command batches */
- dispatcher->inflight--;
- dispatch_q->inflight--;
+ return count;
+}
- /*
- * If kernel profiling is enabled get the submit and
- * retired ticks from the buffer
- */
+static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_cmdqueue *cmdqueue)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_cmdbatch *cmdbatch = cmdqueue->cmd_q[cmdqueue->head];
- if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv))
- cmdbatch_profile_ticks(adreno_dev, cmdbatch,
- &start_ticks, &retire_ticks);
+ /* Don't timeout if the timer hasn't expired yet (duh) */
+ if (time_is_after_jiffies(cmdqueue->expires))
+ return;
- trace_adreno_cmdbatch_retired(cmdbatch,
- (int) dispatcher->inflight, start_ticks,
- retire_ticks, ADRENO_CMDBATCH_RB(cmdbatch));
+ /* Don't timeout if the IB timeout is disabled globally */
+ if (!adreno_long_ib_detect(adreno_dev))
+ return;
- /* Record the delta between submit and retire ticks */
- drawctxt->submit_retire_ticks[drawctxt->ticks_index] =
- retire_ticks - cmdbatch->submit_ticks;
+ /* Don't time out if the context has disabled it */
+ if (cmdbatch->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+ return;
- drawctxt->ticks_index = (drawctxt->ticks_index + 1)
- % SUBMIT_RETIRE_TICKS_SIZE;
+ pr_context(device, cmdbatch->context, "gpu timeout ctx %d ts %d\n",
+ cmdbatch->context->id, cmdbatch->timestamp);
- /* Zero the old entry*/
- dispatch_q->cmd_q[dispatch_q->head] = NULL;
+ adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
+}
- /* Advance the buffer head */
- dispatch_q->head = CMDQUEUE_NEXT(dispatch_q->head,
- ADRENO_DISPATCH_CMDQUEUE_SIZE);
+static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_cmdqueue *cmdqueue)
+{
+ int count = adreno_dispatch_retire_cmdqueue(adreno_dev, cmdqueue);
- /* Destroy the retired command batch */
- kgsl_cmdbatch_destroy(cmdbatch);
+ /* Nothing to do if there are no pending commands */
+ if (adreno_cmdqueue_is_empty(cmdqueue))
+ return count;
- /* Update the expire time for the next command batch */
+ /* Don't update the cmdqueue timeout if we are about to preempt out */
+ if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE))
+ return count;
- if (dispatch_q->inflight > 0 &&
- dispatch_q == active_q) {
- cmdbatch =
- dispatch_q->cmd_q[dispatch_q->head];
- cmdbatch->expires = jiffies +
- msecs_to_jiffies(
- adreno_cmdbatch_timeout);
- }
+ /* Don't update the cmdqueue timeout if it isn't active */
+ if (!cmdqueue_is_current(cmdqueue))
+ return count;
- count++;
- continue;
- }
- /*
- * Break here if fault detection is disabled for the context or
- * if the long running IB detection is disaled device wide or
- * if the dispatch q is not active
- * Long running command buffers will be allowed to run to
- * completion - but badly behaving command buffers (infinite
- * shaders etc) can end up running forever.
- */
+ /*
+ * If the current ringbuffer retired any commands then universally
+ * reset the timeout
+ */
- if (!long_ib_detect ||
- drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE
- || dispatch_q != active_q)
- break;
+ if (count) {
+ cmdqueue->expires = jiffies +
+ msecs_to_jiffies(adreno_cmdbatch_timeout);
+ return count;
+ }
- /*
- * The last line of defense is to check if the command batch has
- * timed out. If we get this far but the timeout hasn't expired
- * yet then the GPU is still ticking away
- */
+ /*
+ * If we get here then 1) the ringbuffer is current and 2) we haven't
+ * retired anything. Check to see if the timeout is valid for the
+ * current cmdbatch and fault if it has expired
+ */
+ _adreno_dispatch_check_timeout(adreno_dev, cmdqueue);
+ return 0;
+}
- if (time_is_after_jiffies(cmdbatch->expires))
- break;
+/* Update the dispatcher timers */
+static void _dispatcher_update_timers(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- /* Boom goes the dynamite */
+ /* Kick the idle timer */
+ mutex_lock(&device->mutex);
+ kgsl_pwrscale_update(device);
+ mod_timer(&device->idle_timer,
+ jiffies + device->pwrctrl.interval_timeout);
+ mutex_unlock(&device->mutex);
- pr_context(KGSL_DEVICE(adreno_dev), cmdbatch->context,
- "gpu timeout ctx %d ts %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
+ /* Check to see if we need to update the command timer */
+ if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
+ struct adreno_dispatcher_cmdqueue *cmdqueue =
+ CMDQUEUE(adreno_dev->cur_rb);
- adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
- break;
+ if (!adreno_cmdqueue_is_empty(cmdqueue))
+ mod_timer(&dispatcher->timer, cmdqueue->expires);
}
- return count;
}
-/**
- * adreno_dispatcher_work() - Master work handler for the dispatcher
- * @work: Pointer to the work struct for the current work queue
- *
- * Process expired commands and send new ones.
- */
+/* Take down the dispatcher and release any power states */
+static void _dispatcher_power_down(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ mutex_lock(&device->mutex);
+
+ if (test_and_clear_bit(ADRENO_DISPATCHER_ACTIVE, &dispatcher->priv))
+ complete_all(&dispatcher->idle_gate);
+
+ del_timer_sync(&dispatcher->fault_timer);
+
+ if (test_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv)) {
+ kgsl_active_count_put(device);
+ clear_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
+ }
+
+ mutex_unlock(&device->mutex);
+}
+
static void adreno_dispatcher_work(struct work_struct *work)
{
struct adreno_dispatcher *dispatcher =
@@ -2095,95 +2081,50 @@ static void adreno_dispatcher_work(struct work_struct *work)
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
int count = 0;
- int cur_rb_id = adreno_dev->cur_rb->id;
+ unsigned int i = 0;
mutex_lock(&dispatcher->mutex);
- if (ADRENO_DISPATCHER_PREEMPT_CLEAR ==
- atomic_read(&dispatcher->preemption_state))
- /* process the active q*/
- count = adreno_dispatch_process_cmdqueue(adreno_dev,
- &(adreno_dev->cur_rb->dispatch_q),
- adreno_long_ib_detect(adreno_dev));
-
- else if (ADRENO_DISPATCHER_PREEMPT_TRIGGERED ==
- atomic_read(&dispatcher->preemption_state))
- count = adreno_dispatch_process_cmdqueue(adreno_dev,
- &(adreno_dev->cur_rb->dispatch_q), 0);
-
- /* Check if gpu fault occurred */
- if (dispatcher_do_fault(adreno_dev))
- goto done;
-
- if (gpudev->preemption_schedule)
- gpudev->preemption_schedule(adreno_dev);
-
- if (cur_rb_id != adreno_dev->cur_rb->id) {
- struct adreno_dispatcher_cmdqueue *dispatch_q =
- &(adreno_dev->cur_rb->dispatch_q);
- /* active level switched, clear new level cmdbatches */
- count = adreno_dispatch_process_cmdqueue(adreno_dev,
- dispatch_q,
- adreno_long_ib_detect(adreno_dev));
- /*
- * If GPU has already completed all the commands in new incoming
- * RB then we may not get another interrupt due to which
- * dispatcher may not run again. Schedule dispatcher here so
- * we can come back and process the other RB's if required
- */
- if (dispatch_q->head == dispatch_q->tail)
- adreno_dispatcher_schedule(device);
- }
/*
- * If inflight went to 0, queue back up the event processor to catch
- * stragglers
+ * As long as there are inflight commands, process retired commands from
+ * all cmdqueues
*/
- if (dispatcher->inflight == 0 && count)
- kgsl_schedule_work(&device->event_work);
-
- /* Try to dispatch new commands */
- _adreno_dispatcher_issuecmds(adreno_dev);
-
-done:
- /* Either update the timer for the next command batch or disable it */
- if (dispatcher->inflight) {
- struct kgsl_cmdbatch *cmdbatch =
- adreno_dev->cur_rb->dispatch_q.cmd_q[
- adreno_dev->cur_rb->dispatch_q.head];
- if (cmdbatch && adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_CLEAR))
- /* Update the timeout timer for the next cmdbatch */
- mod_timer(&dispatcher->timer, cmdbatch->expires);
-
- /* There are still things in flight - update the idle counts */
- mutex_lock(&device->mutex);
- kgsl_pwrscale_update(device);
- mod_timer(&device->idle_timer, jiffies +
- device->pwrctrl.interval_timeout);
- mutex_unlock(&device->mutex);
- } else {
- /* There is nothing left in the pipeline. Shut 'er down boys */
- mutex_lock(&device->mutex);
+ for (i = 0; i < adreno_dev->num_ringbuffers; i++) {
+ struct adreno_dispatcher_cmdqueue *cmdqueue =
+ CMDQUEUE(&adreno_dev->ringbuffers[i]);
- if (test_and_clear_bit(ADRENO_DISPATCHER_ACTIVE,
- &dispatcher->priv))
- complete_all(&dispatcher->idle_gate);
+ count += adreno_dispatch_process_cmdqueue(adreno_dev,
+ cmdqueue);
+ if (dispatcher->inflight == 0)
+ break;
+ }
- /*
- * Stop the fault timer before decrementing the active count to
- * avoid reading the hardware registers while we are trying to
- * turn clocks off
- */
- del_timer_sync(&dispatcher->fault_timer);
+ /*
+ * dispatcher_do_fault() returns 0 if no faults occurred. If that is the
+ * case, then clean up preemption and try to schedule more work
+ */
+ if (dispatcher_do_fault(adreno_dev) == 0) {
+ /* Clean up after preemption */
+ if (gpudev->preemption_schedule)
+ gpudev->preemption_schedule(adreno_dev);
- if (test_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv)) {
- kgsl_active_count_put(device);
- clear_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
- }
+ /* Re-kick the event engine to catch stragglers */
+ if (dispatcher->inflight == 0 && count != 0)
+ kgsl_schedule_work(&device->event_work);
- mutex_unlock(&device->mutex);
+ /* Run the scheduler to dispatch new commands */
+ _adreno_dispatcher_issuecmds(adreno_dev);
}
+ /*
+ * If there are commands pending, update the timers, otherwise release
+ * the power state to prepare for power down
+ */
+ if (dispatcher->inflight > 0)
+ _dispatcher_update_timers(adreno_dev);
+ else
+ _dispatcher_power_down(adreno_dev);
+
mutex_unlock(&dispatcher->mutex);
}
@@ -2305,7 +2246,7 @@ void adreno_dispatcher_close(struct adreno_device *adreno_dev)
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
struct adreno_dispatcher_cmdqueue *dispatch_q =
&(rb->dispatch_q);
- while (dispatch_q->head != dispatch_q->tail) {
+ while (!adreno_cmdqueue_is_empty(dispatch_q)) {
kgsl_cmdbatch_destroy(
dispatch_q->cmd_q[dispatch_q->head]);
dispatch_q->head = (dispatch_q->head + 1)
@@ -2395,9 +2336,9 @@ static DISPATCHER_UINT_ATTR(fault_throttle_burst, 0644, 0,
static DISPATCHER_UINT_ATTR(disp_preempt_fair_sched, 0644, 0,
adreno_disp_preempt_fair_sched);
static DISPATCHER_UINT_ATTR(dispatch_time_slice, 0644, 0,
- _dispatch_time_slice);
+ adreno_dispatch_time_slice);
static DISPATCHER_UINT_ATTR(dispatch_starvation_time, 0644, 0,
- _dispatch_starvation_time);
+ adreno_dispatch_starvation_time);
static struct attribute *dispatcher_attrs[] = {
&dispatcher_attr_inflight.attr,
@@ -2474,9 +2415,6 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev)
setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
(unsigned long) adreno_dev);
- setup_timer(&dispatcher->preempt_timer, adreno_dispatcher_preempt_timer,
- (unsigned long) adreno_dev);
-
INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
init_completion(&dispatcher->idle_gate);
@@ -2485,9 +2423,6 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev)
plist_head_init(&dispatcher->pending);
spin_lock_init(&dispatcher->plist_lock);
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_CLEAR);
-
ret = kobject_init_and_add(&dispatcher->kobj, &ktype_dispatcher,
&device->dev->kobj, "dispatch");
@@ -2544,49 +2479,3 @@ int adreno_dispatcher_idle(struct adreno_device *adreno_dev)
adreno_dispatcher_schedule(device);
return ret;
}
-
-void adreno_preempt_process_dispatch_queue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *dispatch_q)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_cmdbatch *cmdbatch;
-
- if (dispatch_q->head != dispatch_q->tail) {
- /*
- * retire cmdbacthes from previous q, and don't check for
- * timeout since the cmdbatch may have been preempted
- */
- adreno_dispatch_process_cmdqueue(adreno_dev,
- dispatch_q, 0);
- }
-
- /* set the timer for the first cmdbatch of active dispatch_q */
- dispatch_q = &(adreno_dev->cur_rb->dispatch_q);
- if (dispatch_q->head != dispatch_q->tail) {
- cmdbatch = dispatch_q->cmd_q[dispatch_q->head];
- cmdbatch->expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
- }
- kgsl_schedule_work(&device->event_work);
-}
-
-/**
- * adreno_dispatcher_preempt_callback() - Callback funcion for CP_SW interrupt
- * @adreno_dev: The device on which the interrupt occurred
- * @bit: Interrupt bit in the interrupt status register
- */
-void adreno_dispatcher_preempt_callback(struct adreno_device *adreno_dev,
- int bit)
-{
- struct adreno_dispatcher *dispatcher = &(adreno_dev->dispatcher);
-
- if (ADRENO_DISPATCHER_PREEMPT_TRIGGERED !=
- atomic_read(&dispatcher->preemption_state))
- return;
-
- trace_adreno_hw_preempt_trig_to_comp_int(adreno_dev->cur_rb,
- adreno_dev->next_rb);
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_COMPLETE);
- adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
-}
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index 308d5b936819..699c3e4adb27 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -11,29 +11,13 @@
*
*/
-
#ifndef ____ADRENO_DISPATCHER_H
#define ____ADRENO_DISPATCHER_H
-/* Time to allow preemption to complete (in ms) */
-#define ADRENO_DISPATCH_PREEMPT_TIMEOUT 10000
-
extern unsigned int adreno_disp_preempt_fair_sched;
extern unsigned int adreno_cmdbatch_timeout;
-
-/**
- * enum adreno_dispatcher_preempt_states - States of dispatcher for ringbuffer
- * preemption
- * @ADRENO_DISPATCHER_PREEMPT_CLEAR: No preemption is underway,
- * only 1 preemption can be underway at any point
- * @ADRENO_DISPATCHER_PREEMPT_TRIGGERED: A preemption is underway
- * @ADRENO_DISPATCHER_PREEMPT_COMPLETE: A preemption has just completed
- */
-enum adreno_dispatcher_preempt_states {
- ADRENO_DISPATCHER_PREEMPT_CLEAR = 0,
- ADRENO_DISPATCHER_PREEMPT_TRIGGERED,
- ADRENO_DISPATCHER_PREEMPT_COMPLETE,
-};
+extern unsigned int adreno_dispatch_starvation_time;
+extern unsigned int adreno_dispatch_time_slice;
/**
* enum adreno_dispatcher_starve_timer_states - Starvation control states of
@@ -71,6 +55,7 @@ enum adreno_dispatcher_starve_timer_states {
* @head: Head pointer to the q
* @tail: Queues tail pointer
* @active_context_count: Number of active contexts seen in this rb cmdqueue
+ * @expires: The jiffies value at which this cmdqueue has run too long
*/
struct adreno_dispatcher_cmdqueue {
struct kgsl_cmdbatch *cmd_q[ADRENO_DISPATCH_CMDQUEUE_SIZE];
@@ -78,6 +63,7 @@ struct adreno_dispatcher_cmdqueue {
unsigned int head;
unsigned int tail;
int active_context_count;
+ unsigned long expires;
};
/**
@@ -92,11 +78,6 @@ struct adreno_dispatcher_cmdqueue {
* @work: work_struct to put the dispatcher in a work queue
* @kobj: kobject for the dispatcher directory in the device sysfs node
* @idle_gate: Gate to wait on for dispatcher to idle
- * @preemption_state: Indicated what state the dispatcher is in, states are
- * defined by enum adreno_dispatcher_preempt_states
- * @preempt_token_submit: Indicates if a preempt token has been subnitted in
- * current ringbuffer.
- * @preempt_timer: Timer to track if preemption occured within specified time
* @disp_preempt_fair_sched: If set then dispatcher will try to be fair to
* starving RB's by scheduling them in and enforcing a minimum time slice
* for every RB that is scheduled to run on the device
@@ -113,9 +94,6 @@ struct adreno_dispatcher {
struct work_struct work;
struct kobject kobj;
struct completion idle_gate;
- atomic_t preemption_state;
- int preempt_token_submit;
- struct timer_list preempt_timer;
unsigned int disp_preempt_fair_sched;
};
@@ -141,12 +119,12 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device,
struct adreno_context *drawctxt);
void adreno_dispatcher_preempt_callback(struct adreno_device *adreno_dev,
int bit);
-struct adreno_ringbuffer *adreno_dispatcher_get_highest_busy_rb(
- struct adreno_device *adreno_dev);
-int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *dispatch_q,
- int long_ib_detect);
void adreno_preempt_process_dispatch_queue(struct adreno_device *adreno_dev,
struct adreno_dispatcher_cmdqueue *dispatch_q);
+static inline bool adreno_cmdqueue_is_empty(
+ struct adreno_dispatcher_cmdqueue *cmdqueue)
+{
+ return (cmdqueue != NULL && cmdqueue->head == cmdqueue->tail);
+}
#endif /* __ADRENO_DISPATCHER_H */
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index d8498d938b6a..fb95f6108fb8 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -346,7 +346,8 @@ adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
KGSL_CONTEXT_PWR_CONSTRAINT |
KGSL_CONTEXT_IFH_NOP |
KGSL_CONTEXT_SECURE |
- KGSL_CONTEXT_PREEMPT_STYLE_MASK);
+ KGSL_CONTEXT_PREEMPT_STYLE_MASK |
+ KGSL_CONTEXT_NO_SNAPSHOT);
/* Check for errors before trying to initialize */
@@ -466,20 +467,6 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
list_del_init(&drawctxt->active_node);
spin_unlock(&adreno_dev->active_list_lock);
- /* deactivate context */
- mutex_lock(&device->mutex);
- if (rb->drawctxt_active == drawctxt) {
- if (adreno_dev->cur_rb == rb) {
- if (!kgsl_active_count_get(device)) {
- adreno_drawctxt_switch(adreno_dev, rb, NULL, 0);
- kgsl_active_count_put(device);
- } else
- BUG();
- } else
- adreno_drawctxt_switch(adreno_dev, rb, NULL, 0);
- }
- mutex_unlock(&device->mutex);
-
spin_lock(&drawctxt->lock);
count = drawctxt_detach_cmdbatches(drawctxt, list);
spin_unlock(&drawctxt->lock);
@@ -548,12 +535,21 @@ void adreno_drawctxt_destroy(struct kgsl_context *context)
kfree(drawctxt);
}
+static void _drawctxt_switch_wait_callback(struct kgsl_device *device,
+ struct kgsl_event_group *group,
+ void *priv, int result)
+{
+ struct adreno_context *drawctxt = (struct adreno_context *) priv;
+
+ kgsl_context_put(&drawctxt->base);
+}
+
/**
* adreno_drawctxt_switch - switch the current draw context in a given RB
* @adreno_dev - The 3D device that owns the context
* @rb: The ringubffer pointer on which the current context is being changed
* @drawctxt - the 3D context to switch to
- * @flags - Flags to accompany the switch (from user space)
+ * @flags: Control flags for the switch
*
* Switch the current draw context in given RB
*/
@@ -583,8 +579,7 @@ int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base))
return -ENOENT;
- trace_adreno_drawctxt_switch(rb,
- drawctxt, flags);
+ trace_adreno_drawctxt_switch(rb, drawctxt);
/* Get a refcount to the new instance */
if (drawctxt) {
@@ -596,16 +591,18 @@ int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
/* No context - set the default pagetable and thats it. */
new_pt = device->mmu.defaultpagetable;
}
- ret = adreno_ringbuffer_set_pt_ctx(rb, new_pt, drawctxt);
- if (ret) {
- KGSL_DRV_ERR(device,
- "Failed to set pagetable on rb %d\n", rb->id);
+ ret = adreno_ringbuffer_set_pt_ctx(rb, new_pt, drawctxt, flags);
+ if (ret)
return ret;
- }
- /* Put the old instance of the active drawctxt */
- if (rb->drawctxt_active)
- kgsl_context_put(&rb->drawctxt_active->base);
+ if (rb->drawctxt_active) {
+ /* Wait for the timestamp to expire */
+ if (kgsl_add_event(device, &rb->events, rb->timestamp,
+ _drawctxt_switch_wait_callback,
+ rb->drawctxt_active)) {
+ kgsl_context_put(&rb->drawctxt_active->base);
+ }
+ }
rb->drawctxt_active = drawctxt;
return 0;
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 7e80247e9322..5ea911954991 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -104,6 +104,9 @@ enum adreno_context_priv {
ADRENO_CONTEXT_SKIP_CMD,
};
+/* Flags for adreno_drawctxt_switch() */
+#define ADRENO_CONTEXT_SWITCH_FORCE_GPU BIT(0)
+
struct kgsl_context *adreno_drawctxt_create(struct kgsl_device_private *,
uint32_t *flags);
diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c
index 519087a77b83..0d5e3e094c36 100644
--- a/drivers/gpu/msm/adreno_ioctl.c
+++ b/drivers/gpu/msm/adreno_ioctl.c
@@ -103,7 +103,7 @@ static long adreno_ioctl_preemption_counters_query(
levels_to_copy = gpudev->num_prio_levels;
if (copy_to_user((void __user *) (uintptr_t) read->counters,
- adreno_dev->preemption_counters.hostptr,
+ adreno_dev->preempt.counters.hostptr,
levels_to_copy * size_level))
return -EFAULT;
diff --git a/drivers/gpu/msm/adreno_iommu.c b/drivers/gpu/msm/adreno_iommu.c
index 2eeda01b3c4d..aa00dcb84185 100644
--- a/drivers/gpu/msm/adreno_iommu.c
+++ b/drivers/gpu/msm/adreno_iommu.c
@@ -275,6 +275,7 @@ static bool _ctx_switch_use_cpu_path(
struct adreno_ringbuffer *rb)
{
struct kgsl_mmu *mmu = KGSL_MMU(adreno_dev);
+
/*
* If rb is current, we can use cpu path when GPU is
* idle and we are switching to default pt.
@@ -284,7 +285,7 @@ static bool _ctx_switch_use_cpu_path(
if (adreno_dev->cur_rb == rb)
return adreno_isidle(KGSL_DEVICE(adreno_dev)) &&
(new_pt == mmu->defaultpagetable);
- else if ((rb->wptr == rb->rptr) &&
+ else if (adreno_rb_empty(rb) &&
(new_pt == mmu->defaultpagetable))
return true;
@@ -360,8 +361,7 @@ static unsigned int _adreno_mmu_set_pt_update_condition(
*/
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- switch_pt_enable)));
+ PT_INFO_OFFSET(switch_pt_enable)));
*cmds++ = 1;
*cmds++ = cp_packet(adreno_dev, CP_WAIT_MEM_WRITES, 1);
*cmds++ = 0;
@@ -375,14 +375,11 @@ static unsigned int _adreno_mmu_set_pt_update_condition(
*cmds++ = (1 << 8) | (1 << 4) | 3;
cmds += cp_gpuaddr(adreno_dev, cmds,
(adreno_dev->ringbuffers[0].pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- current_global_ptname)));
+ PT_INFO_OFFSET(current_global_ptname)));
*cmds++ = ptname;
*cmds++ = 0xFFFFFFFF;
- cmds += cp_gpuaddr(adreno_dev, cmds,
- (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- switch_pt_enable)));
+ cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
+ PT_INFO_OFFSET(switch_pt_enable)));
*cmds++ = 0;
*cmds++ = cp_packet(adreno_dev, CP_WAIT_MEM_WRITES, 1);
*cmds++ = 0;
@@ -406,23 +403,18 @@ static unsigned int _adreno_iommu_pt_update_pid_to_mem(
unsigned int *cmds_orig = cmds;
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- cmds += cp_gpuaddr(adreno_dev, cmds,
- (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- current_rb_ptname)));
+ cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
+ PT_INFO_OFFSET(current_rb_ptname)));
*cmds++ = ptname;
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
cmds += cp_gpuaddr(adreno_dev, cmds,
- (adreno_dev->ringbuffers[0].pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- current_global_ptname)));
+ (adreno_dev->ringbuffers[0].pagetable_desc.gpuaddr +
+ PT_INFO_OFFSET(current_global_ptname)));
*cmds++ = ptname;
/* pagetable switch done, Housekeeping: set the switch_pt_enable to 0 */
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- cmds += cp_gpuaddr(adreno_dev, cmds,
- (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- switch_pt_enable)));
+ cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
+ PT_INFO_OFFSET(switch_pt_enable)));
*cmds++ = 0;
*cmds++ = cp_packet(adreno_dev, CP_WAIT_MEM_WRITES, 1);
*cmds++ = 0;
@@ -444,14 +436,10 @@ static unsigned int _adreno_iommu_set_pt_v1(struct adreno_ringbuffer *rb,
/* set flag that indicates whether pt switch is required*/
cmds += _adreno_mmu_set_pt_update_condition(rb, cmds, ptname);
*cmds++ = cp_mem_packet(adreno_dev, CP_COND_EXEC, 4, 2);
- cmds += cp_gpuaddr(adreno_dev, cmds,
- (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- switch_pt_enable)));
- cmds += cp_gpuaddr(adreno_dev, cmds,
- (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- switch_pt_enable)));
+ cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
+ PT_INFO_OFFSET(switch_pt_enable)));
+ cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
+ PT_INFO_OFFSET(switch_pt_enable)));
*cmds++ = 1;
/* Exec count to be filled later */
cond_exec_ptr = cmds;
@@ -566,7 +554,7 @@ static unsigned int _adreno_iommu_set_pt_v2_a5xx(struct kgsl_device *device,
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 4, 1);
cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info, ttbr0)));
+ PT_INFO_OFFSET(ttbr0)));
*cmds++ = lower_32_bits(ttbr0);
*cmds++ = upper_32_bits(ttbr0);
*cmds++ = contextidr;
@@ -651,14 +639,14 @@ static unsigned int __add_curr_ctxt_cmds(struct adreno_ringbuffer *rb,
*cmds++ = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- cmds += cp_gpuaddr(adreno_dev, cmds, device->memstore.gpuaddr +
- KGSL_MEMSTORE_RB_OFFSET(rb, current_context));
+ cmds += cp_gpuaddr(adreno_dev, cmds,
+ MEMSTORE_RB_GPU_ADDR(device, rb, current_context));
*cmds++ = (drawctxt ? drawctxt->base.id : 0);
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- cmds += cp_gpuaddr(adreno_dev, cmds, device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
+ cmds += cp_gpuaddr(adreno_dev, cmds,
+ MEMSTORE_ID_GPU_ADDR(device,
+ KGSL_MEMSTORE_GLOBAL, current_context));
*cmds++ = (drawctxt ? drawctxt->base.id : 0);
/* Invalidate UCHE for new context */
@@ -706,7 +694,7 @@ static void _set_ctxt_cpu(struct adreno_ringbuffer *rb,
}
/* Update rb memstore with current context */
kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_RB_OFFSET(rb, current_context),
+ MEMSTORE_RB_OFFSET(rb, current_context),
drawctxt ? drawctxt->base.id : 0);
}
@@ -746,26 +734,11 @@ static int _set_pagetable_cpu(struct adreno_ringbuffer *rb,
if (result)
return result;
/* write the new pt set to memory var */
- kgsl_sharedmem_writel(device,
- &adreno_dev->ringbuffers[0].pagetable_desc,
- offsetof(
- struct adreno_ringbuffer_pagetable_info,
- current_global_ptname), new_pt->name);
+ adreno_ringbuffer_set_global(adreno_dev, new_pt->name);
}
/* Update the RB pagetable info here */
- kgsl_sharedmem_writel(device, &rb->pagetable_desc,
- offsetof(
- struct adreno_ringbuffer_pagetable_info,
- current_rb_ptname), new_pt->name);
- kgsl_sharedmem_writeq(device, &rb->pagetable_desc,
- offsetof(
- struct adreno_ringbuffer_pagetable_info,
- ttbr0), kgsl_mmu_pagetable_get_ttbr0(new_pt));
- kgsl_sharedmem_writel(device, &rb->pagetable_desc,
- offsetof(
- struct adreno_ringbuffer_pagetable_info,
- contextidr), kgsl_mmu_pagetable_get_contextidr(new_pt));
+ adreno_ringbuffer_set_pagetable(rb, new_pt);
return 0;
}
@@ -795,8 +768,6 @@ static int _set_pagetable_gpu(struct adreno_ringbuffer *rb,
return 0;
}
- kgsl_mmu_enable_clk(KGSL_MMU(adreno_dev));
-
cmds += adreno_iommu_set_pt_generate_cmds(rb, cmds, new_pt);
if ((unsigned int) (cmds - link) > (PAGE_SIZE / sizeof(unsigned int))) {
@@ -812,16 +783,6 @@ static int _set_pagetable_gpu(struct adreno_ringbuffer *rb,
KGSL_CMD_FLAGS_PMODE, link,
(unsigned int)(cmds - link));
- /*
- * On error disable the IOMMU clock right away otherwise turn it off
- * after the command has been retired
- */
- if (result)
- kgsl_mmu_disable_clk(KGSL_MMU(adreno_dev));
- else
- adreno_ringbuffer_mmu_disable_clk_on_ts(KGSL_DEVICE(adreno_dev),
- rb, rb->timestamp);
-
kfree(link);
return result;
}
@@ -886,7 +847,8 @@ int adreno_iommu_init(struct adreno_device *adreno_dev)
*/
int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
struct kgsl_pagetable *new_pt,
- struct adreno_context *drawctxt)
+ struct adreno_context *drawctxt,
+ unsigned long flags)
{
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -897,7 +859,8 @@ int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
if (rb->drawctxt_active)
cur_pt = rb->drawctxt_active->base.proc_priv->pagetable;
- cpu_path = _ctx_switch_use_cpu_path(adreno_dev, new_pt, rb);
+ cpu_path = !(flags & ADRENO_CONTEXT_SWITCH_FORCE_GPU) &&
+ _ctx_switch_use_cpu_path(adreno_dev, new_pt, rb);
/* Pagetable switch */
if (new_pt != cur_pt) {
@@ -907,10 +870,8 @@ int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
result = _set_pagetable_gpu(rb, new_pt);
}
- if (result) {
- KGSL_DRV_ERR(device, "Error switching pagetable %d\n", result);
+ if (result)
return result;
- }
/* Context switch */
if (cpu_path)
@@ -918,8 +879,5 @@ int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
else
result = _set_ctxt_gpu(rb, drawctxt);
- if (result)
- KGSL_DRV_ERR(device, "Error switching context %d\n", result);
-
return result;
}
diff --git a/drivers/gpu/msm/adreno_iommu.h b/drivers/gpu/msm/adreno_iommu.h
index c557c65bb4c9..5a6c2c549370 100644
--- a/drivers/gpu/msm/adreno_iommu.h
+++ b/drivers/gpu/msm/adreno_iommu.h
@@ -17,7 +17,8 @@
#ifdef CONFIG_QCOM_KGSL_IOMMU
int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
struct kgsl_pagetable *new_pt,
- struct adreno_context *drawctxt);
+ struct adreno_context *drawctxt,
+ unsigned long flags);
int adreno_iommu_init(struct adreno_device *adreno_dev);
@@ -33,7 +34,8 @@ static inline int adreno_iommu_init(struct adreno_device *adreno_dev)
static inline int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
struct kgsl_pagetable *new_pt,
- struct adreno_context *drawctxt)
+ struct adreno_context *drawctxt,
+ unsigned long flags)
{
return 0;
}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index dceb8fb93461..0160939e97f9 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -30,8 +30,6 @@
#include "a3xx_reg.h"
#include "adreno_a5xx.h"
-#define GSL_RB_NOP_SIZEDWORDS 2
-
#define RB_HOSTPTR(_rb, _pos) \
((unsigned int *) ((_rb)->buffer_desc.hostptr + \
((_pos) * sizeof(unsigned int))))
@@ -50,86 +48,89 @@ static void _cff_write_ringbuffer(struct adreno_ringbuffer *rb)
if (device->cff_dump_enable == 0)
return;
- /*
- * This code is predicated on the fact that we write a full block of
- * stuff without wrapping
- */
- BUG_ON(rb->wptr < rb->last_wptr);
-
- size = (rb->wptr - rb->last_wptr) * sizeof(unsigned int);
+ size = (rb->_wptr - rb->last_wptr) * sizeof(unsigned int);
hostptr = RB_HOSTPTR(rb, rb->last_wptr);
gpuaddr = RB_GPUADDR(rb, rb->last_wptr);
kgsl_cffdump_memcpy(device, gpuaddr, hostptr, size);
+ rb->last_wptr = rb->_wptr;
}
-void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb,
+static void adreno_get_submit_time(struct adreno_device *adreno_dev,
struct adreno_submit_time *time)
{
- struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
- BUG_ON(rb->wptr == 0);
-
- /* Write the changes to CFF if so enabled */
- _cff_write_ringbuffer(rb);
-
+ unsigned long flags;
/*
- * Read the current GPU ticks and wallclock for most accurate
- * profiling
+ * Here we are attempting to create a mapping between the
+ * GPU time domain (alwayson counter) and the CPU time domain
+ * (local_clock) by sampling both values as close together as
+ * possible. This is useful for many types of debugging and
+ * profiling. In order to make this mapping as accurate as
+ * possible, we must turn off interrupts to avoid running
+ * interrupt handlers between the two samples.
*/
- if (time != NULL) {
- /*
- * Here we are attempting to create a mapping between the
- * GPU time domain (alwayson counter) and the CPU time domain
- * (local_clock) by sampling both values as close together as
- * possible. This is useful for many types of debugging and
- * profiling. In order to make this mapping as accurate as
- * possible, we must turn off interrupts to avoid running
- * interrupt handlers between the two samples.
- */
- unsigned long flags;
- local_irq_save(flags);
+ local_irq_save(flags);
- /* Read always on registers */
- if (!adreno_is_a3xx(adreno_dev)) {
- adreno_readreg64(adreno_dev,
- ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
- ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
- &time->ticks);
+ /* Read always on registers */
+ if (!adreno_is_a3xx(adreno_dev)) {
+ adreno_readreg64(adreno_dev,
+ ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
+ ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
+ &time->ticks);
- /*
- * Mask hi bits as they may be incorrect on
- * a4x and some a5x
- */
- if (ADRENO_GPUREV(adreno_dev) >= 400 &&
+ /* Mask hi bits as they may be incorrect on some targets */
+ if (ADRENO_GPUREV(adreno_dev) >= 400 &&
ADRENO_GPUREV(adreno_dev) <= ADRENO_REV_A530)
- time->ticks &= 0xFFFFFFFF;
- }
- else
- time->ticks = 0;
+ time->ticks &= 0xFFFFFFFF;
+ } else
+ time->ticks = 0;
- /* Get the kernel clock for time since boot */
- time->ktime = local_clock();
+ /* Get the kernel clock for time since boot */
+ time->ktime = local_clock();
- /* Get the timeofday for the wall time (for the user) */
- getnstimeofday(&time->utime);
+ /* Get the timeofday for the wall time (for the user) */
+ getnstimeofday(&time->utime);
- local_irq_restore(flags);
- }
+ local_irq_restore(flags);
+}
+
+void adreno_ringbuffer_wptr(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb)
+{
+ unsigned long flags;
- /* Memory barrier before informing the hardware of new commands */
- mb();
+ spin_lock_irqsave(&rb->preempt_lock, flags);
+ if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
- if (adreno_preempt_state(adreno_dev, ADRENO_DISPATCHER_PREEMPT_CLEAR) &&
- (adreno_dev->cur_rb == rb)) {
- /*
- * Let the pwrscale policy know that new commands have
- * been submitted.
- */
- kgsl_pwrscale_busy(KGSL_DEVICE(adreno_dev));
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->wptr);
+ if (adreno_dev->cur_rb == rb) {
+ /*
+ * Let the pwrscale policy know that new commands have
+ * been submitted.
+ */
+ kgsl_pwrscale_busy(KGSL_DEVICE(adreno_dev));
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
+ rb->_wptr);
+ }
}
+
+ rb->wptr = rb->_wptr;
+ spin_unlock_irqrestore(&rb->preempt_lock, flags);
+}
+
+void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb,
+ struct adreno_submit_time *time)
+{
+ struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+
+ /* Write the changes to CFF if so enabled */
+ _cff_write_ringbuffer(rb);
+
+ if (time != NULL)
+ adreno_get_submit_time(adreno_dev, time);
+
+ adreno_ringbuffer_wptr(adreno_dev, rb);
}
int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb,
@@ -141,125 +142,36 @@ int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb,
return adreno_spin_idle(adreno_dev, timeout);
}
-static int
-adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb,
- unsigned int numcmds, int wptr_ahead)
+unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
+ unsigned int dwords)
{
- int nopcount = 0;
- unsigned int freecmds;
- unsigned int wptr = rb->wptr;
- unsigned int *cmds = NULL;
- uint64_t gpuaddr;
- unsigned long wait_time;
- unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
- unsigned int rptr;
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+ unsigned int rptr = adreno_get_rptr(rb);
+ unsigned int ret;
- /* if wptr ahead, fill the remaining with NOPs */
- if (wptr_ahead) {
- /* -1 for header */
- nopcount = KGSL_RB_DWORDS - rb->wptr - 1;
-
- cmds = RB_HOSTPTR(rb, rb->wptr);
- gpuaddr = RB_GPUADDR(rb, rb->wptr);
-
- rptr = adreno_get_rptr(rb);
- /* For non current rb we don't expect the rptr to move */
- if ((adreno_dev->cur_rb != rb ||
- !adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_CLEAR)) &&
- !rptr)
- return -ENOSPC;
-
- /* Make sure that rptr is not 0 before submitting
- * commands at the end of ringbuffer. We do not
- * want the rptr and wptr to become equal when
- * the ringbuffer is not empty */
- wait_time = jiffies + wait_timeout;
- while (!rptr) {
- rptr = adreno_get_rptr(rb);
- if (time_after(jiffies, wait_time))
- return -ETIMEDOUT;
- }
-
- rb->wptr = 0;
- }
-
- rptr = adreno_get_rptr(rb);
- freecmds = rptr - rb->wptr;
- if (freecmds == 0 || freecmds > numcmds)
- goto done;
+ if (rptr <= rb->_wptr) {
+ unsigned int *cmds;
- /* non current rptr will not advance anyway or if preemption underway */
- if (adreno_dev->cur_rb != rb ||
- !adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_CLEAR)) {
- rb->wptr = wptr;
- return -ENOSPC;
- }
-
- wait_time = jiffies + wait_timeout;
- /* wait for space in ringbuffer */
- while (1) {
- rptr = adreno_get_rptr(rb);
-
- freecmds = rptr - rb->wptr;
-
- if (freecmds == 0 || freecmds > numcmds)
- break;
-
- if (time_after(jiffies, wait_time)) {
- KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
- "Timed out waiting for freespace in RB rptr: 0x%x, wptr: 0x%x, rb id %d\n",
- rptr, wptr, rb->id);
- return -ETIMEDOUT;
+ if (rb->_wptr + dwords <= (KGSL_RB_DWORDS - 2)) {
+ ret = rb->_wptr;
+ rb->_wptr = (rb->_wptr + dwords) % KGSL_RB_DWORDS;
+ return RB_HOSTPTR(rb, ret);
}
- }
-done:
- if (wptr_ahead) {
- *cmds = cp_packet(adreno_dev, CP_NOP, nopcount);
- kgsl_cffdump_write(KGSL_DEVICE(adreno_dev), gpuaddr, *cmds);
- }
- return 0;
-}
+ cmds = RB_HOSTPTR(rb, rb->_wptr);
+ *cmds = cp_packet(adreno_dev, CP_NOP,
+ KGSL_RB_DWORDS - rb->_wptr - 1);
-unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
- unsigned int numcmds)
-{
- unsigned int *ptr = NULL;
- int ret = 0;
- unsigned int rptr;
- BUG_ON(numcmds >= KGSL_RB_DWORDS);
-
- rptr = adreno_get_rptr(rb);
- /* check for available space */
- if (rb->wptr >= rptr) {
- /* wptr ahead or equal to rptr */
- /* reserve dwords for nop packet */
- if ((rb->wptr + numcmds) > (KGSL_RB_DWORDS -
- GSL_RB_NOP_SIZEDWORDS))
- ret = adreno_ringbuffer_waitspace(rb, numcmds, 1);
- } else {
- /* wptr behind rptr */
- if ((rb->wptr + numcmds) >= rptr)
- ret = adreno_ringbuffer_waitspace(rb, numcmds, 0);
- /* check for remaining space */
- /* reserve dwords for nop packet */
- if (!ret && (rb->wptr + numcmds) > (KGSL_RB_DWORDS -
- GSL_RB_NOP_SIZEDWORDS))
- ret = adreno_ringbuffer_waitspace(rb, numcmds, 1);
+ rb->_wptr = 0;
}
- if (!ret) {
- rb->last_wptr = rb->wptr;
-
- ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
- rb->wptr += numcmds;
- } else
- ptr = ERR_PTR(ret);
+ if (rb->_wptr + dwords < rptr) {
+ ret = rb->_wptr;
+ rb->_wptr = (rb->_wptr + dwords) % KGSL_RB_DWORDS;
+ return RB_HOSTPTR(rb, ret);
+ }
- return ptr;
+ return ERR_PTR(-ENOSPC);
}
/**
@@ -279,8 +191,10 @@ int adreno_ringbuffer_start(struct adreno_device *adreno_dev,
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
kgsl_sharedmem_set(device, &(rb->buffer_desc),
0, 0xAA, KGSL_RB_SIZE);
+ kgsl_sharedmem_writel(device, &device->scratch,
+ SCRATCH_RPTR_OFFSET(rb->id), 0);
rb->wptr = 0;
- rb->rptr = 0;
+ rb->_wptr = 0;
rb->wptr_preempt_end = 0xFFFFFFFF;
rb->starve_timer_state =
ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
@@ -322,6 +236,8 @@ static int _adreno_ringbuffer_probe(struct adreno_device *adreno_dev,
rb->timestamp = 0;
init_waitqueue_head(&rb->ts_expire_waitq);
+ spin_lock_init(&rb->preempt_lock);
+
/*
* Allocate mem for storing RB pagetables and commands to
* switch pagetable
@@ -433,6 +349,18 @@ int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds,
return cmds - start;
}
+static inline int cp_mem_write(struct adreno_device *adreno_dev,
+ unsigned int *cmds, uint64_t gpuaddr, unsigned int value)
+{
+ int dwords = 0;
+
+ cmds[dwords++] = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
+ dwords += cp_gpuaddr(adreno_dev, &cmds[dwords], gpuaddr);
+ cmds[dwords++] = value;
+
+ return dwords;
+}
+
static int
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
unsigned int flags, unsigned int *cmds,
@@ -446,18 +374,20 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
unsigned int total_sizedwords = sizedwords;
unsigned int i;
unsigned int context_id = 0;
- uint64_t gpuaddr = device->memstore.gpuaddr;
bool profile_ready;
struct adreno_context *drawctxt = rb->drawctxt_active;
struct kgsl_context *context = NULL;
bool secured_ctxt = false;
- uint64_t cond_addr;
static unsigned int _seq_cnt;
if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base) &&
!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
return -ENOENT;
+ /* On fault return error so that we don't keep submitting */
+ if (adreno_gpu_fault(adreno_dev) != 0)
+ return -EPROTO;
+
rb->timestamp++;
/* If this is a internal IB, use the global timestamp for it */
@@ -529,7 +459,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
* required in ringbuffer and adjust the write pointer depending on
* gpucore at the end of this function.
*/
- total_sizedwords += 4; /* sop timestamp */
+ total_sizedwords += 8; /* sop timestamp */
total_sizedwords += 5; /* eop timestamp */
if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
@@ -564,14 +494,9 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
*ringcmds++ = KGSL_CMD_IDENTIFIER;
if (adreno_is_preemption_enabled(adreno_dev) &&
- gpudev->preemption_pre_ibsubmit) {
- cond_addr = device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(context_id,
- preempted);
+ gpudev->preemption_pre_ibsubmit)
ringcmds += gpudev->preemption_pre_ibsubmit(
- adreno_dev, rb, ringcmds, context,
- cond_addr, NULL);
- }
+ adreno_dev, rb, ringcmds, context);
if (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) {
*ringcmds++ = cp_packet(adreno_dev, CP_NOP, 1);
@@ -601,16 +526,15 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
adreno_profile_preib_processing(adreno_dev, drawctxt,
&flags, &ringcmds);
- /* start-of-pipeline timestamp */
- *ringcmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
+ /* start-of-pipeline timestamp for the context */
if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
- ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
- gpuaddr + KGSL_MEMSTORE_OFFSET(context_id,
- soptimestamp));
- else
- ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
- gpuaddr + KGSL_MEMSTORE_RB_OFFSET(rb, soptimestamp));
- *ringcmds++ = timestamp;
+ ringcmds += cp_mem_write(adreno_dev, ringcmds,
+ MEMSTORE_ID_GPU_ADDR(device, context_id, soptimestamp),
+ timestamp);
+
+ /* start-of-pipeline timestamp for the ringbuffer */
+ ringcmds += cp_mem_write(adreno_dev, ringcmds,
+ MEMSTORE_RB_GPU_ADDR(device, rb, soptimestamp), rb->timestamp);
if (secured_ctxt)
ringcmds += cp_secure_mode(adreno_dev, ringcmds, 1);
@@ -659,11 +583,9 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
* early detection of timestamp interrupt storms to stave
* off system collapse.
*/
- *ringcmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- ringcmds += cp_gpuaddr(adreno_dev, ringcmds, gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- ref_wait_ts));
- *ringcmds++ = ++_seq_cnt;
+ ringcmds += cp_mem_write(adreno_dev, ringcmds,
+ MEMSTORE_ID_GPU_ADDR(device, KGSL_MEMSTORE_GLOBAL,
+ ref_wait_ts), ++_seq_cnt);
/*
* end-of-pipeline timestamp. If per context timestamps is not
@@ -677,16 +599,17 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
*ringcmds++ = CACHE_FLUSH_TS;
if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
- ringcmds += cp_gpuaddr(adreno_dev, ringcmds, gpuaddr +
- KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp));
+ ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
+ MEMSTORE_ID_GPU_ADDR(device, context_id, eoptimestamp));
*ringcmds++ = timestamp;
- *ringcmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- ringcmds += cp_gpuaddr(adreno_dev, ringcmds, gpuaddr +
- KGSL_MEMSTORE_RB_OFFSET(rb, eoptimestamp));
- *ringcmds++ = rb->timestamp;
+
+ /* Write the end of pipeline timestamp to the ringbuffer too */
+ ringcmds += cp_mem_write(adreno_dev, ringcmds,
+ MEMSTORE_RB_GPU_ADDR(device, rb, eoptimestamp),
+ rb->timestamp);
} else {
- ringcmds += cp_gpuaddr(adreno_dev, ringcmds, gpuaddr +
- KGSL_MEMSTORE_RB_OFFSET(rb, eoptimestamp));
+ ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
+ MEMSTORE_RB_GPU_ADDR(device, rb, eoptimestamp));
*ringcmds++ = timestamp;
}
@@ -707,8 +630,8 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
if (gpudev->preemption_post_ibsubmit &&
adreno_is_preemption_enabled(adreno_dev))
- ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev, rb,
- ringcmds, &drawctxt->base);
+ ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev,
+ ringcmds);
/*
* If we have more ringbuffer commands than space reserved
@@ -722,7 +645,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
* required. If we have commands less than the space reserved in RB
* adjust the wptr accordingly.
*/
- rb->wptr = rb->wptr - (total_sizedwords - (ringcmds - start));
+ rb->_wptr = rb->_wptr - (total_sizedwords - (ringcmds - start));
adreno_ringbuffer_submit(rb, time);
@@ -1063,14 +986,24 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
*cmds++ = cp_packet(adreno_dev, CP_NOP, 1);
*cmds++ = KGSL_END_OF_IB_IDENTIFIER;
- ret = adreno_drawctxt_switch(adreno_dev, rb, drawctxt, cmdbatch->flags);
+ /* Context switches commands should *always* be on the GPU */
+ ret = adreno_drawctxt_switch(adreno_dev, rb, drawctxt,
+ ADRENO_CONTEXT_SWITCH_FORCE_GPU);
/*
* In the unlikely event of an error in the drawctxt switch,
* treat it like a hang
*/
- if (ret)
+ if (ret) {
+ /*
+ * It is "normal" to get a -ENOSPC or a -ENOENT. Don't log it,
+ * the upper layers know how to handle it
+ */
+ if (ret != -ENOSPC && ret != -ENOENT)
+ KGSL_DRV_ERR(device,
+ "Unable to switch draw context: %d\n", ret);
goto done;
+ }
if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv))
flags = KGSL_CMD_FLAGS_WFI;
@@ -1138,44 +1071,6 @@ done:
}
/**
- * adreno_ringbuffer_mmu_clk_disable_event() - Callback function that
- * disables the MMU clocks.
- * @device: Device pointer
- * @context: The ringbuffer context pointer
- * @data: Pointer containing the adreno_mmu_disable_clk_param structure
- * @type: The event call type (RETIRED or CANCELLED)
- */
-static void adreno_ringbuffer_mmu_clk_disable_event(struct kgsl_device *device,
- struct kgsl_event_group *group, void *data, int type)
-{
- kgsl_mmu_disable_clk(&device->mmu);
-}
-
-/*
- * adreno_ringbuffer_mmu_disable_clk_on_ts() - Sets up event to disable MMU
- * clocks
- * @device - The kgsl device pointer
- * @rb: The ringbuffer in whose event list the event is added
- * @timestamp: The timestamp on which the event should trigger
- *
- * Creates an event to disable the MMU clocks on timestamp and if event
- * already exists then updates the timestamp of disabling the MMU clocks
- * with the passed in ts if it is greater than the current value at which
- * the clocks will be disabled
- * Return - void
- */
-void
-adreno_ringbuffer_mmu_disable_clk_on_ts(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, unsigned int timestamp)
-{
- if (kgsl_add_event(device, &(rb->events), timestamp,
- adreno_ringbuffer_mmu_clk_disable_event, NULL)) {
- KGSL_DRV_ERR(device,
- "Failed to add IOMMU disable clk event\n");
- }
-}
-
-/**
* adreno_ringbuffer_wait_callback() - Callback function for event registered
* on a ringbuffer timestamp
* @device: Device for which the the callback is valid
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index f1980fd92961..b126f710b5e6 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -73,13 +73,16 @@ struct adreno_ringbuffer_pagetable_info {
unsigned int contextidr;
};
+#define PT_INFO_OFFSET(_field) \
+ offsetof(struct adreno_ringbuffer_pagetable_info, _field)
+
/**
* struct adreno_ringbuffer - Definition for an adreno ringbuffer object
* @flags: Internal control flags for the ringbuffer
- * @buffer_desc: Pointer to the ringbuffer memory descriptor
- * @wptr: Local copy of the wptr offset
- * @rptr: Read pointer offset in dwords from baseaddr
- * @last_wptr: offset of the last H/W committed wptr
+ * @buffer_desc: Pointer to the ringbuffer memory descripto
+ * @_wptr: The next value of wptr to be written to the hardware on submit
+ * @wptr: Local copy of the wptr offset last written to hardware
+ * @last_wptr: offset of the last wptr that was written to CFF
* @rb_ctx: The context that represents a ringbuffer
* @id: Priority level of the ringbuffer, also used as an ID
* @fault_detect_ts: The last retired global timestamp read during fault detect
@@ -101,12 +104,13 @@ struct adreno_ringbuffer_pagetable_info {
* @sched_timer: Timer that tracks how long RB has been waiting to be scheduled
* or how long it has been scheduled for after preempting in
* @starve_timer_state: Indicates the state of the wait.
+ * @preempt_lock: Lock to protect the wptr pointer while it is being updated
*/
struct adreno_ringbuffer {
uint32_t flags;
struct kgsl_memdesc buffer_desc;
+ unsigned int _wptr;
unsigned int wptr;
- unsigned int rptr;
unsigned int last_wptr;
int id;
unsigned int fault_detect_ts;
@@ -122,14 +126,12 @@ struct adreno_ringbuffer {
int preempted_midway;
unsigned long sched_timer;
enum adreno_dispatcher_starve_timer_states starve_timer_state;
+ spinlock_t preempt_lock;
};
/* Returns the current ringbuffer */
#define ADRENO_CURRENT_RINGBUFFER(a) ((a)->cur_rb)
-#define KGSL_MEMSTORE_RB_OFFSET(rb, field) \
- KGSL_MEMSTORE_OFFSET((rb->id + KGSL_MEMSTORE_MAX), field)
-
int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds, int set);
int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
@@ -170,9 +172,6 @@ void adreno_ringbuffer_read_pfp_ucode(struct kgsl_device *device);
void adreno_ringbuffer_read_pm4_ucode(struct kgsl_device *device);
-void adreno_ringbuffer_mmu_disable_clk_on_ts(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, unsigned int ts);
-
int adreno_ringbuffer_waittimestamp(struct adreno_ringbuffer *rb,
unsigned int timestamp,
unsigned int msecs);
@@ -204,9 +203,10 @@ static inline unsigned int adreno_ringbuffer_dec_wrapped(unsigned int val,
}
static inline int adreno_ringbuffer_set_pt_ctx(struct adreno_ringbuffer *rb,
- struct kgsl_pagetable *pt, struct adreno_context *context)
+ struct kgsl_pagetable *pt, struct adreno_context *context,
+ unsigned long flags)
{
- return adreno_iommu_set_pt_ctx(rb, pt, context);
+ return adreno_iommu_set_pt_ctx(rb, pt, context, flags);
}
#endif /* __ADRENO_RINGBUFFER_H */
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index ca61d36a1384..b069b16c75ef 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -467,7 +467,7 @@ static size_t snapshot_rb(struct kgsl_device *device, u8 *buf,
header->start = 0;
header->end = KGSL_RB_DWORDS;
header->wptr = rb->wptr;
- header->rptr = rb->rptr;
+ header->rptr = adreno_get_rptr(rb);
header->rbsize = KGSL_RB_DWORDS;
header->count = KGSL_RB_DWORDS;
adreno_rb_readtimestamp(adreno_dev, rb, KGSL_TIMESTAMP_QUEUED,
@@ -741,8 +741,7 @@ static size_t snapshot_global(struct kgsl_device *device, u8 *buf,
header->size = memdesc->size >> 2;
header->gpuaddr = memdesc->gpuaddr;
- header->ptbase =
- kgsl_mmu_pagetable_get_ttbr0(device->mmu.defaultpagetable);
+ header->ptbase = MMU_DEFAULT_TTBR0(device);
header->type = SNAPSHOT_GPU_OBJECT_GLOBAL;
memcpy(ptr, memdesc->hostptr, memdesc->size);
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
index 5f1bbb9a83b3..f52ddfa894d5 100644
--- a/drivers/gpu/msm/adreno_trace.h
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -55,8 +55,8 @@ TRACE_EVENT(adreno_cmdbatch_queued,
TRACE_EVENT(adreno_cmdbatch_submitted,
TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight, uint64_t ticks,
unsigned long secs, unsigned long usecs,
- struct adreno_ringbuffer *rb),
- TP_ARGS(cmdbatch, inflight, ticks, secs, usecs, rb),
+ struct adreno_ringbuffer *rb, unsigned int rptr),
+ TP_ARGS(cmdbatch, inflight, ticks, secs, usecs, rb, rptr),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -81,7 +81,7 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
__entry->usecs = usecs;
__entry->prio = cmdbatch->context->priority;
__entry->rb_id = rb->id;
- __entry->rptr = rb->rptr;
+ __entry->rptr = rptr;
__entry->wptr = rb->wptr;
__entry->q_inflight = rb->dispatch_q.inflight;
),
@@ -100,8 +100,8 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
TRACE_EVENT(adreno_cmdbatch_retired,
TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight,
uint64_t start, uint64_t retire,
- struct adreno_ringbuffer *rb),
- TP_ARGS(cmdbatch, inflight, start, retire, rb),
+ struct adreno_ringbuffer *rb, unsigned int rptr),
+ TP_ARGS(cmdbatch, inflight, start, retire, rb, rptr),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -126,7 +126,7 @@ TRACE_EVENT(adreno_cmdbatch_retired,
__entry->retire = retire;
__entry->prio = cmdbatch->context->priority;
__entry->rb_id = rb->id;
- __entry->rptr = rb->rptr;
+ __entry->rptr = rptr;
__entry->wptr = rb->wptr;
__entry->q_inflight = rb->dispatch_q.inflight;
),
@@ -267,9 +267,8 @@ TRACE_EVENT(adreno_drawctxt_wait_done,
TRACE_EVENT(adreno_drawctxt_switch,
TP_PROTO(struct adreno_ringbuffer *rb,
- struct adreno_context *newctx,
- unsigned int flags),
- TP_ARGS(rb, newctx, flags),
+ struct adreno_context *newctx),
+ TP_ARGS(rb, newctx),
TP_STRUCT__entry(
__field(int, rb_level)
__field(unsigned int, oldctx)
@@ -283,8 +282,8 @@ TRACE_EVENT(adreno_drawctxt_switch,
__entry->newctx = newctx ? newctx->base.id : 0;
),
TP_printk(
- "rb level=%d oldctx=%u newctx=%u flags=%X",
- __entry->rb_level, __entry->oldctx, __entry->newctx, flags
+ "rb level=%d oldctx=%u newctx=%u",
+ __entry->rb_level, __entry->oldctx, __entry->newctx
)
);
@@ -427,8 +426,9 @@ TRACE_EVENT(kgsl_a5xx_irq_status,
DECLARE_EVENT_CLASS(adreno_hw_preempt_template,
TP_PROTO(struct adreno_ringbuffer *cur_rb,
- struct adreno_ringbuffer *new_rb),
- TP_ARGS(cur_rb, new_rb),
+ struct adreno_ringbuffer *new_rb,
+ unsigned int cur_rptr, unsigned int new_rptr),
+ TP_ARGS(cur_rb, new_rb, cur_rptr, new_rptr),
TP_STRUCT__entry(__field(int, cur_level)
__field(int, new_level)
__field(unsigned int, cur_rptr)
@@ -440,8 +440,8 @@ DECLARE_EVENT_CLASS(adreno_hw_preempt_template,
),
TP_fast_assign(__entry->cur_level = cur_rb->id;
__entry->new_level = new_rb->id;
- __entry->cur_rptr = cur_rb->rptr;
- __entry->new_rptr = new_rb->rptr;
+ __entry->cur_rptr = cur_rptr;
+ __entry->new_rptr = new_rptr;
__entry->cur_wptr = cur_rb->wptr;
__entry->new_wptr = new_rb->wptr;
__entry->cur_rbbase = cur_rb->buffer_desc.gpuaddr;
@@ -458,26 +458,30 @@ DECLARE_EVENT_CLASS(adreno_hw_preempt_template,
DEFINE_EVENT(adreno_hw_preempt_template, adreno_hw_preempt_clear_to_trig,
TP_PROTO(struct adreno_ringbuffer *cur_rb,
- struct adreno_ringbuffer *new_rb),
- TP_ARGS(cur_rb, new_rb)
+ struct adreno_ringbuffer *new_rb,
+ unsigned int cur_rptr, unsigned int new_rptr),
+ TP_ARGS(cur_rb, new_rb, cur_rptr, new_rptr)
);
DEFINE_EVENT(adreno_hw_preempt_template, adreno_hw_preempt_trig_to_comp,
TP_PROTO(struct adreno_ringbuffer *cur_rb,
- struct adreno_ringbuffer *new_rb),
- TP_ARGS(cur_rb, new_rb)
+ struct adreno_ringbuffer *new_rb,
+ unsigned int cur_rptr, unsigned int new_rptr),
+ TP_ARGS(cur_rb, new_rb, cur_rptr, new_rptr)
);
DEFINE_EVENT(adreno_hw_preempt_template, adreno_hw_preempt_trig_to_comp_int,
TP_PROTO(struct adreno_ringbuffer *cur_rb,
- struct adreno_ringbuffer *new_rb),
- TP_ARGS(cur_rb, new_rb)
+ struct adreno_ringbuffer *new_rb,
+ unsigned int cur_rptr, unsigned int new_rptr),
+ TP_ARGS(cur_rb, new_rb, cur_rptr, new_rptr)
);
TRACE_EVENT(adreno_hw_preempt_comp_to_clear,
TP_PROTO(struct adreno_ringbuffer *cur_rb,
- struct adreno_ringbuffer *new_rb),
- TP_ARGS(cur_rb, new_rb),
+ struct adreno_ringbuffer *new_rb,
+ unsigned int cur_rptr, unsigned int new_rptr),
+ TP_ARGS(cur_rb, new_rb, cur_rptr, new_rptr),
TP_STRUCT__entry(__field(int, cur_level)
__field(int, new_level)
__field(unsigned int, cur_rptr)
@@ -490,8 +494,8 @@ TRACE_EVENT(adreno_hw_preempt_comp_to_clear,
),
TP_fast_assign(__entry->cur_level = cur_rb->id;
__entry->new_level = new_rb->id;
- __entry->cur_rptr = cur_rb->rptr;
- __entry->new_rptr = new_rb->rptr;
+ __entry->cur_rptr = cur_rptr;
+ __entry->new_rptr = new_rptr;
__entry->cur_wptr = cur_rb->wptr;
__entry->new_wptr_end = new_rb->wptr_preempt_end;
__entry->new_wptr = new_rb->wptr;
@@ -509,8 +513,9 @@ TRACE_EVENT(adreno_hw_preempt_comp_to_clear,
TRACE_EVENT(adreno_hw_preempt_token_submit,
TP_PROTO(struct adreno_ringbuffer *cur_rb,
- struct adreno_ringbuffer *new_rb),
- TP_ARGS(cur_rb, new_rb),
+ struct adreno_ringbuffer *new_rb,
+ unsigned int cur_rptr, unsigned int new_rptr),
+ TP_ARGS(cur_rb, new_rb, cur_rptr, new_rptr),
TP_STRUCT__entry(__field(int, cur_level)
__field(int, new_level)
__field(unsigned int, cur_rptr)
@@ -523,8 +528,8 @@ TRACE_EVENT(adreno_hw_preempt_token_submit,
),
TP_fast_assign(__entry->cur_level = cur_rb->id;
__entry->new_level = new_rb->id;
- __entry->cur_rptr = cur_rb->rptr;
- __entry->new_rptr = new_rb->rptr;
+ __entry->cur_rptr = cur_rptr;
+ __entry->new_rptr = new_rptr;
__entry->cur_wptr = cur_rb->wptr;
__entry->cur_wptr_end = cur_rb->wptr_preempt_end;
__entry->new_wptr = new_rb->wptr;
@@ -541,23 +546,37 @@ TRACE_EVENT(adreno_hw_preempt_token_submit,
)
);
-TRACE_EVENT(adreno_rb_starve,
- TP_PROTO(struct adreno_ringbuffer *rb),
- TP_ARGS(rb),
- TP_STRUCT__entry(__field(int, id)
- __field(unsigned int, rptr)
- __field(unsigned int, wptr)
+TRACE_EVENT(adreno_preempt_trigger,
+ TP_PROTO(struct adreno_ringbuffer *cur, struct adreno_ringbuffer *next),
+ TP_ARGS(cur, next),
+ TP_STRUCT__entry(
+ __field(struct adreno_ringbuffer *, cur)
+ __field(struct adreno_ringbuffer *, next)
),
- TP_fast_assign(__entry->id = rb->id;
- __entry->rptr = rb->rptr;
- __entry->wptr = rb->wptr;
+ TP_fast_assign(
+ __entry->cur = cur;
+ __entry->next = next;
),
- TP_printk(
- "rb %d r/w %x/%x starved", __entry->id, __entry->rptr,
- __entry->wptr
+ TP_printk("trigger from id=%d to id=%d",
+ __entry->cur->id, __entry->next->id
)
);
+TRACE_EVENT(adreno_preempt_done,
+ TP_PROTO(struct adreno_ringbuffer *cur, struct adreno_ringbuffer *next),
+ TP_ARGS(cur, next),
+ TP_STRUCT__entry(
+ __field(struct adreno_ringbuffer *, cur)
+ __field(struct adreno_ringbuffer *, next)
+ ),
+ TP_fast_assign(
+ __entry->cur = cur;
+ __entry->next = next;
+ ),
+ TP_printk("done switch to id=%d from id=%d",
+ __entry->next->id, __entry->cur->id
+ )
+);
#endif /* _ADRENO_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2563591f376e..f77dbb7f20af 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -233,6 +233,8 @@ int kgsl_readtimestamp(struct kgsl_device *device, void *priv,
}
EXPORT_SYMBOL(kgsl_readtimestamp);
+static long gpumem_free_entry(struct kgsl_mem_entry *entry);
+
/* Scheduled by kgsl_mem_entry_put_deferred() */
static void _deferred_put(struct work_struct *work)
{
@@ -247,10 +249,8 @@ kgsl_mem_entry_create(void)
{
struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (entry != NULL) {
+ if (entry != NULL)
kref_init(&entry->refcount);
- INIT_WORK(&entry->work, _deferred_put);
- }
return entry;
}
@@ -1150,6 +1150,8 @@ static int kgsl_open_device(struct kgsl_device *device)
atomic_inc(&device->active_cnt);
kgsl_sharedmem_set(device, &device->memstore, 0, 0,
device->memstore.size);
+ kgsl_sharedmem_set(device, &device->scratch, 0, 0,
+ device->scratch.size);
result = device->ftbl->init(device);
if (result)
@@ -1855,7 +1857,10 @@ static long gpuobj_free_on_timestamp(struct kgsl_device_private *dev_priv,
static void gpuobj_free_fence_func(void *priv)
{
- kgsl_mem_entry_put_deferred((struct kgsl_mem_entry *) priv);
+ struct kgsl_mem_entry *entry = priv;
+
+ INIT_WORK(&entry->work, _deferred_put);
+ queue_work(kgsl_driver.mem_workqueue, &entry->work);
}
static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
@@ -3910,11 +3915,13 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
status = kgsl_allocate_global(device, &device->memstore,
KGSL_MEMSTORE_SIZE, 0, 0);
- if (status != 0) {
- KGSL_DRV_ERR(device, "kgsl_allocate_global failed %d\n",
- status);
+ if (status != 0)
goto error_close_mmu;
- }
+
+ status = kgsl_allocate_global(device, &device->scratch,
+ PAGE_SIZE, 0, 0);
+ if (status != 0)
+ goto error_free_memstore;
/*
* The default request type PM_QOS_REQ_ALL_CORES is
@@ -3964,6 +3971,8 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
return 0;
+error_free_memstore:
+ kgsl_free_global(device, &device->memstore);
error_close_mmu:
kgsl_mmu_close(device);
error_pwrctrl_close:
@@ -3990,6 +3999,8 @@ void kgsl_device_platform_remove(struct kgsl_device *device)
idr_destroy(&device->context_idr);
+ kgsl_free_global(device, &device->scratch);
+
kgsl_free_global(device, &device->memstore);
kgsl_mmu_close(device);
@@ -4091,8 +4102,9 @@ static int __init kgsl_core_init(void)
INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
kgsl_driver.workqueue = create_singlethread_workqueue("kgsl-workqueue");
- kgsl_driver.mem_workqueue =
- create_singlethread_workqueue("kgsl-mementry");
+
+ kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
kgsl_events_init();
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index dfe83be799b3..c172021c8944 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -37,6 +37,32 @@
#define KGSL_MEMSTORE_MAX (KGSL_MEMSTORE_SIZE / \
sizeof(struct kgsl_devmemstore) - 1 - KGSL_PRIORITY_MAX_RB_LEVELS)
+#define MEMSTORE_RB_OFFSET(rb, field) \
+ KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field)
+
+#define MEMSTORE_ID_GPU_ADDR(dev, iter, field) \
+ ((dev)->memstore.gpuaddr + KGSL_MEMSTORE_OFFSET(iter, field))
+
+#define MEMSTORE_RB_GPU_ADDR(dev, rb, field) \
+ ((dev)->memstore.gpuaddr + \
+ KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field))
+
+/*
+ * SCRATCH MEMORY: The scratch memory is one page worth of data that
+ * is mapped into the GPU. This allows for some 'shared' data between
+ * the GPU and CPU. For example, it will be used by the GPU to write
+ * each updated RPTR for each RB.
+ *
+ * Used Data:
+ * Offset: Length(bytes): What
+ * 0x0: 4 * KGSL_PRIORITY_MAX_RB_LEVELS: RB0 RPTR
+ */
+
+/* Shadow global helpers */
+#define SCRATCH_RPTR_OFFSET(id) ((id) * sizeof(unsigned int))
+#define SCRATCH_RPTR_GPU_ADDR(dev, id) \
+ ((dev)->scratch.gpuaddr + SCRATCH_RPTR_OFFSET(id))
+
/* Timestamp window used to detect rollovers (half of integer range) */
#define KGSL_TIMESTAMP_WINDOW 0x80000000
@@ -447,21 +473,6 @@ kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
kref_put(&entry->refcount, kgsl_mem_entry_destroy);
}
-/**
- * kgsl_mem_entry_put_deferred() - Schedule a task to put the memory entry
- * @entry: Mem entry to put
- *
- * This function is for atomic contexts where a normal kgsl_mem_entry_put()
- * would result in the memory entry getting destroyed and possibly taking
- * mutexes along the way. Schedule the work to happen outside of the atomic
- * context.
- */
-static inline void kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry)
-{
- if (entry != NULL)
- queue_work(kgsl_driver.mem_workqueue, &entry->work);
-}
-
/*
* kgsl_addr_range_overlap() - Checks if 2 ranges overlap
* @gpuaddr1: Start of first address range
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.h b/drivers/gpu/msm/kgsl_cmdbatch.h
index 1547ac02fdbf..d5cbf375b5d3 100644
--- a/drivers/gpu/msm/kgsl_cmdbatch.h
+++ b/drivers/gpu/msm/kgsl_cmdbatch.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,7 +31,6 @@
* @fault_policy: Internal policy describing how to handle this command in case
* of a fault
* @fault_recovery: recovery actions actually tried for this batch
- * @expires: Point in time when the cmdbatch is considered to be hung
* @refcount: kref structure to maintain the reference count
* @cmdlist: List of IBs to issue
* @memlist: List of all memory used in this command batch
@@ -61,7 +60,6 @@ struct kgsl_cmdbatch {
unsigned long priv;
unsigned long fault_policy;
unsigned long fault_recovery;
- unsigned long expires;
struct kref refcount;
struct list_head cmdlist;
struct list_head memlist;
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index c3fb2b81fcbd..4159a5fe375f 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -227,6 +227,7 @@ struct kgsl_device {
/* GPU shader memory size */
unsigned int shader_mem_len;
struct kgsl_memdesc memstore;
+ struct kgsl_memdesc scratch;
const char *iomemname;
const char *shadermemname;
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index e1f9ad17d0ff..6f70b9ddd376 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -56,6 +56,23 @@ static void _kgsl_event_worker(struct work_struct *work)
kmem_cache_free(events_cache, event);
}
+/* return true if the group needs to be processed */
+static bool _do_process_group(unsigned int processed, unsigned int cur)
+{
+ if (processed == cur)
+ return false;
+
+ /*
+ * This ensures that the timestamp didn't slip back accidentally, maybe
+ * due to a memory barrier issue. This is highly unlikely but we've
+ * been burned here in the past.
+ */
+ if ((cur < processed) && ((processed - cur) < KGSL_TIMESTAMP_WINDOW))
+ return false;
+
+ return true;
+}
+
static void _process_event_group(struct kgsl_device *device,
struct kgsl_event_group *group, bool flush)
{
@@ -80,11 +97,7 @@ static void _process_event_group(struct kgsl_device *device,
group->readtimestamp(device, group->priv, KGSL_TIMESTAMP_RETIRED,
&timestamp);
- /*
- * If no timestamps have been retired since the last time we were here
- * then we can avoid going through this loop
- */
- if (!flush && timestamp_cmp(timestamp, group->processed) <= 0)
+ if (!flush && _do_process_group(group->processed, timestamp) == false)
goto out;
list_for_each_entry_safe(event, tmp, &group->events, node) {
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index a338559ac0bb..103d290eb681 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -220,9 +220,6 @@ static int _attach_pt(struct kgsl_iommu_pt *iommu_pt,
if (ret == 0)
iommu_pt->attached = true;
- else
- KGSL_CORE_ERR("iommu_attach_device(%s) failed: %d\n",
- ctx->name, ret);
return ret;
}
@@ -1452,25 +1449,25 @@ done:
return ret;
}
+static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt);
+
static int kgsl_iommu_start(struct kgsl_mmu *mmu)
{
int status;
struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
- struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
status = _setup_user_context(mmu);
if (status)
return status;
status = _setup_secure_context(mmu);
- if (status)
+ if (status) {
_detach_context(&iommu->ctx[KGSL_IOMMU_CONTEXT_USER]);
- else {
- kgsl_iommu_enable_clk(mmu);
- KGSL_IOMMU_SET_CTX_REG(ctx, TLBIALL, 1);
- kgsl_iommu_disable_clk(mmu);
+ return status;
}
- return status;
+
+ /* Make sure the hardware is programmed to the default pagetable */
+ return kgsl_iommu_set_pt(mmu, mmu->defaultpagetable);
}
static int
@@ -1707,23 +1704,15 @@ kgsl_iommu_get_current_ttbr0(struct kgsl_mmu *mmu)
*
* Return - void
*/
-static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu,
- struct kgsl_pagetable *pt)
+static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
- struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
- int ret = 0;
uint64_t ttbr0, temp;
unsigned int contextidr;
unsigned long wait_for_flush;
- /*
- * If using a global pagetable, we can skip all this
- * because the pagetable will be set up by the iommu
- * driver and never changed at runtime.
- */
- if (!kgsl_mmu_is_perprocess(mmu))
+ if ((pt != mmu->defaultpagetable) && !kgsl_mmu_is_perprocess(mmu))
return 0;
kgsl_iommu_enable_clk(mmu);
@@ -1731,14 +1720,6 @@ static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu,
ttbr0 = kgsl_mmu_pagetable_get_ttbr0(pt);
contextidr = kgsl_mmu_pagetable_get_contextidr(pt);
- /*
- * Taking the liberty to spin idle since this codepath
- * is invoked when we can spin safely for it to be idle
- */
- ret = adreno_spin_idle(ADRENO_DEVICE(device), ADRENO_IDLE_TIMEOUT);
- if (ret)
- return ret;
-
KGSL_IOMMU_SET_CTX_REG_Q(ctx, TTBR0, ttbr0);
KGSL_IOMMU_SET_CTX_REG(ctx, CONTEXTIDR, contextidr);
@@ -1767,10 +1748,8 @@ static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu,
cpu_relax();
}
- /* Disable smmu clock */
kgsl_iommu_disable_clk(mmu);
-
- return ret;
+ return 0;
}
/*
@@ -1788,8 +1767,6 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- int ret = 0;
- unsigned int sctlr_val;
if ((adreno_dev->ft_pf_policy &
BIT(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)) ==
@@ -1798,10 +1775,7 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
/* If not attached, policy will be updated during the next attach */
if (ctx->default_pt != NULL) {
- /* Need to idle device before changing options */
- ret = device->ftbl->idle(device);
- if (ret)
- return ret;
+ unsigned int sctlr_val;
kgsl_iommu_enable_clk(mmu);
@@ -1820,7 +1794,7 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
kgsl_iommu_disable_clk(mmu);
}
- return ret;
+ return 0;
}
static struct kgsl_protected_registers *
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 3652aa2e6ec4..5339917911b1 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -21,6 +21,12 @@
#define KGSL_MMU_GLOBAL_PT 0
#define KGSL_MMU_SECURE_PT 1
+#define MMU_DEFAULT_TTBR0(_d) \
+ (kgsl_mmu_pagetable_get_ttbr0((_d)->mmu.defaultpagetable))
+
+#define MMU_DEFAULT_CONTEXTIDR(_d) \
+ (kgsl_mmu_pagetable_get_contextidr((_d)->mmu.defaultpagetable))
+
struct kgsl_device;
enum kgsl_mmutype {
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index da8c8585d31e..2b9eef8b6351 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1381,6 +1381,9 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
_isense_clk_set_rate(pwr,
pwr->num_pwrlevels - 1);
}
+
+ /* Turn off the IOMMU clocks */
+ kgsl_mmu_disable_clk(&device->mmu);
} else if (requested_state == KGSL_STATE_SLEEP) {
/* High latency clock maintenance. */
for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
@@ -1428,7 +1431,11 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
pwr->gpu_bimc_interface_enabled = 1;
}
}
+
+ /* Turn on the IOMMU clocks */
+ kgsl_mmu_enable_clk(&device->mmu);
}
+
}
}
diff --git a/drivers/input/touchscreen/it7258_ts_i2c.c b/drivers/input/touchscreen/it7258_ts_i2c.c
new file mode 100644
index 000000000000..048358e2ef9d
--- /dev/null
+++ b/drivers/input/touchscreen/it7258_ts_i2c.c
@@ -0,0 +1,851 @@
+/* drivers/input/touchscreen/it7258_ts_i2c.c
+ *
+ * Copyright (C) 2014 ITE Tech. Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+#define MAX_BUFFER_SIZE 144
+#define DEVICE_NAME "IT7260"
+#define SCREEN_X_RESOLUTION 320
+#define SCREEN_Y_RESOLUTION 320
+
+#define BUF_COMMAND 0x20 /* all commands writes go to this idx */
+#define BUF_SYS_COMMAND 0x40
+#define BUF_QUERY 0x80 /* "device ready?" and "wake up please" and "read touch data" reads go to this idx */
+#define BUF_RESPONSE 0xA0 /* most command response reads go to this idx */
+#define BUF_SYS_RESPONSE 0xC0
+#define BUF_POINT_INFO 0xE0 /* reads of "point" go through here and produce 14 bytes of data */
+
+/* commands and their subcommands. when no subcommands exist, a zero is sent as the second byte */
+#define CMD_IDENT_CHIP 0x00
+#define CMD_READ_VERSIONS 0x01 /* VERSION_LENGTH bytes of data in response */
+# define VER_FIRMWARE 0x00
+# define VER_CONFIG 0x06
+# define VERSION_LENGTH 10
+#define CMD_PWR_CTL 0x04 /* subcommand is zero, next byte is power mode */
+# define PWR_CTL_LOW_POWER_MODE 0x01 /* idle mode */
+# define PWR_CTL_SLEEP_MODE 0x02 /* sleep mode */
+#define CMD_UNKNOWN_7 0x07 /* command is not documented in the datasheet v1.0.0.7 */
+#define CMD_FIRMWARE_REINIT_C 0x0C
+#define CMD_CALIBRATE 0x13 /* needs to be followed by 4 bytes of zeroes */
+#define CMD_FIRMWARE_UPGRADE 0x60
+# define FIRMWARE_MODE_ENTER 0x00
+# define FIRMWARE_MODE_EXIT 0x80
+#define CMD_SET_START_OFFSET 0x61 /* address for FW read/write */
+#define CMD_FW_WRITE 0x62 /* subcommand is number of bytes to write */
+#define CMD_FW_READ 0x63 /* subcommand is number of bytes to read */
+#define CMD_FIRMWARE_REINIT_6F 0x6F
+
+#define FW_WRITE_CHUNK_SIZE 128
+#define FW_WRITE_RETRY_COUNT 4
+#define CHIP_FLASH_SIZE 0x8000
+#define SYSFS_FW_UPLOAD_MODE_MANUAL 2
+#define SYSFS_RESULT_FAIL (-1)
+#define SYSFS_RESULT_NOT_DONE 0
+#define SYSFS_RESULT_SUCCESS 1
+#define DEVICE_READY_MAX_WAIT 500
+
+/* result of reading with BUF_QUERY bits */
+#define CMD_STATUS_BITS 0x07
+#define CMD_STATUS_DONE 0x00
+#define CMD_STATUS_BUSY 0x01
+#define CMD_STATUS_ERROR 0x02
+#define PT_INFO_BITS 0xF8
+#define BT_INFO_NONE 0x00
+#define PT_INFO_YES 0x80
+#define BT_INFO_NONE_BUT_DOWN 0x08 /* no new data but finger(s) still down */
+
+/* use this to include integers in commands */
+#define CMD_UINT16(v) ((uint8_t)(v)) , ((uint8_t)((v) >> 8))
+
+
+struct FingerData {
+ uint8_t xLo;
+ uint8_t hi;
+ uint8_t yLo;
+ uint8_t pressure;
+} __attribute__((packed));
+
+struct PointData {
+ uint8_t flags;
+ uint8_t palm;
+ struct FingerData fd[3];
+} __attribute__((packed));
+
+#define PD_FLAGS_DATA_TYPE_BITS 0xF0
+/* other types (like chip-detected gestures) exist but we do not care */
+#define PD_FLAGS_DATA_TYPE_TOUCH 0x00
+#define PD_FLAGS_NOT_PEN 0x08 /* set if pen touched, clear if finger(s) */
+#define PD_FLAGS_HAVE_FINGERS 0x07 /* a bit for each finger data that is valid (from lsb to msb) */
+#define PD_PALM_FLAG_BIT 0x01
+#define FD_PRESSURE_BITS 0x0F
+#define FD_PRESSURE_NONE 0x00
+#define FD_PRESSURE_HOVER 0x01
+#define FD_PRESSURE_LIGHT 0x02
+#define FD_PRESSURE_NORMAL 0x04
+#define FD_PRESSURE_HIGH 0x08
+#define FD_PRESSURE_HEAVY 0x0F
+
+struct IT7260_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+};
+
+static int8_t fwUploadResult = SYSFS_RESULT_NOT_DONE;
+static int8_t calibrationWasSuccessful = SYSFS_RESULT_NOT_DONE;
+static bool devicePresent = false;
+static DEFINE_MUTEX(sleepModeMutex);
+static bool chipAwake = true;
+static bool hadFingerDown = false;
+static bool isDeviceSleeping = false;
+static bool isDeviceSuspend = false;
+static struct input_dev *input_dev;
+static struct IT7260_ts_data *gl_ts;
+
+#define LOGE(...) pr_err(DEVICE_NAME ": " __VA_ARGS__)
+#define LOGI(...) printk(DEVICE_NAME ": " __VA_ARGS__)
+
+/* internal use func - does not make sure chip is ready before read */
+static bool i2cReadNoReadyCheck(uint8_t bufferIndex, uint8_t *dataBuffer, uint16_t dataLength)
+{
+ struct i2c_msg msgs[2] = {
+ {
+ .addr = gl_ts->client->addr,
+ .flags = I2C_M_NOSTART,
+ .len = 1,
+ .buf = &bufferIndex
+ },
+ {
+ .addr = gl_ts->client->addr,
+ .flags = I2C_M_RD,
+ .len = dataLength,
+ .buf = dataBuffer
+ }
+ };
+
+ memset(dataBuffer, 0xFF, dataLength);
+
+ return i2c_transfer(gl_ts->client->adapter, msgs, 2);
+}
+
+static bool i2cWriteNoReadyCheck(uint8_t bufferIndex, const uint8_t *dataBuffer, uint16_t dataLength)
+{
+ uint8_t txbuf[257];
+ struct i2c_msg msg = {
+ .addr = gl_ts->client->addr,
+ .flags = 0,
+ .len = dataLength + 1,
+ .buf = txbuf
+ };
+
+ /* just to be careful */
+ BUG_ON(dataLength > sizeof(txbuf) - 1);
+
+ txbuf[0] = bufferIndex;
+ memcpy(txbuf + 1, dataBuffer, dataLength);
+
+ return i2c_transfer(gl_ts->client->adapter, &msg, 1);
+}
+
+/*
+ * Device is apparently always ready for i2c but not for actual register reads/writes.
+ * This function ascertains it is ready for that too. the results of this call often
+ * were ignored.
+ */
+static bool waitDeviceReady(bool forever, bool slowly)
+{
+ uint8_t ucQuery;
+ uint32_t count = DEVICE_READY_MAX_WAIT;
+
+ do{
+ if (!i2cReadNoReadyCheck(BUF_QUERY, &ucQuery, sizeof(ucQuery)))
+ ucQuery = CMD_STATUS_BUSY;
+
+ if (slowly)
+ mdelay(1000);
+ if (!forever)
+ count--;
+
+ }while((ucQuery & CMD_STATUS_BUSY) && count);
+
+ return !ucQuery;
+}
+
+static bool i2cRead(uint8_t bufferIndex, uint8_t *dataBuffer, uint16_t dataLength)
+{
+ waitDeviceReady(false, false);
+ return i2cReadNoReadyCheck(bufferIndex, dataBuffer, dataLength);
+}
+
+static bool i2cWrite(uint8_t bufferIndex, const uint8_t *dataBuffer, uint16_t dataLength)
+{
+ waitDeviceReady(false, false);
+ return i2cWriteNoReadyCheck(bufferIndex, dataBuffer, dataLength);
+}
+
+static bool chipFirmwareReinitialize(uint8_t cmdOfChoice)
+{
+ uint8_t cmd[] = {cmdOfChoice};
+ uint8_t rsp[2];
+
+ if (!i2cWrite(BUF_COMMAND, cmd, sizeof(cmd)))
+ return false;
+
+ if (!i2cRead(BUF_RESPONSE, rsp, sizeof(rsp)))
+ return false;
+
+ /* a reply of two zero bytes signifies success */
+ return !rsp[0] && !rsp[1];
+}
+
+static bool chipFirmwareUpgradeModeEnterExit(bool enter)
+{
+ uint8_t cmd[] = {CMD_FIRMWARE_UPGRADE, 0, 'I', 'T', '7', '2', '6', '0', 0x55, 0xAA};
+ uint8_t resp[2];
+
+ cmd[1] = enter ? FIRMWARE_MODE_ENTER : FIRMWARE_MODE_EXIT;
+ if (!i2cWrite(BUF_COMMAND, cmd, sizeof(cmd)))
+ return false;
+
+ if (!i2cRead(BUF_RESPONSE, resp, sizeof(resp)))
+ return false;
+
+ /* a reply of two zero bytes signifies success */
+ return !resp[0] && !resp[1];
+}
+
+static bool chipSetStartOffset(uint16_t offset)
+{
+ uint8_t cmd[] = {CMD_SET_START_OFFSET, 0, CMD_UINT16(offset)};
+ uint8_t resp[2];
+
+ if (!i2cWrite(BUF_COMMAND, cmd, 4))
+ return false;
+
+
+ if (!i2cRead(BUF_RESPONSE, resp, sizeof(resp)))
+ return false;
+
+
+ /* a reply of two zero bytes signifies success */
+ return !resp[0] && !resp[1];
+}
+
+
+/* write fwLength bytes from fwData at chip offset writeStartOffset */
+static bool chipFlashWriteAndVerify(unsigned int fwLength, const uint8_t *fwData, uint16_t writeStartOffset)
+{
+ uint32_t curDataOfst;
+
+ for (curDataOfst = 0; curDataOfst < fwLength; curDataOfst += FW_WRITE_CHUNK_SIZE) {
+
+ uint8_t cmdWrite[2 + FW_WRITE_CHUNK_SIZE] = {CMD_FW_WRITE};
+ uint8_t bufRead[FW_WRITE_CHUNK_SIZE];
+ uint8_t cmdRead[2] = {CMD_FW_READ};
+ unsigned i, nRetries;
+ uint32_t curWriteSz;
+
+ /* figure out how much to write */
+ curWriteSz = fwLength - curDataOfst;
+ if (curWriteSz > FW_WRITE_CHUNK_SIZE)
+ curWriteSz = FW_WRITE_CHUNK_SIZE;
+
+ /* prepare the write command */
+ cmdWrite[1] = curWriteSz;
+ for (i = 0; i < curWriteSz; i++)
+ cmdWrite[i + 2] = fwData[curDataOfst + i];
+
+ /* prepare the read command */
+ cmdRead[1] = curWriteSz;
+
+ for (nRetries = 0; nRetries < FW_WRITE_RETRY_COUNT; nRetries++) {
+
+ /* set write offset and write the data*/
+ chipSetStartOffset(writeStartOffset + curDataOfst);
+ i2cWrite(BUF_COMMAND, cmdWrite, 2 + curWriteSz);
+
+ /* set offset and read the data back */
+ chipSetStartOffset(writeStartOffset + curDataOfst);
+ i2cWrite(BUF_COMMAND, cmdRead, sizeof(cmdRead));
+ i2cRead(BUF_RESPONSE, bufRead, curWriteSz);
+
+ /* verify. If success break out of retry loop */
+ for (i = 0; i < curWriteSz && bufRead[i] == cmdWrite[i + 2]; i++);
+ if (i == curWriteSz)
+ break;
+ LOGE("write of data offset %u failed on try %u at byte %u/%u\n", curDataOfst, nRetries, i, curWriteSz);
+ }
+ /* if we've failed after all the retries, tell the caller */
+ if (nRetries == FW_WRITE_RETRY_COUNT)
+ return false;
+ }
+
+ return true;
+}
+
+static bool chipFirmwareUpload(uint32_t fwLen, const uint8_t *fwData, uint32_t cfgLen, const uint8_t *cfgData)
+{
+ bool success = false;
+
+ /* enter fw upload mode */
+ if (!chipFirmwareUpgradeModeEnterExit(true))
+ return false;
+
+ /* flash the firmware if requested */
+ if (fwLen && fwData && !chipFlashWriteAndVerify(fwLen, fwData, 0)) {
+ LOGE("failed to upload touch firmware\n");
+ goto out;
+ }
+
+ /* flash config data if requested */
+ if (fwLen && fwData && !chipFlashWriteAndVerify(cfgLen, cfgData, CHIP_FLASH_SIZE - cfgLen)) {
+ LOGE("failed to upload touch cfg data\n");
+ goto out;
+ }
+
+ success = true;
+
+out:
+ return chipFirmwareUpgradeModeEnterExit(false) && chipFirmwareReinitialize(CMD_FIRMWARE_REINIT_6F) && success;
+}
+
+/*
+ * both buffers should be VERSION_LENGTH in size,
+ * but only a part of them is significant
+ */
+static bool chipGetVersions(uint8_t *verFw, uint8_t *verCfg, bool logIt)
+{
+ /* this code to get versions is reproduced as was written, but it does not make sense. Something here *PROBABLY IS* wrong */
+ static const uint8_t cmdReadFwVer[] = {CMD_READ_VERSIONS, VER_FIRMWARE};
+ static const uint8_t cmdReadCfgVer[] = {CMD_READ_VERSIONS, VER_CONFIG};
+ bool ret = true;
+
+ /* this structure is so that we definitely do all the calls, but still return a status in case anyone cares */
+ ret = i2cWrite(BUF_COMMAND, cmdReadFwVer, sizeof(cmdReadFwVer)) && ret;
+ ret = i2cRead(BUF_RESPONSE, verFw, VERSION_LENGTH) && ret;
+ ret = i2cWrite(BUF_COMMAND, cmdReadCfgVer, sizeof(cmdReadCfgVer)) && ret;
+ ret = i2cRead(BUF_RESPONSE, verCfg, VERSION_LENGTH) && ret;
+
+ if (logIt)
+ LOGI("current versions: fw@{%X,%X,%X,%X}, cfg@{%X,%X,%X,%X}\n",
+ verFw[5], verFw[6], verFw[7], verFw[8],
+ verCfg[1], verCfg[2], verCfg[3], verCfg[4]);
+
+ return ret;
+}
+
+static ssize_t sysfsUpgradeStore(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ const struct firmware *fw, *cfg;
+ uint8_t verFw[10], verCfg[10];
+ unsigned fwLen = 0, cfgLen = 0;
+ bool manualUpgrade, success;
+ int mode = 0;
+
+ if (request_firmware(&fw, "it7260.fw", dev))
+ LOGE("failed to get firmware for it7260\n");
+ else
+ fwLen = fw->size;
+
+
+ if (request_firmware(&cfg, "it7260.cfg", dev))
+ LOGE("failed to get config data for it7260\n");
+ else
+ cfgLen = cfg->size;
+
+ sscanf(buf, "%d", &mode);
+ manualUpgrade = mode == SYSFS_FW_UPLOAD_MODE_MANUAL;
+ LOGI("firmware found %ub of fw and %ub of config in %s mode\n",
+ fwLen, cfgLen, manualUpgrade ? "manual" : "normal");
+
+ chipGetVersions(verFw, verCfg, true);
+
+ fwUploadResult = SYSFS_RESULT_NOT_DONE;
+ if (fwLen && cfgLen) {
+ if (manualUpgrade || (verFw[5] < fw->data[8] || verFw[6] <
+ fw->data[9] || verFw[7] < fw->data[10] || verFw[8] <
+ fw->data[11]) || (verCfg[1] < cfg->data[cfgLen - 8]
+ || verCfg[2] < cfg->data[cfgLen - 7] || verCfg[3] <
+ cfg->data[cfgLen - 6] ||
+ verCfg[4] < cfg->data[cfgLen - 5])){
+ LOGI("firmware/config will be upgraded\n");
+ disable_irq(gl_ts->client->irq);
+ success = chipFirmwareUpload(fwLen, fw->data, cfgLen, cfg->data);
+ enable_irq(gl_ts->client->irq);
+
+ fwUploadResult = success ? SYSFS_RESULT_SUCCESS : SYSFS_RESULT_FAIL;
+ LOGI("upload %s\n", success ? "success" : "failed");
+ }
+ else {
+ LOGI("firmware/config upgrade not needed\n");
+ }
+ }
+
+ if (fwLen)
+ release_firmware(fw);
+
+ if (cfgLen)
+ release_firmware(cfg);
+
+ return count;
+}
+
+static ssize_t sysfsUpgradeShow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d", fwUploadResult);
+}
+
+static ssize_t sysfsCalibrationShow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d", calibrationWasSuccessful);
+}
+
+static bool chipSendCalibrationCmd(bool autoTuneOn)
+{
+ uint8_t cmdCalibrate[] = {CMD_CALIBRATE, 0, autoTuneOn ? 1 : 0, 0, 0};
+ return i2cWrite(BUF_COMMAND, cmdCalibrate, sizeof(cmdCalibrate));
+}
+
+static ssize_t sysfsCalibrationStore(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ uint8_t resp;
+
+ if (!chipSendCalibrationCmd(false))
+ LOGE("failed to send calibration command\n");
+ else {
+ calibrationWasSuccessful = i2cRead(BUF_RESPONSE, &resp, sizeof(resp)) ? SYSFS_RESULT_SUCCESS : SYSFS_RESULT_FAIL;
+
+ /* previous logic that was here never called chipFirmwareReinitialize() due to checking a guaranteed-not-null value against null. We now call it. Hopefully this is OK */
+ if (!resp)
+ LOGI("chipFirmwareReinitialize -> %s\n", chipFirmwareReinitialize(CMD_FIRMWARE_REINIT_6F) ? "success" : "fail");
+ }
+
+ return count;
+}
+
+static ssize_t sysfsPointShow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ uint8_t pointData[sizeof(struct PointData)];
+ bool readSuccess;
+ ssize_t ret;
+
+ readSuccess = i2cReadNoReadyCheck(BUF_POINT_INFO, pointData, sizeof(pointData));
+ ret = sprintf(buf, "point_show read ret[%d]--point[%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x]=\n",
+ readSuccess, pointData[0],pointData[1],pointData[2],pointData[3],
+ pointData[4],pointData[5],pointData[6],pointData[7],pointData[8],
+ pointData[9],pointData[10],pointData[11],pointData[12],pointData[13]);
+
+
+ LOGI("%s", buf);
+
+ return ret;
+}
+
+static ssize_t sysfsPointStore(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ return count;
+}
+
+static ssize_t sysfsStatusShow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", devicePresent ? 1 : 0);
+}
+
+static ssize_t sysfsStatusStore(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ uint8_t verFw[10], verCfg[10];
+
+ chipGetVersions(verFw, verCfg, true);
+
+ return count;
+}
+
+static ssize_t sysfsVersionShow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ uint8_t verFw[10], verCfg[10];
+
+ chipGetVersions(verFw, verCfg, false);
+ return sprintf(buf, "%x,%x,%x,%x # %x,%x,%x,%x\n",verFw[5], verFw[6], verFw[7], verFw[8], verCfg[1], verCfg[2], verCfg[3], verCfg[4]);
+}
+
+static ssize_t sysfsVersionStore(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ return count;
+}
+
+static ssize_t sysfsSleepShow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ /*
+ * The usefulness of this was questionable at best - we were at least leaking a byte of
+ * kernel data (by claiming to return a byte but not writing to buf). To fix this now
+ * we actually return the sleep status
+ */
+ if (!mutex_lock_interruptible(&sleepModeMutex)) {
+ *buf = chipAwake ? '1' : '0';
+ mutex_unlock(&sleepModeMutex);
+ return 1;
+ }
+ else
+ return -EINTR;
+}
+
+static ssize_t sysfsSleepStore(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ static const uint8_t cmdGoSleep[] = {CMD_PWR_CTL, 0x00, PWR_CTL_SLEEP_MODE};
+ int goToSleepVal;
+ bool goToWake;
+ uint8_t dummy;
+
+ sscanf(buf, "%d", &goToSleepVal);
+ goToWake = !goToSleepVal; /* convert to bool of proper polarity */
+
+ if (!mutex_lock_interruptible(&sleepModeMutex)) {
+ if ((chipAwake && goToWake) || (!chipAwake && !goToWake))
+ LOGE("duplicate request to %s chip\n", goToWake ? "wake" : "sleep");
+ else if (goToWake) {
+ i2cReadNoReadyCheck(BUF_QUERY, &dummy, sizeof(dummy));
+ enable_irq(gl_ts->client->irq);
+ LOGI("touch is going to wake!\n\n");
+ } else {
+ disable_irq(gl_ts->client->irq);
+ i2cWriteNoReadyCheck(BUF_COMMAND, cmdGoSleep, sizeof(cmdGoSleep));
+ LOGI("touch is going to sleep...\n\n");
+ }
+ chipAwake = goToWake;
+ mutex_unlock(&sleepModeMutex);
+ return count;
+ } else
+ return -EINTR;
+}
+
+
+static DEVICE_ATTR(status, S_IRUGO|S_IWUSR|S_IWGRP, sysfsStatusShow, sysfsStatusStore);
+static DEVICE_ATTR(version, S_IRUGO|S_IWUSR|S_IWGRP, sysfsVersionShow, sysfsVersionStore);
+static DEVICE_ATTR(sleep, S_IRUGO|S_IWUSR|S_IWGRP, sysfsSleepShow, sysfsSleepStore);
+
+static struct attribute *it7260_attrstatus[] = {
+ &dev_attr_status.attr,
+ &dev_attr_version.attr,
+ &dev_attr_sleep.attr,
+ NULL
+};
+
+static const struct attribute_group it7260_attrstatus_group = {
+ .attrs = it7260_attrstatus,
+};
+
+static DEVICE_ATTR(calibration, S_IRUGO|S_IWUSR|S_IWGRP, sysfsCalibrationShow, sysfsCalibrationStore);
+static DEVICE_ATTR(upgrade, S_IRUGO|S_IWUSR|S_IWGRP, sysfsUpgradeShow, sysfsUpgradeStore);
+static DEVICE_ATTR(point, S_IRUGO|S_IWUSR|S_IWGRP, sysfsPointShow, sysfsPointStore);
+
+
+static struct attribute *it7260_attributes[] = {
+ &dev_attr_calibration.attr,
+ &dev_attr_upgrade.attr,
+ &dev_attr_point.attr,
+ NULL
+};
+
+static const struct attribute_group it7260_attr_group = {
+ .attrs = it7260_attributes,
+};
+
+static void chipExternalCalibration(bool autoTuneEnabled)
+{
+ uint8_t resp[2];
+
+ LOGI("sent calibration command -> %d\n", chipSendCalibrationCmd(autoTuneEnabled));
+ waitDeviceReady(true, true);
+ i2cReadNoReadyCheck(BUF_RESPONSE, resp, sizeof(resp));
+ chipFirmwareReinitialize(CMD_FIRMWARE_REINIT_C);
+}
+
+void sendCalibrationCmd(void)
+{
+ chipExternalCalibration(false);
+}
+EXPORT_SYMBOL(sendCalibrationCmd);
+
+static void readFingerData(uint16_t *xP, uint16_t *yP, uint8_t *pressureP, const struct FingerData *fd)
+{
+ uint16_t x = fd->xLo;
+ uint16_t y = fd->yLo;
+
+ x += ((uint16_t)(fd->hi & 0x0F)) << 8;
+ y += ((uint16_t)(fd->hi & 0xF0)) << 4;
+
+ if (xP)
+ *xP = x;
+ if (yP)
+ *yP = y;
+ if (pressureP)
+ *pressureP = fd->pressure & FD_PRESSURE_BITS;
+}
+
+static void readTouchDataPoint(void)
+{
+ struct PointData pointData;
+ uint8_t devStatus;
+ uint8_t pressure = FD_PRESSURE_NONE;
+ uint16_t x, y;
+
+ /* verify there is point data to read & it is readable and valid */
+ i2cReadNoReadyCheck(BUF_QUERY, &devStatus, sizeof(devStatus));
+ if (!((devStatus & PT_INFO_BITS) & PT_INFO_YES)) {
+ LOGE("readTouchDataPoint() called when no data available (0x%02X)\n", devStatus);
+ return;
+ }
+ if (!i2cReadNoReadyCheck(BUF_POINT_INFO, (void*)&pointData, sizeof(pointData))) {
+ LOGE("readTouchDataPoint() failed to read point data buffer\n");
+ return;
+ }
+ if ((pointData.flags & PD_FLAGS_DATA_TYPE_BITS) != PD_FLAGS_DATA_TYPE_TOUCH) {
+ LOGE("readTouchDataPoint() dropping non-point data of type 0x%02X\n", pointData.flags);
+ return;
+ }
+
+ if ((pointData.flags & PD_FLAGS_HAVE_FINGERS) & 1)
+ readFingerData(&x, &y, &pressure, pointData.fd);
+
+ if (pressure >= FD_PRESSURE_LIGHT) {
+
+ if (!hadFingerDown)
+ hadFingerDown = true;
+
+ readFingerData(&x, &y, &pressure, pointData.fd);
+
+ input_report_abs(gl_ts->input_dev, ABS_X, x);
+ input_report_abs(gl_ts->input_dev, ABS_Y, y);
+ input_report_key(gl_ts->input_dev, BTN_TOUCH, 1);
+ input_sync(gl_ts->input_dev);
+
+ } else if (hadFingerDown) {
+ hadFingerDown = false;
+
+ input_report_key(gl_ts->input_dev, BTN_TOUCH, 0);
+ input_sync(gl_ts->input_dev);
+ }
+
+}
+
+static irqreturn_t IT7260_ts_threaded_handler(int irq, void *devid)
+{
+ smp_rmb();
+ if (isDeviceSleeping) {
+ smp_wmb();
+ } else {
+ readTouchDataPoint();
+ }
+
+ return IRQ_HANDLED;
+}
+
+static bool chipIdentifyIT7260(void)
+{
+ static const uint8_t cmdIdent[] = {CMD_IDENT_CHIP};
+ static const uint8_t expectedID[] = {0x0A, 'I', 'T', 'E', '7', '2', '6', '0'};
+ uint8_t chipID[10] = {0,};
+
+ waitDeviceReady(true, false);
+
+ if (!i2cWriteNoReadyCheck(BUF_COMMAND, cmdIdent, sizeof(cmdIdent))) {
+ LOGE("i2cWrite() failed\n");
+ return false;
+ }
+
+ waitDeviceReady(true, false);
+
+ if (!i2cReadNoReadyCheck(BUF_RESPONSE, chipID, sizeof(chipID))) {
+ LOGE("i2cRead() failed\n");
+ return false;
+ }
+ pr_info("chipIdentifyIT7260 read id: %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ chipID[0], chipID[1], chipID[2], chipID[3], chipID[4],
+ chipID[5], chipID[6], chipID[7], chipID[8], chipID[9]);
+
+ if (memcmp(chipID, expectedID, sizeof(expectedID)))
+ return false;
+
+ if (chipID[8] == '5' && chipID[9] == '6')
+ LOGI("rev BX3 found\n");
+ else if (chipID[8] == '6' && chipID[9] == '6')
+ LOGI("rev BX4 found\n");
+ else
+ LOGI("unknown revision (0x%02X 0x%02X) found\n", chipID[8], chipID[9]);
+
+ return true;
+}
+
+static int IT7260_ts_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ static const uint8_t cmdStart[] = {CMD_UNKNOWN_7};
+ struct IT7260_i2c_platform_data *pdata;
+ uint8_t rsp[2];
+ int ret = -1;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ LOGE("need I2C_FUNC_I2C\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ if (!client->irq) {
+ LOGE("need IRQ\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+ gl_ts = kzalloc(sizeof(*gl_ts), GFP_KERNEL);
+ if (!gl_ts) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ gl_ts->client = client;
+ i2c_set_clientdata(client, gl_ts);
+ pdata = client->dev.platform_data;
+
+ if (sysfs_create_group(&(client->dev.kobj), &it7260_attrstatus_group)) {
+ dev_err(&client->dev, "failed to register sysfs #1\n");
+ goto err_sysfs_grp_create_1;
+ }
+
+ if (!chipIdentifyIT7260()) {
+ LOGI ("chipIdentifyIT7260 FAIL");
+ goto err_ident_fail_or_input_alloc;
+ }
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ LOGE("failed to allocate input device\n");
+ ret = -ENOMEM;
+ goto err_ident_fail_or_input_alloc;
+ }
+ gl_ts->input_dev = input_dev;
+
+ input_dev->name = DEVICE_NAME;
+ input_dev->phys = "I2C";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->id.vendor = 0x0001;
+ input_dev->id.product = 0x7260;
+ set_bit(EV_SYN, input_dev->evbit);
+ set_bit(EV_KEY, input_dev->evbit);
+ set_bit(EV_ABS, input_dev->evbit);
+ set_bit(INPUT_PROP_DIRECT,input_dev->propbit);
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(KEY_SLEEP,input_dev->keybit);
+ set_bit(KEY_WAKEUP,input_dev->keybit);
+ set_bit(KEY_POWER,input_dev->keybit);
+ input_set_abs_params(input_dev, ABS_X, 0, SCREEN_X_RESOLUTION, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, SCREEN_Y_RESOLUTION, 0, 0);
+
+ if (input_register_device(input_dev)) {
+ LOGE("failed to register input device\n");
+ goto err_input_register;
+ }
+
+ if (request_threaded_irq(client->irq, NULL, IT7260_ts_threaded_handler, IRQF_TRIGGER_LOW | IRQF_ONESHOT, client->name, gl_ts)) {
+ dev_err(&client->dev, "request_irq failed\n");
+ goto err_irq_reg;
+ }
+
+ if (sysfs_create_group(&(client->dev.kobj), &it7260_attr_group)) {
+ dev_err(&client->dev, "failed to register sysfs #2\n");
+ goto err_sysfs_grp_create_2;
+ }
+
+ devicePresent = true;
+
+ i2cWriteNoReadyCheck(BUF_COMMAND, cmdStart, sizeof(cmdStart));
+ mdelay(10);
+ i2cReadNoReadyCheck(BUF_RESPONSE, rsp, sizeof(rsp));
+ mdelay(10);
+
+ return 0;
+
+err_sysfs_grp_create_2:
+ free_irq(client->irq, gl_ts);
+
+err_irq_reg:
+ input_unregister_device(input_dev);
+ input_dev = NULL;
+
+err_input_register:
+ if (input_dev)
+ input_free_device(input_dev);
+
+err_ident_fail_or_input_alloc:
+ sysfs_remove_group(&(client->dev.kobj), &it7260_attrstatus_group);
+
+err_sysfs_grp_create_1:
+ kfree(gl_ts);
+
+err_out:
+ return ret;
+}
+
+static int IT7260_ts_remove(struct i2c_client *client)
+{
+ devicePresent = false;
+ return 0;
+}
+
+static const struct i2c_device_id IT7260_ts_id[] = {
+ { DEVICE_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, IT7260_ts_id);
+
+static const struct of_device_id IT7260_match_table[] = {
+ { .compatible = "ITE,IT7260_ts",},
+ {},
+};
+
+static int IT7260_ts_resume(struct i2c_client *i2cdev)
+{
+ isDeviceSuspend = false;
+ return 0;
+}
+
+static int IT7260_ts_suspend(struct i2c_client *i2cdev, pm_message_t pmesg)
+{
+ isDeviceSuspend = true;
+ return 0;
+}
+
+static struct i2c_driver IT7260_ts_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DEVICE_NAME,
+ .of_match_table = IT7260_match_table,
+ },
+ .probe = IT7260_ts_probe,
+ .remove = IT7260_ts_remove,
+ .id_table = IT7260_ts_id,
+ .resume = IT7260_ts_resume,
+ .suspend = IT7260_ts_suspend,
+};
+
+module_i2c_driver(IT7260_ts_driver);
+
+MODULE_DESCRIPTION("IT7260 Touchscreen Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index a76ccc06c9e1..d42ada769380 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -101,29 +101,21 @@ static void msm_vfe40_config_irq(struct vfe_device *vfe_dev,
uint32_t irq0_mask, uint32_t irq1_mask,
enum msm_isp_irq_operation oper)
{
- uint32_t val;
-
switch (oper) {
case MSM_ISP_IRQ_ENABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
- val |= irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x28);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
- val |= irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x2C);
+ vfe_dev->irq0_mask |= irq0_mask;
+ vfe_dev->irq1_mask |= irq1_mask;
break;
case MSM_ISP_IRQ_DISABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
- val &= ~irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x28);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
- val &= ~irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x2C);
+ vfe_dev->irq0_mask &= ~irq0_mask;
+ vfe_dev->irq1_mask &= ~irq1_mask;
break;
case MSM_ISP_IRQ_SET:
- msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x28);
- msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x2C);
+ vfe_dev->irq0_mask = irq0_mask;
+ vfe_dev->irq1_mask = irq1_mask;
}
+ msm_camera_io_w_mb(vfe_dev->irq0_mask, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(vfe_dev->irq1_mask, vfe_dev->vfe_base + 0x2C);
}
static int32_t msm_vfe40_init_qos_parms(struct vfe_device *vfe_dev,
@@ -335,10 +327,8 @@ static void msm_vfe40_init_hardware_reg(struct vfe_device *vfe_dev)
msm_vfe40_init_vbif_parms(vfe_dev, &vbif_parms);
/* BUS_CFG */
msm_camera_io_w(0x10000001, vfe_dev->vfe_base + 0x50);
- vfe_dev->irq0_mask = 0xE00000F1;
- vfe_dev->irq1_mask = 0xFEFFFFFF;
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, 0x800000E0, 0xFEFFFF7E,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x34);
msm_camera_io_w(1, vfe_dev->vfe_base + 0x24);
@@ -346,15 +336,13 @@ static void msm_vfe40_init_hardware_reg(struct vfe_device *vfe_dev)
msm_camera_io_w(0, vfe_dev->vfe_base + 0x30);
msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x34);
msm_camera_io_w(1, vfe_dev->vfe_base + 0x24);
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
}
static void msm_vfe40_clear_status_reg(struct vfe_device *vfe_dev)
{
vfe_dev->irq0_mask = (1 << 31);
vfe_dev->irq1_mask = 0;
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
+ msm_vfe40_config_irq(vfe_dev, (1 << 31), 0,
MSM_ISP_IRQ_SET);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
@@ -589,7 +577,6 @@ static void msm_vfe40_read_irq_status(struct vfe_device *vfe_dev,
if (*irq_status1 & (1 << 0)) {
vfe_dev->error_info.camif_status =
msm_camera_io_r(vfe_dev->vfe_base + 0x31C);
- vfe_dev->irq1_mask &= ~(1 << 0);
msm_vfe40_config_irq(vfe_dev, 0, (1 << 0), MSM_ISP_IRQ_DISABLE);
}
@@ -812,11 +799,9 @@ static void msm_vfe40_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
comp_mask |= (axi_data->composite_info[comp_mask_index].
stream_composite_mask << (comp_mask_index * 8));
- vfe_dev->irq0_mask |= 1 << (comp_mask_index + 25);
-
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, 1 << (comp_mask_index + 25), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe40_axi_clear_comp_mask(struct vfe_device *vfe_dev,
@@ -828,27 +813,24 @@ static void msm_vfe40_axi_clear_comp_mask(struct vfe_device *vfe_dev,
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
- vfe_dev->irq0_mask &= ~(1 << (comp_mask_index + 25));
-
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, (1 << (comp_mask_index + 25)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe40_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask |= 1 << (stream_info->wm[0] + 8);
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe40_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
vfe_dev->irq0_mask &= ~(1 << (stream_info->wm[0] + 8));
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe40_cfg_framedrop(void __iomem *vfe_base,
@@ -1088,10 +1070,8 @@ static void msm_vfe40_cfg_fetch_engine(struct vfe_device *vfe_dev,
temp |= (1 << 1);
msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
- vfe_dev->irq0_mask &= 0xFEFFFFFF;
- vfe_dev->irq0_mask |= (1 << 24);
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, (1 << 24), 0,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w((fe_cfg->fetch_height - 1),
vfe_dev->vfe_base + 0x238);
@@ -1382,13 +1362,11 @@ static void msm_vfe40_update_camif_state(struct vfe_device *vfe_dev,
return;
if (update_state == ENABLE_CAMIF) {
- msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
- msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w_mb(0x81, vfe_dev->vfe_base + 0x34);
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24);
- vfe_dev->irq0_mask |= 0xF7;
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, 0xF7, 0x81,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x318);
bus_en =
@@ -1413,8 +1391,8 @@ static void msm_vfe40_update_camif_state(struct vfe_device *vfe_dev,
if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
update_state = DISABLE_CAMIF;
- msm_vfe40_config_irq(vfe_dev, 0, 0,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, 0, 0x81,
+ MSM_ISP_IRQ_DISABLE);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x464);
/* disable danger signal */
msm_camera_io_w_mb(val & ~(1 << 8), vfe_dev->vfe_base + 0x464);
@@ -1897,6 +1875,9 @@ static void msm_vfe40_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
comp_mask_reg |= mask_bf_scale << (16 + request_comp_index * 8);
atomic_set(stats_comp_mask, stats_mask |
atomic_read(stats_comp_mask));
+ msm_vfe40_config_irq(vfe_dev,
+ 1 << (request_comp_index + 29), 0,
+ MSM_ISP_IRQ_ENABLE);
} else {
if (!(atomic_read(stats_comp_mask) & stats_mask))
return;
@@ -1904,6 +1885,9 @@ static void msm_vfe40_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
~stats_mask & atomic_read(stats_comp_mask));
comp_mask_reg &= ~(mask_bf_scale <<
(16 + request_comp_index * 8));
+ msm_vfe40_config_irq(vfe_dev,
+ 1 << (request_comp_index + 29), 0,
+ MSM_ISP_IRQ_DISABLE);
}
msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x44);
@@ -1919,20 +1903,18 @@ static void msm_vfe40_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- vfe_dev->irq0_mask |=
- 1 << (STATS_IDX(stream_info->stream_handle) + 16);
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev,
+ 1 << (STATS_IDX(stream_info->stream_handle) + 16), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe40_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- vfe_dev->irq0_mask &=
- ~(1 << (STATS_IDX(stream_info->stream_handle) + 16));
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev,
+ (1 << (STATS_IDX(stream_info->stream_handle) + 16)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe40_stats_cfg_wm_reg(
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
index 08b20395813c..388656b9ca30 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
@@ -70,30 +70,22 @@ static void msm_vfe44_config_irq(struct vfe_device *vfe_dev,
uint32_t irq0_mask, uint32_t irq1_mask,
enum msm_isp_irq_operation oper)
{
- uint32_t val;
-
switch (oper) {
case MSM_ISP_IRQ_ENABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
- val |= irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x28);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
- val |= irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x2C);
+ vfe_dev->irq0_mask |= irq0_mask;
+ vfe_dev->irq1_mask |= irq1_mask;
break;
case MSM_ISP_IRQ_DISABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
- val &= ~irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x28);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
- val &= ~irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x2C);
+ vfe_dev->irq0_mask &= ~irq0_mask;
+ vfe_dev->irq1_mask &= ~irq1_mask;
break;
case MSM_ISP_IRQ_SET:
- msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x28);
- msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x2C);
+ vfe_dev->irq0_mask = irq0_mask;
+ vfe_dev->irq1_mask = irq1_mask;
break;
}
+ msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x2C);
}
static int32_t msm_vfe44_init_dt_parms(struct vfe_device *vfe_dev,
@@ -181,10 +173,8 @@ static void msm_vfe44_init_hardware_reg(struct vfe_device *vfe_dev)
/* BUS_CFG */
msm_camera_io_w(0x10000001, vfe_dev->vfe_base + 0x50);
- vfe_dev->irq0_mask = 0xE00000F1;
- vfe_dev->irq1_mask = 0xFFFFFFFF;
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, 0x800000E0, 0xFFFFFF7E,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24);
@@ -193,9 +183,7 @@ static void msm_vfe44_init_hardware_reg(struct vfe_device *vfe_dev)
static void msm_vfe44_clear_status_reg(struct vfe_device *vfe_dev)
{
- vfe_dev->irq0_mask = 0x80000000;
- vfe_dev->irq1_mask = 0;
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
+ msm_vfe44_config_irq(vfe_dev, 0x80000000, 0,
MSM_ISP_IRQ_SET);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
@@ -419,7 +407,6 @@ static void msm_vfe44_read_irq_status(struct vfe_device *vfe_dev,
if (*irq_status1 & (1 << 0)) {
vfe_dev->error_info.camif_status =
msm_camera_io_r(vfe_dev->vfe_base + 0x31C);
- vfe_dev->irq1_mask &= ~(1 << 0);
msm_vfe44_config_irq(vfe_dev, 0, (1 << 0), MSM_ISP_IRQ_DISABLE);
}
@@ -650,9 +637,8 @@ static void msm_vfe44_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
stream_composite_mask << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
- vfe_dev->irq0_mask |= 1 << (comp_mask_index + 25);
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, 1 << (comp_mask_index + 25), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe44_axi_clear_comp_mask(struct vfe_device *vfe_dev,
@@ -664,25 +650,22 @@ static void msm_vfe44_axi_clear_comp_mask(struct vfe_device *vfe_dev,
comp_mask &= ~(0x7F << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
- vfe_dev->irq0_mask &= ~(1 << (comp_mask_index + 25));
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, (1 << (comp_mask_index + 25)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe44_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask |= 1 << (stream_info->wm[0] + 8);
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe44_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask &= ~(1 << (stream_info->wm[0] + 8));
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe44_cfg_framedrop(void __iomem *vfe_base,
@@ -918,10 +901,8 @@ static void msm_vfe44_cfg_fetch_engine(struct vfe_device *vfe_dev,
temp |= (1 << 1);
msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
- vfe_dev->irq0_mask &= 0xFEFFFFFF;
- vfe_dev->irq0_mask |= (1 << 24);
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask, MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, (1 << 24), 0,
+ MSM_ISP_IRQ_SET);
msm_camera_io_w((fe_cfg->fetch_height - 1) & 0xFFF,
vfe_dev->vfe_base + 0x238);
@@ -1045,13 +1026,12 @@ static void msm_vfe44_update_camif_state(struct vfe_device *vfe_dev,
return;
if (update_state == ENABLE_CAMIF) {
- msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
- msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w_mb(0x81, vfe_dev->vfe_base + 0x34);
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24);
- vfe_dev->irq0_mask |= 0xF7;
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask, MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, 0xF7, 0x81,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x318);
bus_en =
@@ -1075,7 +1055,7 @@ static void msm_vfe44_update_camif_state(struct vfe_device *vfe_dev,
if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
update_state = DISABLE_CAMIF;
msm_vfe44_config_irq(vfe_dev, 0,
- 0, MSM_ISP_IRQ_SET);
+ 0x81, MSM_ISP_IRQ_DISABLE);
val = msm_camera_io_r(vfe_dev->vfe_base + 0xC18);
/* disable danger signal */
msm_camera_io_w_mb(val & ~(1 << 8), vfe_dev->vfe_base + 0xC18);
@@ -1526,6 +1506,9 @@ static void msm_vfe44_stats_cfg_comp_mask(
comp_mask_reg |= mask_bf_scale << (16 + request_comp_index * 8);
atomic_set(stats_comp_mask, stats_mask |
atomic_read(stats_comp_mask));
+ msm_vfe44_config_irq(vfe_dev,
+ 1 << (request_comp_index + 29), 0,
+ MSM_ISP_IRQ_ENABLE);
} else {
if (!(atomic_read(stats_comp_mask) & stats_mask))
return;
@@ -1540,6 +1523,9 @@ static void msm_vfe44_stats_cfg_comp_mask(
~stats_mask & atomic_read(stats_comp_mask));
comp_mask_reg &= ~(mask_bf_scale <<
(16 + request_comp_index * 8));
+ msm_vfe44_config_irq(vfe_dev,
+ 1 << (request_comp_index + 29), 0,
+ MSM_ISP_IRQ_DISABLE);
}
msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x44);
@@ -1555,20 +1541,18 @@ static void msm_vfe44_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- vfe_dev->irq0_mask |=
- 1 << (STATS_IDX(stream_info->stream_handle) + 15);
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev,
+ 1 << (STATS_IDX(stream_info->stream_handle) + 15), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe44_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- vfe_dev->irq0_mask &=
- ~(1 << (STATS_IDX(stream_info->stream_handle) + 15));
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev,
+ (1 << (STATS_IDX(stream_info->stream_handle) + 15)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe44_stats_cfg_wm_reg(
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
index 9f815e65edc8..40bb044fde47 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
@@ -92,30 +92,24 @@ static void msm_vfe46_config_irq(struct vfe_device *vfe_dev,
uint32_t irq0_mask, uint32_t irq1_mask,
enum msm_isp_irq_operation oper)
{
- uint32_t val;
-
switch (oper) {
case MSM_ISP_IRQ_ENABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
- val |= irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x5C);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
- val |= irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x60);
+ vfe_dev->irq0_mask |= irq0_mask;
+ vfe_dev->irq1_mask |= irq1_mask;
break;
case MSM_ISP_IRQ_DISABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
- val &= ~irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x5C);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
- val &= ~irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x60);
+ vfe_dev->irq0_mask &= ~irq0_mask;
+ vfe_dev->irq1_mask &= ~irq1_mask;
break;
case MSM_ISP_IRQ_SET:
- msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x5C);
- msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x60);
+ vfe_dev->irq0_mask = irq0_mask;
+ vfe_dev->irq1_mask = irq1_mask;
break;
}
+ msm_camera_io_w_mb(vfe_dev->irq0_mask,
+ vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w_mb(vfe_dev->irq1_mask,
+ vfe_dev->vfe_base + 0x60);
}
static int32_t msm_vfe46_init_dt_parms(struct vfe_device *vfe_dev,
@@ -208,20 +202,16 @@ static void msm_vfe46_init_hardware_reg(struct vfe_device *vfe_dev)
/* BUS_CFG */
msm_camera_io_w(0x00000001, vfe_dev->vfe_base + 0x84);
/* IRQ_MASK/CLEAR */
- vfe_dev->irq0_mask = 0xE00000F1;
- vfe_dev->irq1_mask = 0xE1FFFFFF;
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 0x810000E0, 0xFFFFFF7E,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
}
static void msm_vfe46_clear_status_reg(struct vfe_device *vfe_dev)
{
- vfe_dev->irq0_mask = 0x80000000;
- vfe_dev->irq1_mask = 0x0;
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 0x80000000, 0, MSM_ISP_IRQ_SET);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
@@ -355,7 +345,6 @@ static void msm_vfe46_read_irq_status(struct vfe_device *vfe_dev,
if (*irq_status1 & (1 << 0)) {
vfe_dev->error_info.camif_status =
msm_camera_io_r(vfe_dev->vfe_base + 0x3D0);
- vfe_dev->irq1_mask &= ~(1 << 0);
msm_vfe46_config_irq(vfe_dev, 0, (1 << 0), MSM_ISP_IRQ_DISABLE);
}
@@ -587,9 +576,8 @@ static void msm_vfe46_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
stream_composite_mask << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
- vfe_dev->irq0_mask |= 1 << (comp_mask_index + 25);
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 1 << (comp_mask_index + 25), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe46_axi_clear_comp_mask(struct vfe_device *vfe_dev,
@@ -601,25 +589,22 @@ static void msm_vfe46_axi_clear_comp_mask(struct vfe_device *vfe_dev,
comp_mask &= ~(0x7F << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
- vfe_dev->irq0_mask |= 1 << (comp_mask_index + 25);
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 1 << (comp_mask_index + 25), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe46_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask |= 1 << (stream_info->wm[0] + 8);
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe46_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask &= ~(1 << (stream_info->wm[0] + 8));
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe46_cfg_framedrop(void __iomem *vfe_base,
@@ -857,10 +842,8 @@ static void msm_vfe46_cfg_fetch_engine(struct vfe_device *vfe_dev,
temp |= (1 << 1);
msm_camera_io_w(temp, vfe_dev->vfe_base + 0x84);
- vfe_dev->irq0_mask &= 0xFEFFFFFF;
- vfe_dev->irq0_mask |= (1 << 24);
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask, MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 1 << 24, 0,
+ MSM_ISP_IRQ_ENABLE);
temp = fe_cfg->fetch_height - 1;
msm_camera_io_w(temp & 0x3FFF, vfe_dev->vfe_base + 0x278);
@@ -1120,9 +1103,11 @@ static void msm_vfe46_update_camif_state(struct vfe_device *vfe_dev,
return;
if (update_state == ENABLE_CAMIF) {
- vfe_dev->irq0_mask |= 0xF5;
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask, MSM_ISP_IRQ_SET);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w(0x81, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+ msm_vfe46_config_irq(vfe_dev, 0x15, 0x81,
+ MSM_ISP_IRQ_ENABLE);
bus_en =
((vfe_dev->axi_data.
@@ -1148,7 +1133,8 @@ static void msm_vfe46_update_camif_state(struct vfe_device *vfe_dev,
if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
update_state = DISABLE_CAMIF;
- msm_vfe46_config_irq(vfe_dev, 0, 0, MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 0, 0x81,
+ MSM_ISP_IRQ_DISABLE);
/* disable danger signal */
val = msm_camera_io_r(vfe_dev->vfe_base + 0xC18);
val &= ~(1 << 8);
@@ -1611,6 +1597,9 @@ static void msm_vfe46_stats_cfg_comp_mask(
comp_mask_reg |= mask_bf_scale << (16 + request_comp_index * 8);
atomic_set(stats_comp_mask, stats_mask |
atomic_read(stats_comp_mask));
+ msm_vfe46_config_irq(vfe_dev,
+ 1 << (request_comp_index + 29), 0,
+ MSM_ISP_IRQ_ENABLE);
} else {
if (!(atomic_read(stats_comp_mask) & stats_mask))
return;
@@ -1625,6 +1614,9 @@ static void msm_vfe46_stats_cfg_comp_mask(
~stats_mask & atomic_read(stats_comp_mask));
comp_mask_reg &= ~(mask_bf_scale <<
(16 + request_comp_index * 8));
+ msm_vfe46_config_irq(vfe_dev,
+ 1 << (request_comp_index + 29), 0,
+ MSM_ISP_IRQ_DISABLE);
}
msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x78);
@@ -1640,19 +1632,18 @@ static void msm_vfe46_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- vfe_dev->irq0_mask |= 1 << (STATS_IDX(stream_info->stream_handle) + 15);
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev,
+ 1 << (STATS_IDX(stream_info->stream_handle) + 15), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe46_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- vfe_dev->irq0_mask &=
- ~(1 << (STATS_IDX(stream_info->stream_handle) + 15));
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev,
+ 1 << (STATS_IDX(stream_info->stream_handle) + 15), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe46_stats_cfg_wm_reg(
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 20aa69f322db..290f100ffeba 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -149,30 +149,24 @@ void msm_vfe47_config_irq(struct vfe_device *vfe_dev,
uint32_t irq0_mask, uint32_t irq1_mask,
enum msm_isp_irq_operation oper)
{
- uint32_t val;
-
switch (oper) {
case MSM_ISP_IRQ_ENABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
- val |= irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x5C);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
- val |= irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x60);
+ vfe_dev->irq0_mask |= irq0_mask;
+ vfe_dev->irq1_mask |= irq1_mask;
break;
case MSM_ISP_IRQ_DISABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
- val &= ~irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x5C);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
- val &= ~irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x60);
+ vfe_dev->irq0_mask &= ~irq0_mask;
+ vfe_dev->irq1_mask &= ~irq1_mask;
break;
case MSM_ISP_IRQ_SET:
- msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x5C);
- msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x60);
+ vfe_dev->irq0_mask = irq0_mask;
+ vfe_dev->irq1_mask = irq1_mask;
break;
}
+ msm_camera_io_w_mb(vfe_dev->irq0_mask,
+ vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w_mb(vfe_dev->irq1_mask,
+ vfe_dev->vfe_base + 0x60);
}
static int32_t msm_vfe47_init_dt_parms(struct vfe_device *vfe_dev,
@@ -285,13 +279,6 @@ int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
else
id = CAM_AHB_CLIENT_VFE1;
- rc = cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE);
- if (rc < 0) {
- pr_err("%s: failed to vote for AHB\n", __func__);
- goto ahb_vote_fail;
- }
- vfe_dev->ahb_vote = CAM_AHB_SVS_VOTE;
-
rc = vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(
vfe_dev, 1);
if (rc)
@@ -302,6 +289,13 @@ int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
if (rc)
goto clk_enable_failed;
+ rc = cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ goto ahb_vote_fail;
+ }
+ vfe_dev->ahb_vote = CAM_AHB_SVS_VOTE;
+
vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
vfe_dev->vfe_base;
@@ -312,14 +306,14 @@ int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
return rc;
irq_enable_fail:
vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
- vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 0);
-clk_enable_failed:
- vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
-enable_regulators_failed:
if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SUSPEND_VOTE) < 0)
pr_err("%s: failed to remove vote for AHB\n", __func__);
vfe_dev->ahb_vote = CAM_AHB_SUSPEND_VOTE;
ahb_vote_fail:
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 0);
+clk_enable_failed:
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
+enable_regulators_failed:
return rc;
}
@@ -338,9 +332,6 @@ void msm_vfe47_release_hardware(struct vfe_device *vfe_dev)
msm_isp_flush_tasklet(vfe_dev);
vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
- vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(
- vfe_dev, 0);
- vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id, 0, 0);
@@ -351,7 +342,12 @@ void msm_vfe47_release_hardware(struct vfe_device *vfe_dev)
if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SUSPEND_VOTE) < 0)
pr_err("%s: failed to vote for AHB\n", __func__);
- vfe_dev->ahb_vote = CAM_AHB_SUSPEND_VOTE;
+
+ vfe_dev->ahb_vote = CAM_AHB_SUSPEND_VOTE;
+
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(
+ vfe_dev, 0);
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
}
void msm_vfe47_init_hardware_reg(struct vfe_device *vfe_dev)
@@ -382,19 +378,16 @@ void msm_vfe47_init_hardware_reg(struct vfe_device *vfe_dev)
/* BUS_CFG */
msm_camera_io_w(0x00000101, vfe_dev->vfe_base + 0x84);
/* IRQ_MASK/CLEAR */
- vfe_dev->irq0_mask = 0xE00000F3;
- vfe_dev->irq1_mask = 0xFFFFFFFF;
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe47_config_irq(vfe_dev, 0x810000E0, 0xFFFFFF7E,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
}
void msm_vfe47_clear_status_reg(struct vfe_device *vfe_dev)
{
- vfe_dev->irq0_mask = 0x80000000;
- vfe_dev->irq1_mask = 0x0;
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
+ msm_vfe47_config_irq(vfe_dev, 0x80000000, 0x0,
MSM_ISP_IRQ_SET);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
@@ -543,7 +536,6 @@ void msm_vfe47_read_irq_status(struct vfe_device *vfe_dev,
vfe_dev->error_info.camif_status =
msm_camera_io_r(vfe_dev->vfe_base + 0x4A4);
/* mask off camif error after first occurrance */
- vfe_dev->irq1_mask &= ~(1 << 0);
msm_vfe47_config_irq(vfe_dev, 0, (1 << 0), MSM_ISP_IRQ_DISABLE);
}
@@ -785,9 +777,8 @@ void msm_vfe47_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
stream_composite_mask << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
- vfe_dev->irq0_mask |= 1 << (comp_mask_index + 25);
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe47_config_irq(vfe_dev, 1 << (comp_mask_index + 25), 0,
+ MSM_ISP_IRQ_ENABLE);
}
void msm_vfe47_axi_clear_comp_mask(struct vfe_device *vfe_dev,
@@ -799,25 +790,22 @@ void msm_vfe47_axi_clear_comp_mask(struct vfe_device *vfe_dev,
comp_mask &= ~(0x7F << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
- vfe_dev->irq0_mask &= ~(1 << (comp_mask_index + 25));
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe47_config_irq(vfe_dev, (1 << (comp_mask_index + 25)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
void msm_vfe47_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask |= 1 << (stream_info->wm[0] + 8);
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe47_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ MSM_ISP_IRQ_ENABLE);
}
void msm_vfe47_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask &= ~(1 << (stream_info->wm[0] + 8));
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe47_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
void msm_vfe47_cfg_framedrop(void __iomem *vfe_base,
@@ -1065,10 +1053,8 @@ void msm_vfe47_cfg_fetch_engine(struct vfe_device *vfe_dev,
temp |= (1 << 1);
msm_camera_io_w(temp, vfe_dev->vfe_base + 0x84);
- vfe_dev->irq0_mask &= 0xFEFFFFFF;
- vfe_dev->irq0_mask |= (1 << 24);
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask, MSM_ISP_IRQ_SET);
+ msm_vfe47_config_irq(vfe_dev, (1 << 24), 0,
+ MSM_ISP_IRQ_ENABLE);
temp = fe_cfg->fetch_height - 1;
msm_camera_io_w(temp & 0x3FFF, vfe_dev->vfe_base + 0x308);
@@ -1394,9 +1380,11 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C);
if (update_state == ENABLE_CAMIF) {
- vfe_dev->irq0_mask |= 0xF5;
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask, MSM_ISP_IRQ_SET);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w(0x81, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+ msm_vfe47_config_irq(vfe_dev, 0x15, 0x81,
+ MSM_ISP_IRQ_ENABLE);
if ((vfe_dev->hvx_cmd > HVX_DISABLE) &&
(vfe_dev->hvx_cmd <= HVX_ROUND_TRIP))
@@ -1427,8 +1415,9 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
/* For testgen always halt on camif boundary */
if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
update_state = DISABLE_CAMIF;
- /* turn off all irq before camif disable */
- msm_vfe47_config_irq(vfe_dev, 0, 0, MSM_ISP_IRQ_SET);
+ /* turn off camif violation and error irqs */
+ msm_vfe47_config_irq(vfe_dev, 0, 0x81,
+ MSM_ISP_IRQ_DISABLE);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x464);
/* disable danger signal */
msm_camera_io_w_mb(val & ~(1 << 8), vfe_dev->vfe_base + 0x464);
@@ -1896,6 +1885,8 @@ void msm_vfe47_stats_cfg_comp_mask(
comp_mask_reg |= stats_mask << (request_comp_index * 16);
atomic_set(stats_comp_mask, stats_mask |
atomic_read(stats_comp_mask));
+ msm_vfe47_config_irq(vfe_dev, 1 << (29 + request_comp_index),
+ 0, MSM_ISP_IRQ_ENABLE);
} else {
if (!(atomic_read(stats_comp_mask) & stats_mask))
return;
@@ -1903,6 +1894,8 @@ void msm_vfe47_stats_cfg_comp_mask(
atomic_set(stats_comp_mask,
~stats_mask & atomic_read(stats_comp_mask));
comp_mask_reg &= ~(stats_mask << (request_comp_index * 16));
+ msm_vfe47_config_irq(vfe_dev, 1 << (29 + request_comp_index),
+ 0, MSM_ISP_IRQ_DISABLE);
}
msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x78);
@@ -1919,49 +1912,39 @@ void msm_vfe47_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- uint32_t irq_mask;
- uint32_t irq_mask_1;
-
- irq_mask = vfe_dev->irq0_mask;
- irq_mask_1 = vfe_dev->irq1_mask;
-
switch (STATS_IDX(stream_info->stream_handle)) {
case STATS_COMP_IDX_AEC_BG:
- irq_mask |= 1 << 15;
+ msm_vfe47_config_irq(vfe_dev, 1 << 15, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_HDR_BE:
- irq_mask |= 1 << 16;
+ msm_vfe47_config_irq(vfe_dev, 1 << 16, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_BG:
- irq_mask |= 1 << 17;
+ msm_vfe47_config_irq(vfe_dev, 1 << 17, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_BF:
- irq_mask |= 1 << 18;
- irq_mask_1 |= 1 << 26;
+ msm_vfe47_config_irq(vfe_dev, 1 << 18, 1 << 26,
+ MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_HDR_BHIST:
- irq_mask |= 1 << 19;
+ msm_vfe47_config_irq(vfe_dev, 1 << 19, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_RS:
- irq_mask |= 1 << 20;
+ msm_vfe47_config_irq(vfe_dev, 1 << 20, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_CS:
- irq_mask |= 1 << 21;
+ msm_vfe47_config_irq(vfe_dev, 1 << 21, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_IHIST:
- irq_mask |= 1 << 22;
+ msm_vfe47_config_irq(vfe_dev, 1 << 22, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_BHIST:
- irq_mask |= 1 << 23;
+ msm_vfe47_config_irq(vfe_dev, 1 << 23, 0, MSM_ISP_IRQ_ENABLE);
break;
default:
pr_err("%s: Invalid stats idx %d\n", __func__,
STATS_IDX(stream_info->stream_handle));
}
-
- msm_vfe47_config_irq(vfe_dev, irq_mask, irq_mask_1, MSM_ISP_IRQ_SET);
- vfe_dev->irq0_mask = irq_mask;
- vfe_dev->irq1_mask = irq_mask_1;
}
void msm_vfe47_stats_clear_wm_irq_mask(
@@ -1975,41 +1958,37 @@ void msm_vfe47_stats_clear_wm_irq_mask(
switch (STATS_IDX(stream_info->stream_handle)) {
case STATS_COMP_IDX_AEC_BG:
- irq_mask &= ~(1 << 15);
+ msm_vfe47_config_irq(vfe_dev, 1 << 15, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_HDR_BE:
- irq_mask &= ~(1 << 16);
+ msm_vfe47_config_irq(vfe_dev, 1 << 16, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_BG:
- irq_mask &= ~(1 << 17);
+ msm_vfe47_config_irq(vfe_dev, 1 << 17, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_BF:
- irq_mask &= ~(1 << 18);
- irq_mask_1 &= ~(1 << 26);
+ msm_vfe47_config_irq(vfe_dev, 1 << 18, 1 << 26,
+ MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_HDR_BHIST:
- irq_mask &= ~(1 << 19);
+ msm_vfe47_config_irq(vfe_dev, 1 << 19, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_RS:
- irq_mask &= ~(1 << 20);
+ msm_vfe47_config_irq(vfe_dev, 1 << 20, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_CS:
- irq_mask &= ~(1 << 21);
+ msm_vfe47_config_irq(vfe_dev, 1 << 21, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_IHIST:
- irq_mask &= ~(1 << 22);
+ msm_vfe47_config_irq(vfe_dev, 1 << 22, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_BHIST:
- irq_mask &= ~(1 << 23);
+ msm_vfe47_config_irq(vfe_dev, 1 << 23, 0, MSM_ISP_IRQ_DISABLE);
break;
default:
pr_err("%s: Invalid stats idx %d\n", __func__,
STATS_IDX(stream_info->stream_handle));
}
-
- msm_vfe47_config_irq(vfe_dev, irq_mask, irq_mask_1, MSM_ISP_IRQ_SET);
- vfe_dev->irq0_mask = irq_mask;
- vfe_dev->irq1_mask = irq_mask_1;
}
void msm_vfe47_stats_cfg_wm_reg(
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 3dd55e02826d..8721fc18eaa8 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -1188,15 +1188,9 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
vfe_dev->vt_enable = stream_cfg_cmd->vt_enable;
msm_isp_start_avtimer();
}
- if (stream_info->num_planes > 1) {
+ if (stream_info->num_planes > 1)
msm_isp_axi_reserve_comp_mask(
&vfe_dev->axi_data, stream_info);
- vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_comp_mask(vfe_dev, stream_info);
- } else {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_wm_irq_mask(vfe_dev, stream_info);
- }
for (i = 0; i < stream_info->num_planes; i++) {
vfe_dev->hw_info->vfe_ops.axi_ops.
@@ -1252,14 +1246,8 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
clear_wm_xbar_reg(vfe_dev, stream_info, i);
}
- if (stream_info->num_planes > 1) {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- clear_comp_mask(vfe_dev, stream_info);
+ if (stream_info->num_planes > 1)
msm_isp_axi_free_comp_mask(&vfe_dev->axi_data, stream_info);
- } else {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- clear_wm_irq_mask(vfe_dev, stream_info);
- }
vfe_dev->hw_info->vfe_ops.axi_ops.clear_framedrop(vfe_dev, stream_info);
msm_isp_axi_free_wm(axi_data, stream_info);
@@ -2617,6 +2605,13 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
return rc;
}
spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (stream_info->num_planes > 1) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_comp_mask(vfe_dev, stream_info);
+ } else {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_irq_mask(vfe_dev, stream_info);
+ }
stream_info->state = START_PENDING;
@@ -2733,6 +2728,13 @@ static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev,
spin_unlock_irqrestore(&stream_info->lock, flags);
wait_for_complete_for_this_stream = 0;
+ if (stream_info->num_planes > 1)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_comp_mask(vfe_dev, stream_info);
+ else
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_wm_irq_mask(vfe_dev, stream_info);
+
stream_info->state = STOP_PENDING;
if (!halt && !ext_read &&
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index e98c99fcb62d..4aef6b5c7f38 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -444,10 +444,6 @@ int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
stream_info->framedrop_pattern = 0x1;
stream_info->framedrop_period = framedrop_period - 1;
- if (!stream_info->composite_flag)
- vfe_dev->hw_info->vfe_ops.stats_ops.
- cfg_wm_irq_mask(vfe_dev, stream_info);
-
if (stream_info->init_stats_frame_drop == 0)
vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(vfe_dev,
stream_info);
@@ -485,10 +481,6 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
rc = msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
}
- if (!stream_info->composite_flag)
- vfe_dev->hw_info->vfe_ops.stats_ops.
- clear_wm_irq_mask(vfe_dev, stream_info);
-
vfe_dev->hw_info->vfe_ops.stats_ops.clear_wm_reg(vfe_dev, stream_info);
memset(stream_info, 0, sizeof(struct msm_vfe_stats_stream));
return 0;
@@ -711,6 +703,9 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
pr_err("%s: No buffer for stream%d\n", __func__, idx);
return rc;
}
+ if (!stream_info->composite_flag)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ cfg_wm_irq_mask(vfe_dev, stream_info);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
stream_info->state = STATS_START_PENDING;
@@ -784,6 +779,10 @@ static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
return -EINVAL;
}
+ if (!stream_info->composite_flag)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ clear_wm_irq_mask(vfe_dev, stream_info);
+
if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
stream_info->state = STATS_STOP_PENDING;
else
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index 4c0991e0dd26..c9656e748f09 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -58,6 +58,9 @@
static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable);
static int ispif_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+static long msm_ispif_subdev_ioctl_unlocked(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg);
+
int msm_ispif_get_clk_info(struct ispif_device *ispif_dev,
struct platform_device *pdev);
@@ -95,6 +98,192 @@ static struct msm_cam_clk_info ispif_8626_reset_clk_info[] = {
{"camss_csi_vfe_clk", NO_SET_RATE},
};
+#ifdef CONFIG_COMPAT
+struct ispif_cfg_data_ext_32 {
+ enum ispif_cfg_type_t cfg_type;
+ compat_caddr_t data;
+ uint32_t size;
+};
+
+#define VIDIOC_MSM_ISPIF_CFG_EXT_COMPAT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE+1, struct ispif_cfg_data_ext_32)
+#endif
+
+static void msm_ispif_get_pack_mask_from_cfg(
+ struct msm_ispif_pack_cfg *pack_cfg,
+ struct msm_ispif_params_entry *entry,
+ uint32_t *pack_mask)
+{
+ int i;
+ uint32_t temp;
+
+ if (WARN_ON(!entry))
+ return;
+
+ memset(pack_mask, 0, sizeof(uint32_t) * 2);
+ for (i = 0; i < entry->num_cids; i++) {
+ temp = (pack_cfg[entry->cids[i]].pack_mode & 0x3)|
+ (pack_cfg[entry->cids[i]].even_odd_sel & 0x1) << 2 |
+ (pack_cfg[entry->cids[i]].pixel_swap_en & 0x1) << 3;
+ temp = (temp & 0xF) << ((entry->cids[i] % CID8) * 4);
+
+ if (entry->cids[i] > CID7)
+ pack_mask[1] |= temp;
+ else
+ pack_mask[0] |= temp;
+ CDBG("%s:num %d cid %d mode %d pack_mask %x %x\n",
+ __func__, entry->num_cids, entry->cids[i],
+ pack_cfg[i].pack_mode,
+ pack_mask[0], pack_mask[1]);
+
+ }
+}
+
+static int msm_ispif_config2(struct ispif_device *ispif,
+ void *data)
+{
+ int rc = 0, i = 0;
+ enum msm_ispif_intftype intftype;
+ enum msm_ispif_vfe_intf vfe_intf;
+ uint32_t pack_cfg_mask[2];
+ struct msm_ispif_param_data_ext *params =
+ (struct msm_ispif_param_data_ext *)data;
+
+ if (WARN_ON(!ispif) || WARN_ON(!params))
+ return -EINVAL;
+
+ if (ispif->ispif_state != ISPIF_POWER_UP) {
+ pr_err("%s: ispif invalid state %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+ if (params->num > MAX_PARAM_ENTRIES) {
+ pr_err("%s: invalid param entries %d\n", __func__,
+ params->num);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ for (i = 0; i < params->num; i++) {
+ intftype = params->entries[i].intftype;
+ vfe_intf = params->entries[i].vfe_intf;
+
+ CDBG("%s, num %d intftype %x, vfe_intf %d, csid %d\n", __func__,
+ params->num, intftype, vfe_intf,
+ params->entries[i].csid);
+
+ if ((intftype >= INTF_MAX) ||
+ (vfe_intf >= ispif->vfe_info.num_vfe) ||
+ (ispif->csid_version <= CSID_VERSION_V22 &&
+ (vfe_intf > VFE0))) {
+ pr_err("%s: VFEID %d and CSID version %d mismatch\n",
+ __func__, vfe_intf, ispif->csid_version);
+ return -EINVAL;
+ }
+
+ msm_ispif_get_pack_mask_from_cfg(params->pack_cfg,
+ &params->entries[i], pack_cfg_mask);
+ msm_ispif_cfg_pack_mode(ispif, intftype, vfe_intf,
+ pack_cfg_mask);
+ }
+ return rc;
+}
+
+static long msm_ispif_cmd_ext(struct v4l2_subdev *sd,
+ void *arg)
+{
+ long rc = 0;
+ struct ispif_device *ispif =
+ (struct ispif_device *)v4l2_get_subdevdata(sd);
+ struct ispif_cfg_data_ext pcdata;
+ struct msm_ispif_param_data_ext *params = NULL;
+#ifdef CONFIG_COMPAT
+ struct ispif_cfg_data_ext_32 *pcdata32 =
+ (struct ispif_cfg_data_ext_32 *)arg;
+
+ if (pcdata32 == NULL) {
+ pr_err("Invalid params passed from user\n");
+ return -EINVAL;
+ }
+ pcdata.cfg_type = pcdata32->cfg_type;
+ pcdata.size = pcdata32->size;
+ pcdata.data = compat_ptr(pcdata32->data);
+
+#else
+ struct ispif_cfg_data_ext *pcdata64 =
+ (struct ispif_cfg_data_ext *)arg;
+
+ if (pcdata64 == NULL) {
+ pr_err("Invalid params passed from user\n");
+ return -EINVAL;
+ }
+ pcdata.cfg_type = pcdata64->cfg_type;
+ pcdata.size = pcdata64->size;
+ pcdata.data = pcdata64->data;
+#endif
+ if (pcdata.size != sizeof(struct msm_ispif_param_data_ext)) {
+ pr_err("%s: payload size mismatch\n", __func__);
+ return -EINVAL;
+ }
+
+ params = kzalloc(sizeof(struct msm_ispif_param_data_ext), GFP_KERNEL);
+ if (!params) {
+ CDBG("%s: params alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+ if (copy_from_user(params, (void __user *)(pcdata.data),
+ pcdata.size)) {
+ kfree(params);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ispif->mutex);
+ switch (pcdata.cfg_type) {
+ case ISPIF_CFG2:
+ rc = msm_ispif_config2(ispif, params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ default:
+ pr_err("%s: invalid cfg_type\n", __func__);
+ rc = -EINVAL;
+ break;
+ }
+ mutex_unlock(&ispif->mutex);
+ kfree(params);
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_ispif_subdev_ioctl_compat(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ if (WARN_ON(!sd))
+ return -EINVAL;
+
+ switch (cmd) {
+ case VIDIOC_MSM_ISPIF_CFG_EXT_COMPAT:
+ return msm_ispif_cmd_ext(sd, arg);
+
+ default:
+ return msm_ispif_subdev_ioctl_unlocked(sd, cmd, arg);
+ }
+}
+static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ if (is_compat_task())
+ return msm_ispif_subdev_ioctl_compat(sd, cmd, arg);
+ else
+ return msm_ispif_subdev_ioctl_unlocked(sd, cmd, arg);
+}
+#else
+static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ return msm_ispif_subdev_ioctl_unlocked(sd, cmd, arg);
+}
+#endif
static void msm_ispif_put_regulator(struct ispif_device *ispif_dev)
{
int i;
@@ -649,7 +838,6 @@ static uint16_t msm_ispif_get_cids_mask_from_cfg(
{
int i;
uint16_t cids_mask = 0;
-
BUG_ON(!entry);
for (i = 0; i < entry->num_cids; i++)
@@ -657,14 +845,15 @@ static uint16_t msm_ispif_get_cids_mask_from_cfg(
return cids_mask;
}
-
static int msm_ispif_config(struct ispif_device *ispif,
- struct msm_ispif_param_data *params)
+ void *data)
{
int rc = 0, i = 0;
uint16_t cid_mask;
enum msm_ispif_intftype intftype;
enum msm_ispif_vfe_intf vfe_intf;
+ struct msm_ispif_param_data *params =
+ (struct msm_ispif_param_data *)data;
BUG_ON(!ispif);
BUG_ON(!params);
@@ -1415,7 +1604,7 @@ static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg)
}
static struct v4l2_file_operations msm_ispif_v4l2_subdev_fops;
-static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
+static long msm_ispif_subdev_ioctl_unlocked(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
struct ispif_device *ispif =
@@ -1424,6 +1613,8 @@ static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
switch (cmd) {
case VIDIOC_MSM_ISPIF_CFG:
return msm_ispif_cmd(sd, arg);
+ case VIDIOC_MSM_ISPIF_CFG_EXT:
+ return msm_ispif_cmd_ext(sd, arg);
case MSM_SD_NOTIFY_FREEZE: {
ispif->ispif_sof_debug = 0;
ispif->ispif_rdi0_debug = 0;
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
index b82fd34f2396..d488ca618537 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -115,4 +115,10 @@
#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x000001
#define ISPIF_STOP_INTF_IMMEDIATELY 0xAAAAAAAA
+
+/* ISPIF RDI pack mode not supported */
+static inline void msm_ispif_cfg_pack_mode(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t vfe_intf, uint32_t *pack_cfg_mask)
+{
+}
#endif /* __MSM_ISPIF_HWREG_V1_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
index 01dce6d45897..8ae61dc2d4f6 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -96,4 +96,9 @@
#define ISPIF_STOP_INTF_IMMEDIATELY 0xAAAAAAAA
+/* ISPIF RDI pack mode not supported */
+static inline void msm_ispif_cfg_pack_mode(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t vfe_intf, uint32_t *pack_cfg_mask)
+{
+}
#endif /* __MSM_ISPIF_HWREG_V2_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v3.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v3.h
index 343575263816..94cc974441ee 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v3.h
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v3.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,8 +10,8 @@
* GNU General Public License for more details.
*/
-#ifndef __MSM_ISPIF_HWREG_V2_H__
-#define __MSM_ISPIF_HWREG_V2_H__
+#ifndef __MSM_ISPIF_HWREG_V3_H__
+#define __MSM_ISPIF_HWREG_V3_H__
/* common registers */
#define ISPIF_RST_CMD_ADDR 0x008
@@ -99,4 +99,38 @@
#define ISPIF_STOP_INTF_IMMEDIATELY 0xAAAAAAAA
-#endif /* __MSM_ISPIF_HWREG_V2_H__ */
+/* ISPIF RDI pack mode support */
+static inline void msm_ispif_cfg_pack_mode(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t vfe_intf, uint32_t *pack_cfg_mask)
+{
+ uint32_t pack_addr[2];
+
+ if (WARN_ON(!ispif))
+ return;
+
+ switch (intftype) {
+ case RDI0:
+ pack_addr[0] = ISPIF_VFE_m_RDI_INTF_n_PACK_0(vfe_intf, 0);
+ pack_addr[1] = ISPIF_VFE_m_RDI_INTF_n_PACK_1(vfe_intf, 0);
+ break;
+ case RDI1:
+ pack_addr[0] = ISPIF_VFE_m_RDI_INTF_n_PACK_0(vfe_intf, 1);
+ pack_addr[1] = ISPIF_VFE_m_RDI_INTF_n_PACK_1(vfe_intf, 1);
+ break;
+ case RDI2:
+ pack_addr[0] = ISPIF_VFE_m_RDI_INTF_n_PACK_0(vfe_intf, 2);
+ pack_addr[1] = ISPIF_VFE_m_RDI_INTF_n_PACK_1(vfe_intf, 2);
+ break;
+ default:
+ pr_debug("%s: pack_mode not supported on intftype=%d\n",
+ __func__, intftype);
+ return;
+ }
+ pr_debug("%s: intftype %d pack_mask %x: 0x%x, %x:0x%x\n",
+ __func__, intftype, pack_addr[0],
+ pack_cfg_mask[0], pack_addr[1],
+ pack_cfg_mask[1]);
+ msm_camera_io_w_mb(pack_cfg_mask[0], ispif->base + pack_addr[0]);
+ msm_camera_io_w_mb(pack_cfg_mask[1], ispif->base + pack_addr[1]);
+}
+#endif /* __MSM_ISPIF_HWREG_V3_H__ */
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 32a7fe479966..d58684109395 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1448,6 +1448,12 @@ int create_pkt_cmd_session_set_property(
case HAL_RATE_CONTROL_VBR_VFR:
pkt->rg_property_data[1] = HFI_RATE_CONTROL_VBR_VFR;
break;
+ case HAL_RATE_CONTROL_MBR_CFR:
+ pkt->rg_property_data[1] = HFI_RATE_CONTROL_MBR_CFR;
+ break;
+ case HAL_RATE_CONTROL_MBR_VFR:
+ pkt->rg_property_data[1] = HFI_RATE_CONTROL_MBR_VFR;
+ break;
default:
dprintk(VIDC_ERR,
"Invalid Rate control setting: %p\n",
@@ -1559,6 +1565,25 @@ int create_pkt_cmd_session_set_property(
sizeof(struct hfi_quantization_range);
break;
}
+ case HAL_PARAM_VENC_SESSION_QP_RANGE_PACKED:
+ {
+ struct hfi_quantization_range *hfi;
+ struct hfi_quantization_range *hal_range =
+ (struct hfi_quantization_range *) pdata;
+
+ pkt->rg_property_data[0] =
+ HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE;
+ hfi = (struct hfi_quantization_range *)
+ &pkt->rg_property_data[1];
+
+ hfi->min_qp = hal_range->min_qp;
+ hfi->max_qp = hal_range->max_qp;
+ hfi->layer_id = hal_range->layer_id;
+
+ pkt->size += sizeof(u32) +
+ sizeof(struct hfi_quantization_range);
+ break;
+ }
case HAL_PARAM_VENC_SEARCH_RANGE:
{
struct hfi_vc1e_perf_cfg_type *hfi;
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 54cb04fcc4f0..d8c6e30204d1 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -615,6 +615,12 @@ static u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width)
return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_UBWC, width, height);
}
+static u32 get_frame_size_compressed_full_yuv(int plane,
+ u32 max_mbs_per_frame, u32 size_per_mb)
+{
+ return (max_mbs_per_frame * size_per_mb * 3 / 2);
+}
+
static u32 get_frame_size_compressed(int plane,
u32 max_mbs_per_frame, u32 size_per_mb)
{
@@ -791,7 +797,7 @@ struct msm_vidc_format vdec_formats[] = {
.description = "VP9 compressed format",
.fourcc = V4L2_PIX_FMT_VP9,
.num_planes = 1,
- .get_frame_size = get_frame_size_compressed,
+ .get_frame_size = get_frame_size_compressed_full_yuv,
.type = OUTPUT_PORT,
},
{
@@ -2835,4 +2841,3 @@ int msm_vdec_ctrl_init(struct msm_vidc_inst *inst)
return msm_comm_ctrl_init(inst, msm_vdec_ctrls,
ARRAY_SIZE(msm_vdec_ctrls), &msm_vdec_ctrl_ops);
}
-
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 5c9dc6a53223..0e668b93598f 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -73,6 +73,8 @@ static const char *const mpeg_video_rate_control[] = {
"VBR CFR",
"CBR VFR",
"CBR CFR",
+ "MBR CFR",
+ "MBR VFR",
NULL
};
@@ -233,7 +235,7 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
.name = "Video Framerate and Bitrate Control",
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF,
- .maximum = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR,
+ .maximum = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_VFR,
.default_value = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF,
.step = 0,
.menu_skip_mask = ~(
@@ -241,7 +243,9 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
(1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_VFR) |
(1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR) |
(1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_VFR) |
- (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR)
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_CFR) |
+ (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_VFR)
),
.qmenu = mpeg_video_rate_control,
},
@@ -529,6 +533,28 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
.step = 1,
},
{
+ .id = V4L2_CID_MPEG_VIDEO_MIN_QP_PACKED,
+ .name = "H264 Minimum QP PACKED",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0x00010101,
+ .maximum = 0x00333333,
+ .default_value = 0x00010101,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_MAX_QP_PACKED,
+ .name = "H264 Maximum QP PACKED",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0x00010101,
+ .maximum = 0x00333333,
+ .default_value = 0x00333333,
+ .step = 1,
+ .menu_skip_mask = 0,
+ .qmenu = NULL,
+ },
+ {
.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
.name = "Slice Mode",
.type = V4L2_CTRL_TYPE_MENU,
@@ -2200,10 +2226,14 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR:
update_ctrl.val =
V4L2_MPEG_VIDEO_BITRATE_MODE_VBR;
+ break;
case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_VFR:
case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR:
+ case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_CFR:
+ case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_VFR:
update_ctrl.val =
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
+ break;
}
final_mode = ctrl->val;
@@ -2508,6 +2538,46 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
pdata = &qp_range;
break;
}
+ case V4L2_CID_MPEG_VIDEO_MIN_QP_PACKED: {
+ struct v4l2_ctrl *qp_max;
+
+ qp_max = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MAX_QP_PACKED);
+ if (ctrl->val >= qp_max->val) {
+ dprintk(VIDC_ERR,
+ "Bad range: Min QP PACKED (0x%x) > Max QP PACKED (0x%x)\n",
+ ctrl->val, qp_max->val);
+ rc = -ERANGE;
+ break;
+ }
+
+ property_id = HAL_PARAM_VENC_SESSION_QP_RANGE_PACKED;
+ qp_range.layer_id = 0;
+ qp_range.max_qp = qp_max->val;
+ qp_range.min_qp = ctrl->val;
+
+ pdata = &qp_range;
+ break;
+ }
+ case V4L2_CID_MPEG_VIDEO_MAX_QP_PACKED: {
+ struct v4l2_ctrl *qp_min;
+
+ qp_min = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MIN_QP_PACKED);
+ if (ctrl->val <= qp_min->val) {
+ dprintk(VIDC_ERR,
+ "Bad range: Max QP PACKED (%d) < Min QP PACKED (%d)\n",
+ ctrl->val, qp_min->val);
+ rc = -ERANGE;
+ break;
+ }
+
+ property_id = HAL_PARAM_VENC_SESSION_QP_RANGE_PACKED;
+ qp_range.layer_id = 0;
+ qp_range.max_qp = ctrl->val;
+ qp_range.min_qp = qp_min->val;
+
+ pdata = &qp_range;
+ break;
+ }
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: {
int temp = 0;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index df4c99b50cd1..ac53b3bcb4ed 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -4249,6 +4249,9 @@ static inline int __resume(struct venus_hfi_device *device)
} else if (device->power_enabled) {
dprintk(VIDC_DBG, "Power is already enabled\n");
goto exit;
+ } else if (!__core_in_valid_state(device)) {
+ dprintk(VIDC_DBG, "venus_hfi_device in deinit state.");
+ return -EINVAL;
}
dprintk(VIDC_DBG, "Resuming from power collapse\n");
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 8c4fa786a424..cbb4e3569b13 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -363,6 +363,9 @@ struct hfi_hybrid_hierp {
#define HFI_RATE_CONTROL_VBR_CFR (HFI_OX_BASE + 0x3)
#define HFI_RATE_CONTROL_CBR_VFR (HFI_OX_BASE + 0x4)
#define HFI_RATE_CONTROL_CBR_CFR (HFI_OX_BASE + 0x5)
+#define HFI_RATE_CONTROL_MBR_CFR (HFI_OX_BASE + 0x6)
+#define HFI_RATE_CONTROL_MBR_VFR (HFI_OX_BASE + 0x7)
+
struct hfi_uncompressed_plane_actual_constraints_info {
u32 buffer_type;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index a368257e8a66..624fd53debe8 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -236,6 +236,7 @@ enum hal_property {
HAL_PARAM_VENC_LOW_LATENCY,
HAL_PARAM_VENC_CONSTRAINED_INTRA_PRED,
HAL_CONFIG_VENC_BLUR_RESOLUTION,
+ HAL_PARAM_VENC_SESSION_QP_RANGE_PACKED,
};
enum hal_domain {
@@ -710,6 +711,8 @@ enum hal_rate_control {
HAL_RATE_CONTROL_VBR_CFR,
HAL_RATE_CONTROL_CBR_VFR,
HAL_RATE_CONTROL_CBR_CFR,
+ HAL_RATE_CONTROL_MBR_CFR,
+ HAL_RATE_CONTROL_MBR_VFR,
HAL_UNUSED_RC = 0x10000000,
};
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index d27918647b5c..a97f5df7a7db 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1572,6 +1572,7 @@ config WCD934X_CODEC
select MSM_CDC_SUPPLY
select MSM_CDC_PINCTRL
select REGMAP_ALLOW_WRITE_DEBUGFS
+ select PINCTRL_WCD
help
Enables the WCD9xxx codec core driver. The core driver provides
read/write capability to registers which are part of the
diff --git a/drivers/mfd/wcd9xxx-utils.c b/drivers/mfd/wcd9xxx-utils.c
index 2160dfd063b1..22d61d96a11d 100644
--- a/drivers/mfd/wcd9xxx-utils.c
+++ b/drivers/mfd/wcd9xxx-utils.c
@@ -39,6 +39,10 @@ static enum wcd9xxx_intf_status wcd9xxx_intf = -1;
static struct mfd_cell tavil_devs[] = {
{
+ .name = "qcom-wcd-pinctrl",
+ .of_compatible = "qcom,wcd-pinctrl",
+ },
+ {
.name = "tavil_codec",
},
};
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 86f3a4800d1d..3f9eeabc5464 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -3339,7 +3339,7 @@ static ssize_t wcnss_wlan_write(struct file *fp, const char __user
return -EFAULT;
if ((UINT32_MAX - count < penv->user_cal_rcvd) ||
- MAX_CALIBRATED_DATA_SIZE < count + penv->user_cal_rcvd) {
+ (penv->user_cal_exp_size < count + penv->user_cal_rcvd)) {
pr_err(DEVICE " invalid size to write %zu\n", count +
penv->user_cal_rcvd);
rc = -ENOMEM;
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 88011626e05e..918f8c82acdd 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -325,8 +325,10 @@ static int nqx_ese_pwr(struct nqx_dev *nqx_dev, unsigned long int arg)
} else if (arg == 3) {
if (!nqx_dev->nfc_ven_enabled)
r = 0;
- else
- r = gpio_get_value(nqx_dev->ese_gpio);
+ else {
+ if (gpio_is_valid(nqx_dev->ese_gpio))
+ r = gpio_get_value(nqx_dev->ese_gpio);
+ }
}
return r;
}
@@ -375,11 +377,14 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
__func__, nqx_dev);
if (gpio_is_valid(nqx_dev->firm_gpio))
gpio_set_value(nqx_dev->firm_gpio, 0);
- if (!gpio_get_value(nqx_dev->ese_gpio)) {
- dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
- gpio_set_value(nqx_dev->en_gpio, 0);
- } else {
- dev_dbg(&nqx_dev->client->dev, "keeping en_gpio high\n");
+
+ if (gpio_is_valid(nqx_dev->ese_gpio)) {
+ if (!gpio_get_value(nqx_dev->ese_gpio)) {
+ dev_dbg(&nqx_dev->client->dev, "disabling en_gpio\n");
+ gpio_set_value(nqx_dev->en_gpio, 0);
+ } else {
+ dev_dbg(&nqx_dev->client->dev, "keeping en_gpio high\n");
+ }
}
r = nqx_clock_deselect(nqx_dev);
if (r < 0)
@@ -405,9 +410,11 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
* We are switching to Dowload Mode, toggle the enable pin
* in order to set the NFCC in the new mode
*/
- if (gpio_get_value(nqx_dev->ese_gpio)) {
- dev_err(&nqx_dev->client->dev, "FW download forbidden while ese is on\n");
- return -EBUSY; /* Device or resource busy */
+ if (gpio_is_valid(nqx_dev->ese_gpio)) {
+ if (gpio_get_value(nqx_dev->ese_gpio)) {
+ dev_err(&nqx_dev->client->dev, "FW download forbidden while ese is on\n");
+ return -EBUSY; /* Device or resource busy */
+ }
}
gpio_set_value(nqx_dev->en_gpio, 1);
msleep(20);
@@ -828,6 +835,7 @@ static int nqx_probe(struct i2c_client *client,
nqx_dev->en_gpio = platform_data->en_gpio;
nqx_dev->irq_gpio = platform_data->irq_gpio;
nqx_dev->firm_gpio = platform_data->firm_gpio;
+ nqx_dev->ese_gpio = platform_data->ese_gpio;
nqx_dev->clkreq_gpio = platform_data->clkreq_gpio;
nqx_dev->pdata = platform_data;
diff --git a/drivers/of/of_batterydata.c b/drivers/of/of_batterydata.c
index 5f140cd0c2a6..4410f270f557 100644
--- a/drivers/of/of_batterydata.c
+++ b/drivers/of/of_batterydata.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -312,32 +312,15 @@ static int64_t of_batterydata_convert_battery_id_kohm(int batt_id_uv,
struct device_node *of_batterydata_get_best_profile(
const struct device_node *batterydata_container_node,
- const char *psy_name, const char *batt_type)
+ int batt_id_kohm, const char *batt_type)
{
struct batt_ids batt_ids;
struct device_node *node, *best_node = NULL;
- struct power_supply *psy;
const char *battery_type = NULL;
- union power_supply_propval ret = {0, };
int delta = 0, best_delta = 0, best_id_kohm = 0, id_range_pct,
- batt_id_kohm = 0, i = 0, rc = 0, limit = 0;
+ i = 0, rc = 0, limit = 0;
bool in_range = false;
- psy = power_supply_get_by_name(psy_name);
- if (!psy) {
- pr_err("%s supply not found. defer\n", psy_name);
- return ERR_PTR(-EPROBE_DEFER);
- }
-
- rc = power_supply_get_property(psy, POWER_SUPPLY_PROP_RESISTANCE_ID,
- &ret);
- if (rc) {
- pr_err("failed to retrieve resistance value rc=%d\n", rc);
- return ERR_PTR(-ENOSYS);
- }
-
- batt_id_kohm = ret.intval / 1000;
-
/* read battery id range percentage for best profile */
rc = of_property_read_u32(batterydata_container_node,
"qcom,batt-id-range-pct", &id_range_pct);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index b9819b929a91..4805c4feac74 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -120,4 +120,11 @@ config PINCTRL_MSMFALCON
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm MSMFALCON platform.
+config PINCTRL_WCD
+ tristate "Qualcomm Technologies, Inc WCD pin controller driver"
+ depends on WCD934X_CODEC
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ WCD gpio controller block.
+
endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 0d390906ea00..bddc21431eeb 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
obj-$(CONFIG_PINCTRL_MSM8996) += pinctrl-msm8996.o
obj-$(CONFIG_PINCTRL_MSMCOBALT) += pinctrl-msmcobalt.o
obj-$(CONFIG_PINCTRL_MSMFALCON) += pinctrl-msmfalcon.o
+obj-$(CONFIG_PINCTRL_WCD) += pinctrl-wcd.o
diff --git a/drivers/pinctrl/qcom/pinctrl-wcd.c b/drivers/pinctrl/qcom/pinctrl-wcd.c
new file mode 100644
index 000000000000..08d87f7452eb
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-wcd.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mfd/wcd934x/registers.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+#define WCD_REG_DIR_CTL WCD934X_CHIP_TIER_CTRL_GPIO_CTL_OE
+#define WCD_REG_VAL_CTL WCD934X_CHIP_TIER_CTRL_GPIO_CTL_DATA
+#define WCD_GPIO_PULL_UP 1
+#define WCD_GPIO_PULL_DOWN 2
+#define WCD_GPIO_BIAS_DISABLE 3
+#define WCD_GPIO_STRING_LEN 20
+
+/**
+ * struct wcd_gpio_pad - keep current GPIO settings
+ * @offset: offset of gpio.
+ * @is_valid: Set to false, when GPIO in high Z state.
+ * @value: value of a pin
+ * @output_enabled: Set to true if GPIO is output and false if it is input
+ * @pullup: Constant current which flow through GPIO output buffer.
+ * @strength: Drive strength of a pin
+ */
+struct wcd_gpio_pad {
+ u16 offset;
+ bool is_valid;
+ bool value;
+ bool output_enabled;
+ unsigned int pullup;
+ unsigned int strength;
+};
+
+struct wcd_gpio_priv {
+ struct device *dev;
+ struct regmap *map;
+ struct pinctrl_dev *ctrl;
+ struct gpio_chip chip;
+};
+
+static inline struct wcd_gpio_priv *to_gpio_state(struct gpio_chip *chip)
+{
+ return container_of(chip, struct wcd_gpio_priv, chip);
+};
+
+static int wcd_gpio_read(struct wcd_gpio_priv *priv_data,
+ struct wcd_gpio_pad *pad, unsigned int addr)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(priv_data->map, addr, &val);
+ if (ret < 0)
+ dev_err(priv_data->dev, "%s: read 0x%x failed\n",
+ __func__, addr);
+ else
+ ret = (val >> pad->offset);
+
+ return ret;
+}
+
+static int wcd_gpio_write(struct wcd_gpio_priv *priv_data,
+ struct wcd_gpio_pad *pad, unsigned int addr,
+ unsigned int val)
+{
+ int ret;
+
+ ret = regmap_update_bits(priv_data->map, addr, (1 << pad->offset),
+ val << pad->offset);
+ if (ret < 0)
+ dev_err(priv_data->dev, "write 0x%x failed\n", addr);
+
+ return ret;
+}
+
+static int wcd_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ return pctldev->desc->npins;
+}
+
+static const char *wcd_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned pin)
+{
+ return pctldev->desc->pins[pin].name;
+}
+
+static int wcd_get_group_pins(struct pinctrl_dev *pctldev, unsigned pin,
+ const unsigned **pins, unsigned *num_pins)
+{
+ *pins = &pctldev->desc->pins[pin].number;
+ *num_pins = 1;
+ return 0;
+}
+
+static const struct pinctrl_ops wcd_pinctrl_ops = {
+ .get_groups_count = wcd_get_groups_count,
+ .get_group_name = wcd_get_group_name,
+ .get_group_pins = wcd_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int wcd_config_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ unsigned param = pinconf_to_config_param(*config);
+ struct wcd_gpio_pad *pad;
+ unsigned arg;
+
+ pad = pctldev->desc->pins[pin].drv_data;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ arg = pad->pullup == WCD_GPIO_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_DISABLE:
+ arg = pad->pullup = WCD_GPIO_BIAS_DISABLE;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = pad->pullup == WCD_GPIO_PULL_UP;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ arg = !pad->is_valid;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ arg = pad->output_enabled;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ arg = pad->value;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+}
+
+static int wcd_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned nconfs)
+{
+ struct wcd_gpio_priv *priv_data = pinctrl_dev_get_drvdata(pctldev);
+ struct wcd_gpio_pad *pad;
+ unsigned param, arg;
+ int i, ret;
+
+ pad = pctldev->desc->pins[pin].drv_data;
+
+ for (i = 0; i < nconfs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ dev_dbg(priv_data->dev, "%s: param: %d arg: %d",
+ __func__, param, arg);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ pad->pullup = WCD_GPIO_BIAS_DISABLE;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ pad->pullup = WCD_GPIO_PULL_UP;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ pad->pullup = WCD_GPIO_PULL_DOWN;
+ break;
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ pad->is_valid = false;
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ pad->output_enabled = false;
+ break;
+ case PIN_CONFIG_OUTPUT:
+ pad->output_enabled = true;
+ pad->value = arg;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ pad->strength = arg;
+ break;
+ default:
+ ret = -EINVAL;
+ goto done;
+ }
+ }
+
+ if (pad->output_enabled) {
+ ret = wcd_gpio_write(priv_data, pad, WCD_REG_DIR_CTL,
+ pad->output_enabled);
+ if (ret < 0)
+ goto done;
+ ret = wcd_gpio_write(priv_data, pad, WCD_REG_VAL_CTL,
+ pad->value);
+ } else
+ ret = wcd_gpio_write(priv_data, pad, WCD_REG_DIR_CTL,
+ pad->output_enabled);
+done:
+ return ret;
+}
+
+static const struct pinconf_ops wcd_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_group_get = wcd_config_get,
+ .pin_config_group_set = wcd_config_set,
+};
+
+static int wcd_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
+{
+ struct wcd_gpio_priv *priv_data = to_gpio_state(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_INPUT_ENABLE, 1);
+
+ return wcd_config_set(priv_data->ctrl, pin, &config, 1);
+}
+
+static int wcd_gpio_direction_output(struct gpio_chip *chip,
+ unsigned pin, int val)
+{
+ struct wcd_gpio_priv *priv_data = to_gpio_state(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, val);
+
+ return wcd_config_set(priv_data->ctrl, pin, &config, 1);
+}
+
+static int wcd_gpio_get(struct gpio_chip *chip, unsigned pin)
+{
+ struct wcd_gpio_priv *priv_data = to_gpio_state(chip);
+ struct wcd_gpio_pad *pad;
+ int value;
+
+ pad = priv_data->ctrl->desc->pins[pin].drv_data;
+
+ if (!pad->is_valid)
+ return -EINVAL;
+
+ value = wcd_gpio_read(priv_data, pad, WCD_REG_VAL_CTL);
+ return value;
+}
+
+static void wcd_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
+{
+ struct wcd_gpio_priv *priv_data = to_gpio_state(chip);
+ unsigned long config;
+
+ config = pinconf_to_config_packed(PIN_CONFIG_OUTPUT, value);
+
+ wcd_config_set(priv_data->ctrl, pin, &config, 1);
+}
+
+static const struct gpio_chip wcd_gpio_chip = {
+ .direction_input = wcd_gpio_direction_input,
+ .direction_output = wcd_gpio_direction_output,
+ .get = wcd_gpio_get,
+ .set = wcd_gpio_set,
+};
+
+static int wcd_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pinctrl_pin_desc *pindesc;
+ struct pinctrl_desc *pctrldesc;
+ struct wcd_gpio_pad *pad, *pads;
+ struct wcd_gpio_priv *priv_data;
+ int ret, i, j;
+ u32 npins;
+ char **name;
+
+ ret = of_property_read_u32(dev->of_node, "qcom,num-gpios", &npins);
+ if (ret) {
+ dev_err(dev, "%s: Looking up %s property in node %s failed\n",
+ __func__, "qcom,num-gpios", dev->of_node->full_name);
+ ret = -EINVAL;
+ goto err_priv_alloc;
+ }
+ if (!npins) {
+ dev_err(dev, "%s: no.of pins are 0\n", __func__);
+ ret = -EINVAL;
+ goto err_priv_alloc;
+ }
+
+ priv_data = devm_kzalloc(dev, sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data) {
+ ret = -ENOMEM;
+ goto err_priv_alloc;
+ }
+
+ priv_data->dev = dev;
+ priv_data->map = dev_get_regmap(dev->parent, NULL);
+ if (!priv_data->map) {
+ dev_err(dev, "%s: failed to get regmap\n", __func__);
+ ret = -EINVAL;
+ goto err_regmap;
+ }
+
+ pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
+ if (!pindesc) {
+ ret = -ENOMEM;
+ goto err_pinsec_alloc;
+ }
+
+ pads = devm_kcalloc(dev, npins, sizeof(*pads), GFP_KERNEL);
+ if (!pads) {
+ ret = -ENOMEM;
+ goto err_pads_alloc;
+ }
+
+ pctrldesc = devm_kzalloc(dev, sizeof(*pctrldesc), GFP_KERNEL);
+ if (!pctrldesc) {
+ ret = -ENOMEM;
+ goto err_pinctrl_alloc;
+ }
+
+ pctrldesc->pctlops = &wcd_pinctrl_ops;
+ pctrldesc->confops = &wcd_pinconf_ops;
+ pctrldesc->owner = THIS_MODULE;
+ pctrldesc->name = dev_name(dev);
+ pctrldesc->pins = pindesc;
+ pctrldesc->npins = npins;
+
+ name = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL);
+ if (!name) {
+ ret = -ENOMEM;
+ goto err_name_alloc;
+ }
+ for (i = 0; i < npins; i++, pindesc++) {
+ name[i] = devm_kzalloc(dev, sizeof(char) * WCD_GPIO_STRING_LEN,
+ GFP_KERNEL);
+ if (!name[i]) {
+ ret = -ENOMEM;
+ goto err_pin;
+ }
+ pad = &pads[i];
+ pindesc->drv_data = pad;
+ pindesc->number = i;
+ snprintf(name[i], (WCD_GPIO_STRING_LEN - 1), "gpio%d", (i+1));
+ pindesc->name = name[i];
+ pad->offset = i;
+ pad->is_valid = true;
+ }
+
+ priv_data->chip = wcd_gpio_chip;
+ priv_data->chip.dev = dev;
+ priv_data->chip.base = -1;
+ priv_data->chip.ngpio = npins;
+ priv_data->chip.label = dev_name(dev);
+ priv_data->chip.of_gpio_n_cells = 2;
+ priv_data->chip.can_sleep = false;
+
+ priv_data->ctrl = pinctrl_register(pctrldesc, dev, priv_data);
+ if (IS_ERR(priv_data->ctrl)) {
+ dev_err(dev, "%s: failed to register to pinctrl\n", __func__);
+ ret = PTR_ERR(priv_data->ctrl);
+ goto err_pin;
+ }
+
+ ret = gpiochip_add(&priv_data->chip);
+ if (ret) {
+ dev_err(dev, "%s: can't add gpio chip\n", __func__);
+ goto err_chip;
+ }
+
+ ret = gpiochip_add_pin_range(&priv_data->chip, dev_name(dev), 0, 0,
+ npins);
+ if (ret) {
+ dev_err(dev, "%s: failed to add pin range\n", __func__);
+ goto err_range;
+ }
+ platform_set_drvdata(pdev, priv_data);
+
+ return 0;
+
+err_range:
+ gpiochip_remove(&priv_data->chip);
+err_chip:
+ pinctrl_unregister(priv_data->ctrl);
+err_pin:
+ for (j = 0; j < i; j++)
+ devm_kfree(dev, name[j]);
+ devm_kfree(dev, name);
+err_name_alloc:
+ devm_kfree(dev, pctrldesc);
+err_pinctrl_alloc:
+ devm_kfree(dev, pads);
+err_pads_alloc:
+ devm_kfree(dev, pindesc);
+err_pinsec_alloc:
+err_regmap:
+ devm_kfree(dev, priv_data);
+err_priv_alloc:
+ return ret;
+}
+
+static int wcd_pinctrl_remove(struct platform_device *pdev)
+{
+ struct wcd_gpio_priv *priv_data = platform_get_drvdata(pdev);
+
+ gpiochip_remove(&priv_data->chip);
+ pinctrl_unregister(priv_data->ctrl);
+
+ return 0;
+}
+
+static const struct of_device_id wcd_pinctrl_of_match[] = {
+ { .compatible = "qcom,wcd-pinctrl" },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, wcd_pinctrl_of_match);
+
+static struct platform_driver wcd_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-wcd-pinctrl",
+ .of_match_table = wcd_pinctrl_of_match,
+ },
+ .probe = wcd_pinctrl_probe,
+ .remove = wcd_pinctrl_remove,
+};
+
+module_platform_driver(wcd_pinctrl_driver);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc WCD GPIO pin control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/qcom-charger/qpnp-fg.c b/drivers/power/qcom-charger/qpnp-fg.c
index 8660c1f8c3f5..0658f0d3b1eb 100644
--- a/drivers/power/qcom-charger/qpnp-fg.c
+++ b/drivers/power/qcom-charger/qpnp-fg.c
@@ -4772,8 +4772,7 @@ fail:
#define BATTERY_PSY_WAIT_MS 2000
static int fg_batt_profile_init(struct fg_chip *chip)
{
- int rc = 0, ret;
- int len;
+ int rc = 0, ret, len, batt_id;
struct device_node *node = chip->pdev->dev.of_node;
struct device_node *batt_node, *profile_node;
const char *data, *batt_type_str;
@@ -4802,14 +4801,16 @@ wait:
goto no_profile;
}
+ batt_id = get_sram_prop_now(chip, FG_DATA_BATT_ID);
+ batt_id /= 1000;
if (fg_debug_mask & FG_STATUS)
- pr_info("battery id = %d\n",
- get_sram_prop_now(chip, FG_DATA_BATT_ID));
- profile_node = of_batterydata_get_best_profile(batt_node, "bms",
+ pr_info("battery id = %dKOhms\n", batt_id);
+
+ profile_node = of_batterydata_get_best_profile(batt_node, batt_id,
fg_batt_type);
- if (!profile_node) {
- pr_err("couldn't find profile handle\n");
- rc = -ENODATA;
+ if (IS_ERR_OR_NULL(profile_node)) {
+ rc = PTR_ERR(profile_node);
+ pr_err("couldn't find profile handle %d\n", rc);
goto no_profile;
}
diff --git a/drivers/power/qcom-charger/qpnp-smbcharger.c b/drivers/power/qcom-charger/qpnp-smbcharger.c
index 2536f4ec5c15..6c1e58d046e8 100644
--- a/drivers/power/qcom-charger/qpnp-smbcharger.c
+++ b/drivers/power/qcom-charger/qpnp-smbcharger.c
@@ -3507,19 +3507,27 @@ static int smbchg_config_chg_battery_type(struct smbchg_chip *chip)
if (chip->battery_type && !strcmp(prop.strval, chip->battery_type))
return 0;
+ chip->battery_type = prop.strval;
batt_node = of_parse_phandle(node, "qcom,battery-data", 0);
if (!batt_node) {
pr_smb(PR_MISC, "No batterydata available\n");
return 0;
}
+ rc = power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, &prop);
+ if (rc < 0) {
+ pr_smb(PR_STATUS, "Unable to read battery-id rc=%d\n", rc);
+ return 0;
+ }
+
profile_node = of_batterydata_get_best_profile(batt_node,
- "bms", NULL);
- if (!profile_node) {
- pr_err("couldn't find profile handle\n");
- return -EINVAL;
+ prop.intval / 1000, NULL);
+ if (IS_ERR_OR_NULL(profile_node)) {
+ rc = PTR_ERR(profile_node);
+ pr_err("couldn't find profile handle %d\n", rc);
+ return rc;
}
- chip->battery_type = prop.strval;
/* change vfloat */
rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
diff --git a/drivers/power/qcom-charger/smb138x-charger.c b/drivers/power/qcom-charger/smb138x-charger.c
index f092f5a580b5..9a6baff27dac 100644
--- a/drivers/power/qcom-charger/smb138x-charger.c
+++ b/drivers/power/qcom-charger/smb138x-charger.c
@@ -423,7 +423,7 @@ static int smb138x_parallel_prop_is_writeable(struct power_supply *psy,
static const struct power_supply_desc parallel_psy_desc = {
.name = "parallel",
- .type = POWER_SUPPLY_TYPE_BATTERY,
+ .type = POWER_SUPPLY_TYPE_USB_PARALLEL,
.properties = smb138x_parallel_props,
.num_properties = ARRAY_SIZE(smb138x_parallel_props),
.get_property = smb138x_parallel_get_prop,
diff --git a/drivers/soc/qcom/glink_private.h b/drivers/soc/qcom/glink_private.h
index 2f064e546f48..cdd6988418f7 100644
--- a/drivers/soc/qcom/glink_private.h
+++ b/drivers/soc/qcom/glink_private.h
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/ratelimit.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index d7d08dc588e5..84f346385f18 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -2191,6 +2191,8 @@ static int subsys_name_to_id(const char *name)
return SMEM_WCNSS;
if (!strcmp(name, "spss"))
return SMEM_SPSS;
+ if (!strcmp(name, "cdsp"))
+ return SMEM_CDSP;
return -ENODEV;
}
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 37c73401a1c6..e4d235957981 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -39,15 +39,11 @@
#define LOCATOR_NOT_PRESENT 0
#define LOCATOR_PRESENT 1
-#define LOCATOR_UNKNOWN -1
-static u32 locator_status = LOCATOR_UNKNOWN;
+static u32 locator_status = LOCATOR_NOT_PRESENT;
static bool service_inited;
-int enable;
-module_param(enable, int, 0);
-
-DECLARE_COMPLETION(locator_status_known);
+module_param_named(enable, locator_status, uint, S_IRUGO | S_IWUSR);
static void service_locator_svc_arrive(struct work_struct *work);
static void service_locator_svc_exit(struct work_struct *work);
@@ -70,47 +66,6 @@ struct pd_qmi_data service_locator;
/* Please refer soc/qcom/service-locator.h for use about APIs defined here */
-static ssize_t show_service_locator_status(struct class *cl,
- struct class_attribute *attr,
- char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%x\n", locator_status);
-}
-
-static ssize_t store_service_locator_status(struct class *cl,
- struct class_attribute *attr,
- const char *buf, size_t size)
-{
- u32 val;
-
- if (kstrtos32(buf, 10, &val) < 0)
- goto err;
- if (val != LOCATOR_NOT_PRESENT && val != LOCATOR_PRESENT)
- goto err;
-
- mutex_lock(&service_init_mutex);
- locator_status = val;
- complete_all(&locator_status_known);
- mutex_unlock(&service_init_mutex);
- return size;
-err:
- pr_err("Invalid input parameters\n");
- return -EINVAL;
-}
-
-static struct class_attribute service_locator_class_attr[] = {
- __ATTR(service_locator_status, S_IRUGO | S_IWUSR,
- show_service_locator_status,
- store_service_locator_status),
- __ATTR_NULL,
-};
-
-static struct class service_locator_class = {
- .name = "service_locator",
- .owner = THIS_MODULE,
- .class_attrs = service_locator_class_attr,
-};
-
static int service_locator_svc_event_notify(struct notifier_block *this,
unsigned long code,
void *_cmd)
@@ -338,30 +293,13 @@ static int init_service_locator(void)
mutex_lock(&service_init_mutex);
if (locator_status == LOCATOR_NOT_PRESENT) {
- pr_err("Service Locator not present\n");
+ pr_err("Service Locator not enabled\n");
rc = -ENODEV;
goto inited;
}
if (service_inited)
goto inited;
- if (locator_status != LOCATOR_PRESENT) {
- mutex_unlock(&service_init_mutex);
- rc = wait_for_completion_timeout(&locator_status_known,
- msecs_to_jiffies(INITIAL_TIMEOUT));
- if (!rc) {
- locator_status = LOCATOR_NOT_PRESENT;
- pr_err("Timed out waiting for Service Locator\n");
- return -ENODEV;
- }
- mutex_lock(&service_init_mutex);
- if (locator_status == LOCATOR_NOT_PRESENT) {
- pr_err("Service Locator not present\n");
- rc = -ENODEV;
- goto inited;
- }
- }
-
service_locator.notifier.notifier_call =
service_locator_svc_event_notify;
init_completion(&service_locator.service_available);
@@ -509,10 +447,7 @@ static struct dentry *test_servloc_file;
static int __init service_locator_init(void)
{
- if (!enable)
- locator_status = LOCATOR_NOT_PRESENT;
-
- class_register(&service_locator_class);
+ pr_debug("service_locator_status = %d\n", locator_status);
test_servloc_file = debugfs_create_file("test_servloc",
S_IRUGO | S_IWUSR, NULL, NULL,
&servloc_fops);
@@ -523,7 +458,6 @@ static int __init service_locator_init(void)
static void __exit service_locator_exit(void)
{
- class_unregister(&service_locator_class);
debugfs_remove(test_servloc_file);
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7087b5744eef..35994b827549 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1787,6 +1787,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (is_on) {
+ dbg_event(0xFF, "Pullup_enable", is_on);
if (dwc->revision <= DWC3_REVISION_187A) {
reg &= ~DWC3_DCTL_TRGTULST_MASK;
reg |= DWC3_DCTL_TRGTULST_RX_DET;
@@ -1824,6 +1825,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
dwc->pullups_connected = true;
} else {
+ dbg_event(0xFF, "Pullup_disable", is_on);
dwc3_gadget_disable_irq(dwc);
__dwc3_gadget_ep_disable(dwc->eps[0]);
__dwc3_gadget_ep_disable(dwc->eps[1]);
@@ -1849,8 +1851,15 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
break;
}
timeout--;
- if (!timeout)
+ if (!timeout) {
+ dev_err(dwc->dev, "failed to %s controller\n",
+ is_on ? "start" : "stop");
+ if (is_on)
+ dbg_event(0xFF, "STARTTOUT", reg);
+ else
+ dbg_event(0xFF, "STOPTOUT", reg);
return -ETIMEDOUT;
+ }
udelay(1);
} while (1);
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 164bf0273597..5a355f226179 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1638,9 +1638,16 @@ static int mdss_mdp_gdsc_notifier_call(struct notifier_block *self,
if (event & REGULATOR_EVENT_ENABLE) {
__mdss_restore_sec_cfg(mdata);
} else if (event & REGULATOR_EVENT_PRE_DISABLE) {
- pr_debug("mdss gdsc is getting disabled\n");
- /* halt the vbif transactions */
- mdss_mdp_vbif_axi_halt(mdata);
+ int active_cnt = atomic_read(&mdata->active_intf_cnt);
+
+ pr_debug("mdss gdsc is getting disabled, active_cnt=%d\n",
+ active_cnt);
+ /*
+ * halt the vbif transactions only if we have any active
+ * overlay session
+ */
+ if (active_cnt)
+ mdss_mdp_vbif_axi_halt(mdata);
}
return NOTIFY_OK;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index f97a9f9a9adc..cd8df78bc8c0 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -561,7 +561,8 @@ static u32 __calc_qseed3_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
u64 active_line;
u64 backfill_line;
- ver_dwnscale = ((u64)src_h << PHASE_STEP_SHIFT) / dst.h;
+ ver_dwnscale = (u64)src_h << PHASE_STEP_SHIFT;
+ do_div(ver_dwnscale, dst.h);
if (ver_dwnscale > (MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM
<< PHASE_STEP_SHIFT)) {
diff --git a/include/linux/of_batterydata.h b/include/linux/of_batterydata.h
index fe2c996de264..5505371488d0 100644
--- a/include/linux/of_batterydata.h
+++ b/include/linux/of_batterydata.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,10 +39,7 @@ int of_batterydata_read_data(struct device_node *container_node,
* of_batterydata_get_best_profile() - Find matching battery data device node
* @batterydata_container_node: pointer to the battery-data container device
* node containing the profile nodes.
- * @psy_name: Name of the power supply which holds the
- * POWER_SUPPLY_RESISTANCE_ID value to be used to match
- * against the id resistances specified in the corresponding
- * battery data profiles.
+ * @batt_id_kohm: Battery ID in KOhms for which we want to find the profile.
* @batt_type: Battery type which we want to force load the profile.
*
* This routine returns a device_node pointer to the closest match battery data
@@ -50,7 +47,7 @@ int of_batterydata_read_data(struct device_node *container_node,
*/
struct device_node *of_batterydata_get_best_profile(
struct device_node *batterydata_container_node,
- const char *psy_name, const char *batt_type);
+ int batt_id_kohm, const char *batt_type);
#else
static inline int of_batterydata_read_data(struct device_node *container_node,
struct bms_battery_data *batt_data,
@@ -60,7 +57,7 @@ static inline int of_batterydata_read_data(struct device_node *container_node,
}
static inline struct device_node *of_batterydata_get_best_profile(
struct device_node *batterydata_container_node,
- struct device_node *best_node, const char *psy_name)
+ int batt_id_kohm, const char *batt_type)
{
return -ENXIO;
}
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5f4d135a00cc..15ee95fcd561 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -141,6 +141,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
* most likely due to retrans in 3WHS.
*/
+/* Number of full MSS to receive before Acking RFC2581 */
+#define TCP_DELACK_SEG 1
+
#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
* for local resources.
*/
@@ -287,6 +290,11 @@ extern int sysctl_tcp_pacing_ca_ratio;
extern int sysctl_tcp_default_init_rwnd;
extern atomic_long_t tcp_memory_allocated;
+
+/* sysctl variables for controlling various tcp parameters */
+extern int sysctl_tcp_delack_seg;
+extern int sysctl_tcp_use_userconfig;
+
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;
@@ -377,6 +385,12 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
+/* sysctl master controller */
+extern int tcp_use_userconfig_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+extern int tcp_proc_delayed_ack_control(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
{
diff --git a/include/soc/qcom/smem.h b/include/soc/qcom/smem.h
index 9295532dec8a..79bcc1b31cf8 100644
--- a/include/soc/qcom/smem.h
+++ b/include/soc/qcom/smem.h
@@ -26,6 +26,7 @@ enum {
SMEM_TZ,
SMEM_SPSS,
SMEM_HYP,
+ SMEM_CDSP,
NUM_SMEM_SUBSYSTEMS,
};
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index 34503420c882..dbba773cd49d 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -43,13 +43,13 @@
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SYNC 0x00000400
#define KGSL_CONTEXT_PWR_CONSTRAINT 0x00000800
-
#define KGSL_CONTEXT_PRIORITY_MASK 0x0000F000
#define KGSL_CONTEXT_PRIORITY_SHIFT 12
#define KGSL_CONTEXT_PRIORITY_UNDEF 0
#define KGSL_CONTEXT_IFH_NOP 0x00010000
#define KGSL_CONTEXT_SECURE 0x00020000
+#define KGSL_CONTEXT_NO_SNAPSHOT 0x00040000
#define KGSL_CONTEXT_PREEMPT_STYLE_MASK 0x0E000000
#define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT 25
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 0c5c83a84362..27fe13a534b4 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -702,7 +702,13 @@ enum v4l2_mpeg_vidc_video_rate_control {
V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR = 2,
V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_VFR = 3,
V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR = 4,
+ V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_CFR = 5,
+ V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_VFR = 6,
};
+#define V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_CFR \
+ V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_CFR
+#define V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_VFR \
+ V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_MBR_VFR
#define V4L2_CID_MPEG_VIDC_VIDEO_ROTATION (V4L2_CID_MPEG_MSM_VIDC_BASE+14)
enum v4l2_mpeg_vidc_video_rotation {
@@ -1155,6 +1161,11 @@ enum v4l2_mpeg_vidc_video_lowlatency_mode {
#define V4L2_CID_MPEG_VIDC_VIDEO_BLUR_HEIGHT \
(V4L2_CID_MPEG_MSM_VIDC_BASE + 90)
+#define V4L2_CID_MPEG_VIDEO_MIN_QP_PACKED \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 91)
+#define V4L2_CID_MPEG_VIDEO_MAX_QP_PACKED \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 92)
+
/* Camera class control IDs */
#define V4L2_CID_CAMERA_CLASS_BASE (V4L2_CTRL_CLASS_CAMERA | 0x900)
diff --git a/include/uapi/media/msmb_ispif.h b/include/uapi/media/msmb_ispif.h
index 26bd8a2ce87f..7f4deaf12683 100644
--- a/include/uapi/media/msmb_ispif.h
+++ b/include/uapi/media/msmb_ispif.h
@@ -72,6 +72,24 @@ enum msm_ispif_csid {
CSID_MAX
};
+enum msm_ispif_pixel_odd_even {
+ PIX_EVEN,
+ PIX_ODD
+};
+
+enum msm_ispif_pixel_pack_mode {
+ PACK_BYTE,
+ PACK_PLAIN_PACK,
+ PACK_NV_P8,
+ PACK_NV_P16
+};
+
+struct msm_ispif_pack_cfg {
+ int pixel_swap_en;
+ enum msm_ispif_pixel_odd_even even_odd_sel;
+ enum msm_ispif_pixel_pack_mode pack_mode;
+};
+
struct msm_ispif_params_entry {
enum msm_ispif_vfe_intf vfe_intf;
enum msm_ispif_intftype intftype;
@@ -83,6 +101,12 @@ struct msm_ispif_params_entry {
uint16_t crop_end_pixel;
};
+struct msm_ispif_param_data_ext {
+ uint32_t num;
+ struct msm_ispif_params_entry entries[MAX_PARAM_ENTRIES];
+ struct msm_ispif_pack_cfg pack_cfg[CID_MAX];
+};
+
struct msm_ispif_param_data {
uint32_t num;
struct msm_ispif_params_entry entries[MAX_PARAM_ENTRIES];
@@ -111,6 +135,7 @@ enum ispif_cfg_type_t {
ISPIF_RELEASE,
ISPIF_ENABLE_REG_DUMP,
ISPIF_SET_VFE_INFO,
+ ISPIF_CFG2,
};
struct ispif_cfg_data {
@@ -123,8 +148,19 @@ struct ispif_cfg_data {
};
};
+struct ispif_cfg_data_ext {
+ enum ispif_cfg_type_t cfg_type;
+ void __user *data;
+ uint32_t size;
+};
+
+#define ISPIF_RDI_PACK_MODE_SUPPORT 1
+
#define VIDIOC_MSM_ISPIF_CFG \
_IOWR('V', BASE_VIDIOC_PRIVATE, struct ispif_cfg_data)
+#define VIDIOC_MSM_ISPIF_CFG_EXT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE+1, struct ispif_cfg_data_ext)
+
#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 631dbb0a7041..f640c0e3c7ea 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2053,7 +2053,7 @@ static int send_notification(struct rq *rq, int check_pred, int check_groups)
unsigned int cur_freq, freq_required;
unsigned long flags;
int rc = 0;
- u64 group_load = 0, new_load;
+ u64 group_load = 0, new_load = 0;
if (!sched_enable_hmp)
return 0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 859416724e5a..958d79e1933c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4343,7 +4343,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
return 0;
sa->last_update_time = now;
- if (sched_use_pelt && !cfs_rq && weight) {
+ if (sched_use_pelt && cfs_rq && weight) {
se = container_of(sa, struct sched_entity, avg);
if (entity_is_task(se) && se->on_rq)
dec_hmp_sched_stats_fair(rq_of(cfs_rq), task_of(se));
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 24029ecf17ac..a839dcbc0fd8 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -42,6 +42,10 @@ static int tcp_syn_retries_min = 1;
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int tcp_delack_seg_min = TCP_DELACK_MIN;
+static int tcp_delack_seg_max = 60;
+static int tcp_use_userconfig_min;
+static int tcp_use_userconfig_max = 1;
/* Update system visible IP port range */
static void set_local_port_range(struct net *net, int range[2])
@@ -821,6 +825,25 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = &one
},
+ {
+ .procname = "tcp_delack_seg",
+ .data = &sysctl_tcp_delack_seg,
+ .maxlen = sizeof(sysctl_tcp_delack_seg),
+ .mode = 0644,
+ .proc_handler = tcp_proc_delayed_ack_control,
+ .extra1 = &tcp_delack_seg_min,
+ .extra2 = &tcp_delack_seg_max,
+ },
+ {
+ .procname = "tcp_use_userconfig",
+ .data = &sysctl_tcp_use_userconfig,
+ .maxlen = sizeof(sysctl_tcp_use_userconfig),
+ .mode = 0644,
+ .proc_handler = tcp_use_userconfig_sysctl_handler,
+ .extra1 = &tcp_use_userconfig_min,
+ .extra2 = &tcp_use_userconfig_max,
+ },
+
{ }
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5f76b73034ae..e5cdafcc2140 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -306,6 +306,12 @@ EXPORT_SYMBOL(sysctl_tcp_wmem);
atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
+int sysctl_tcp_delack_seg __read_mostly = TCP_DELACK_SEG;
+EXPORT_SYMBOL(sysctl_tcp_delack_seg);
+
+int sysctl_tcp_use_userconfig __read_mostly;
+EXPORT_SYMBOL(sysctl_tcp_use_userconfig);
+
/*
* Current number of TCP sockets.
*/
@@ -1406,8 +1412,11 @@ static void tcp_cleanup_rbuf(struct sock *sk, int copied)
/* Delayed ACKs frequently hit locked sockets during bulk
* receive. */
if (icsk->icsk_ack.blocked ||
- /* Once-per-two-segments ACK was not sent by tcp_input.c */
- tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
+ /* Once-per-sysctl_tcp_delack_seg segments
+ * ACK was not sent by tcp_input.c
+ */
+ tp->rcv_nxt - tp->rcv_wup > (icsk->icsk_ack.rcv_mss) *
+ sysctl_tcp_delack_seg ||
/*
* If this read emptied read buffer, we send ACK, if
* connection is not bidirectional, user drained
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index da34f830f4bc..b8f7e621e16e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4955,7 +4955,8 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
struct tcp_sock *tp = tcp_sk(sk);
/* More than one full frame received... */
- if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
+ if (((tp->rcv_nxt - tp->rcv_wup) > (inet_csk(sk)->icsk_ack.rcv_mss) *
+ sysctl_tcp_delack_seg &&
/* ... and right edge of window advances far enough.
* (tcp_recvmsg() will send ACK otherwise). Or...
*/
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 193ba1fa8a9a..ce20968de667 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -32,6 +32,40 @@ int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;
+/* Function to reset tcp_ack related sysctl on resetting master control */
+void set_tcp_default(void)
+{
+ sysctl_tcp_delack_seg = TCP_DELACK_SEG;
+}
+
+/* sysctl handler for tcp_ack related master control */
+int tcp_proc_delayed_ack_control(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length,
+ loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+ /* The ret value will be 0 if the input validation is successful
+ * and the values are written to sysctl table. If not, the stack
+ * will continue to work with currently configured values
+ */
+ return ret;
+}
+
+/* sysctl handler for tcp_ack related master control */
+int tcp_use_userconfig_sysctl_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length,
+ loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+ if (write && ret == 0) {
+ if (!sysctl_tcp_use_userconfig)
+ set_tcp_default();
+ }
+ return ret;
+}
+
static void tcp_write_err(struct sock *sk)
{
sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 6d75693e5c4d..f46057d027e0 100755
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -129,6 +129,7 @@
#define WCD9335_DEC_PWR_LVL_LP 0x02
#define WCD9335_DEC_PWR_LVL_HP 0x04
#define WCD9335_DEC_PWR_LVL_DF 0x00
+#define WCD9335_STRING_LEN 100
#define CALCULATE_VOUT_D(req_mv) (((req_mv - 650) * 10) / 25)
@@ -786,7 +787,7 @@ struct tasha_priv {
/* to track the status */
unsigned long status_mask;
- struct work_struct swr_add_devices_work;
+ struct work_struct tasha_add_child_devices_work;
struct wcd_swr_ctrl_platform_data swr_plat_data;
/* Port values for Rx and Tx codec_dai */
@@ -13464,7 +13465,7 @@ static int tasha_swrm_handle_irq(void *handle,
return ret;
}
-static void wcd_swr_ctrl_add_devices(struct work_struct *work)
+static void tasha_add_child_devices(struct work_struct *work)
{
struct tasha_priv *tasha;
struct platform_device *pdev;
@@ -13473,9 +13474,10 @@ static void wcd_swr_ctrl_add_devices(struct work_struct *work)
struct tasha_swr_ctrl_data *swr_ctrl_data = NULL, *temp;
int ret, ctrl_num = 0;
struct wcd_swr_ctrl_platform_data *platdata;
+ char plat_dev_name[WCD9335_STRING_LEN];
tasha = container_of(work, struct tasha_priv,
- swr_add_devices_work);
+ tasha_add_child_devices_work);
if (!tasha) {
pr_err("%s: Memory for WCD9335 does not exist\n",
__func__);
@@ -13496,17 +13498,17 @@ static void wcd_swr_ctrl_add_devices(struct work_struct *work)
platdata = &tasha->swr_plat_data;
for_each_child_of_node(wcd9xxx->dev->of_node, node) {
- temp = krealloc(swr_ctrl_data,
- (ctrl_num + 1) * sizeof(struct tasha_swr_ctrl_data),
- GFP_KERNEL);
- if (!temp) {
- dev_err(wcd9xxx->dev, "out of memory\n");
- ret = -ENOMEM;
- goto err;
- }
- swr_ctrl_data = temp;
- swr_ctrl_data[ctrl_num].swr_pdev = NULL;
- pdev = platform_device_alloc("tasha_swr_ctrl", -1);
+ if (!strcmp(node->name, "swr_master"))
+ strlcpy(plat_dev_name, "tasha_swr_ctrl",
+ (WCD9335_STRING_LEN - 1));
+ else if (strnstr(node->name, "msm_cdc_pinctrl",
+ strlen("msm_cdc_pinctrl")) != NULL)
+ strlcpy(plat_dev_name, node->name,
+ (WCD9335_STRING_LEN - 1));
+ else
+ continue;
+
+ pdev = platform_device_alloc(plat_dev_name, -1);
if (!pdev) {
dev_err(wcd9xxx->dev, "%s: pdev memory alloc failed\n",
__func__);
@@ -13516,28 +13518,45 @@ static void wcd_swr_ctrl_add_devices(struct work_struct *work)
pdev->dev.parent = tasha->dev;
pdev->dev.of_node = node;
- ret = platform_device_add_data(pdev, platdata,
- sizeof(*platdata));
- if (ret) {
- dev_err(&pdev->dev, "%s: cannot add plat data for ctrl:%d\n",
- __func__, ctrl_num);
- goto fail_pdev_add;
+ if (!strcmp(node->name, "swr_master")) {
+ ret = platform_device_add_data(pdev, platdata,
+ sizeof(*platdata));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: cannot add plat data ctrl:%d\n",
+ __func__, ctrl_num);
+ goto fail_pdev_add;
+ }
}
ret = platform_device_add(pdev);
if (ret) {
- dev_err(&pdev->dev, "%s: Cannot add swr platform device\n",
+ dev_err(&pdev->dev,
+ "%s: Cannot add platform device\n",
__func__);
goto fail_pdev_add;
}
- swr_ctrl_data[ctrl_num].swr_pdev = pdev;
- ctrl_num++;
- dev_dbg(&pdev->dev, "%s: Added soundwire ctrl device(s)\n",
- __func__);
+ if (!strcmp(node->name, "swr_master")) {
+ temp = krealloc(swr_ctrl_data,
+ (ctrl_num + 1) * sizeof(
+ struct tasha_swr_ctrl_data),
+ GFP_KERNEL);
+ if (!temp) {
+ dev_err(wcd9xxx->dev, "out of memory\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ swr_ctrl_data = temp;
+ swr_ctrl_data[ctrl_num].swr_pdev = pdev;
+ ctrl_num++;
+ dev_dbg(&pdev->dev,
+ "%s: Added soundwire ctrl device(s)\n",
+ __func__);
+ tasha->nr = ctrl_num;
+ tasha->swr_ctrl_data = swr_ctrl_data;
+ }
}
- tasha->nr = ctrl_num;
- tasha->swr_ctrl_data = swr_ctrl_data;
return;
fail_pdev_add:
@@ -13637,7 +13656,8 @@ static int tasha_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&tasha->power_gate_work, tasha_codec_power_gate_work);
mutex_init(&tasha->power_lock);
mutex_init(&tasha->sido_lock);
- INIT_WORK(&tasha->swr_add_devices_work, wcd_swr_ctrl_add_devices);
+ INIT_WORK(&tasha->tasha_add_child_devices_work,
+ tasha_add_child_devices);
BLOCKING_INIT_NOTIFIER_HEAD(&tasha->notifier);
mutex_init(&tasha->micb_lock);
mutex_init(&tasha->swr_read_lock);
@@ -13713,7 +13733,7 @@ static int tasha_probe(struct platform_device *pdev)
}
/* Update codec register default values */
tasha_update_reg_defaults(tasha);
- schedule_work(&tasha->swr_add_devices_work);
+ schedule_work(&tasha->tasha_add_child_devices_work);
tasha_get_codec_ver(tasha);
dev_info(&pdev->dev, "%s: Tasha driver probe done\n", __func__);
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 6b2ef88c7163..75387b7c2069 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -108,6 +108,7 @@ static const struct snd_kcontrol_new name##_mux = \
#define WCD934X_DEC_PWR_LVL_LP 0x02
#define WCD934X_DEC_PWR_LVL_HP 0x04
#define WCD934X_DEC_PWR_LVL_DF 0x00
+#define WCD934X_STRING_LEN 100
#define WCD934X_MAX_MICBIAS 4
#define DAPM_MICBIAS1_STANDALONE "MIC BIAS1 Standalone"
@@ -470,7 +471,7 @@ struct tavil_priv {
struct clk *wcd_ext_clk;
struct mutex codec_mutex;
- struct work_struct wcd_add_child_devices_work;
+ struct work_struct tavil_add_child_devices_work;
struct hpf_work tx_hpf_work[WCD934X_NUM_DECIMATORS];
struct tx_mute_work tx_mute_dwork[WCD934X_NUM_DECIMATORS];
};
@@ -5551,7 +5552,7 @@ static int tavil_swrm_handle_irq(void *handle,
return ret;
}
-static void wcd_add_child_devices(struct work_struct *work)
+static void tavil_add_child_devices(struct work_struct *work)
{
struct tavil_priv *tavil;
struct platform_device *pdev;
@@ -5560,9 +5561,10 @@ static void wcd_add_child_devices(struct work_struct *work)
struct tavil_swr_ctrl_data *swr_ctrl_data = NULL, *temp;
int ret, ctrl_num = 0;
struct wcd_swr_ctrl_platform_data *platdata;
+ char plat_dev_name[WCD934X_STRING_LEN];
tavil = container_of(work, struct tavil_priv,
- wcd_add_child_devices_work);
+ tavil_add_child_devices_work);
if (!tavil) {
pr_err("%s: Memory for WCD934X does not exist\n",
__func__);
@@ -5583,17 +5585,17 @@ static void wcd_add_child_devices(struct work_struct *work)
platdata = &tavil->swr.plat_data;
for_each_child_of_node(wcd9xxx->dev->of_node, node) {
- temp = krealloc(swr_ctrl_data,
- (ctrl_num + 1) * sizeof(struct tavil_swr_ctrl_data),
- GFP_KERNEL);
- if (!temp) {
- dev_err(wcd9xxx->dev, "out of memory\n");
- ret = -ENOMEM;
- goto err_mem;
- }
- swr_ctrl_data = temp;
- swr_ctrl_data[ctrl_num].swr_pdev = NULL;
- pdev = platform_device_alloc("tavil_swr_ctrl", -1);
+ if (!strcmp(node->name, "swr_master"))
+ strlcpy(plat_dev_name, "tavil_swr_ctrl",
+ (WCD934X_STRING_LEN - 1));
+ else if (strnstr(node->name, "msm_cdc_pinctrl",
+ strlen("msm_cdc_pinctrl")) != NULL)
+ strlcpy(plat_dev_name, node->name,
+ (WCD934X_STRING_LEN - 1));
+ else
+ continue;
+
+ pdev = platform_device_alloc(plat_dev_name, -1);
if (!pdev) {
dev_err(wcd9xxx->dev, "%s: pdev memory alloc failed\n",
__func__);
@@ -5603,34 +5605,51 @@ static void wcd_add_child_devices(struct work_struct *work)
pdev->dev.parent = tavil->dev;
pdev->dev.of_node = node;
- ret = platform_device_add_data(pdev, platdata,
- sizeof(*platdata));
- if (ret) {
- dev_err(&pdev->dev, "%s: cannot add plat data for ctrl:%d\n",
- __func__, ctrl_num);
- goto err_pdev_add;
+ if (strcmp(node->name, "swr_master") == 0) {
+ ret = platform_device_add_data(pdev, platdata,
+ sizeof(*platdata));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: cannot add plat data ctrl:%d\n",
+ __func__, ctrl_num);
+ goto err_pdev_add;
+ }
}
ret = platform_device_add(pdev);
if (ret) {
- dev_err(&pdev->dev, "%s: Cannot add swr platform device\n",
+ dev_err(&pdev->dev,
+ "%s: Cannot add platform device\n",
__func__);
goto err_pdev_add;
}
- swr_ctrl_data[ctrl_num].swr_pdev = pdev;
- ctrl_num++;
- dev_dbg(&pdev->dev, "%s: Added soundwire ctrl device(s)\n",
- __func__);
+ if (strcmp(node->name, "swr_master") == 0) {
+ temp = krealloc(swr_ctrl_data,
+ (ctrl_num + 1) * sizeof(
+ struct tavil_swr_ctrl_data),
+ GFP_KERNEL);
+ if (!temp) {
+ dev_err(wcd9xxx->dev, "out of memory\n");
+ ret = -ENOMEM;
+ goto err_pdev_add;
+ }
+ swr_ctrl_data = temp;
+ swr_ctrl_data[ctrl_num].swr_pdev = pdev;
+ ctrl_num++;
+ dev_dbg(&pdev->dev,
+ "%s: Added soundwire ctrl device(s)\n",
+ __func__);
+ tavil->swr.ctrl_data = swr_ctrl_data;
+ }
}
- tavil->swr.ctrl_data = swr_ctrl_data;
return;
err_pdev_add:
platform_device_put(pdev);
err_mem:
- kfree(swr_ctrl_data);
+ return;
}
static int __tavil_enable_efuse_sensing(struct tavil_priv *tavil)
@@ -5676,7 +5695,8 @@ static int tavil_probe(struct platform_device *pdev)
tavil->wcd9xxx = dev_get_drvdata(pdev->dev.parent);
tavil->dev = &pdev->dev;
- INIT_WORK(&tavil->wcd_add_child_devices_work, wcd_add_child_devices);
+ INIT_WORK(&tavil->tavil_add_child_devices_work,
+ tavil_add_child_devices);
mutex_init(&tavil->swr.read_mutex);
mutex_init(&tavil->swr.write_mutex);
mutex_init(&tavil->swr.clk_mutex);
@@ -5733,7 +5753,7 @@ static int tavil_probe(struct platform_device *pdev)
__func__);
goto err_cdc_reg;
}
- schedule_work(&tavil->wcd_add_child_devices_work);
+ schedule_work(&tavil->tavil_add_child_devices_work);
return ret;