summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/arm/msm/imem.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/msm/msm_hang_detect.txt1
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gpucc.txt23
-rw-r--r--Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt31
-rw-r--r--Documentation/devicetree/bindings/gpu/adreno-iommu.txt2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt4
-rw-r--r--Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt58
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt2
-rw-r--r--Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt18
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-msm.txt3
-rw-r--r--Documentation/devicetree/bindings/platform/msm/ipa.txt1
-rw-r--r--Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt4
-rw-r--r--Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt112
-rw-r--r--Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt26
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-qpnp.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/qpnp-pdphy.txt3
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--arch/arm/boot/compressed/head.S2
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi54
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi50
-rw-r--r--arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/msm-arm-smmu-falcon.dtsi204
-rw-r--r--arch/arm/boot/dts/qcom/msm-arm-smmu-impl-defs-falcon.dtsi409
-rw-r--r--arch/arm/boot/dts/qcom/msm-gdsc-falcon.dtsi159
-rw-r--r--arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/msm8996.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi11
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-blsp.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi88
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi50
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts29
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi42
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi136
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt.dtsi34
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi20
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi96
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi64
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-rumi.dts26
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-sim.dts26
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi46
-rw-r--r--arch/arm/boot/dts/qcom/msmfalcon.dtsi155
-rw-r--r--arch/arm/boot/dts/qcom/msmtriton-ion.dtsi52
-rw-r--r--arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi23
-rw-r--r--arch/arm/boot/dts/qcom/msmtriton.dtsi202
-rw-r--r--arch/arm/configs/msmfalcon_defconfig3
-rw-r--r--arch/arm64/configs/msmcortex-perf_defconfig5
-rw-r--r--arch/arm64/configs/msmcortex_defconfig5
-rw-r--r--arch/arm64/configs/msmfalcon-perf_defconfig1
-rw-r--r--arch/arm64/configs/msmfalcon_defconfig2
-rw-r--r--arch/arm64/include/asm/mmu_context.h7
-rw-r--r--drivers/base/regmap/regmap-swr.c14
-rw-r--r--drivers/char/adsprpc.c26
-rw-r--r--drivers/clk/clk.c20
-rw-r--r--drivers/clk/msm/clock-gcc-cobalt.c2
-rw-r--r--drivers/clk/msm/clock-mmss-cobalt.c9
-rw-r--r--drivers/clk/msm/clock-osm.c46
-rw-r--r--drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c83
-rw-r--r--drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h1
-rw-r--r--drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c122
-rw-r--r--drivers/clk/qcom/Kconfig11
-rw-r--r--drivers/clk/qcom/Makefile3
-rw-r--r--drivers/clk/qcom/clk-rcg2.c2
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c55
-rw-r--r--drivers/clk/qcom/clk-voter.c133
-rw-r--r--drivers/clk/qcom/clk-voter.h50
-rw-r--r--drivers/clk/qcom/common.c31
-rw-r--r--drivers/clk/qcom/common.h2
-rw-r--r--drivers/clk/qcom/gcc-msmfalcon.c39
-rw-r--r--drivers/clk/qcom/gpucc-msmfalcon.c482
-rw-r--r--drivers/cpuidle/lpm-levels.c321
-rw-r--r--drivers/cpuidle/lpm-levels.h17
-rw-r--r--drivers/devfreq/governor_bw_vbif.c12
-rw-r--r--drivers/devfreq/governor_memlat.c24
-rw-r--r--drivers/gpu/msm/Makefile2
-rw-r--r--drivers/gpu/msm/a5xx_reg.h1
-rw-r--r--drivers/gpu/msm/adreno.c158
-rw-r--r--drivers/gpu/msm/adreno.h67
-rw-r--r--drivers/gpu/msm/adreno_a5xx.c259
-rw-r--r--drivers/gpu/msm/adreno_a5xx_preempt.c2
-rw-r--r--drivers/gpu/msm/adreno_a5xx_snapshot.c2
-rw-r--r--drivers/gpu/msm/adreno_debugfs.c76
-rw-r--r--drivers/gpu/msm/adreno_dispatch.c1244
-rw-r--r--drivers/gpu/msm/adreno_dispatch.h38
-rw-r--r--drivers/gpu/msm/adreno_drawctxt.c73
-rw-r--r--drivers/gpu/msm/adreno_drawctxt.h27
-rw-r--r--drivers/gpu/msm/adreno_perfcounter.c27
-rw-r--r--drivers/gpu/msm/adreno_ringbuffer.c178
-rw-r--r--drivers/gpu/msm/adreno_ringbuffer.h6
-rw-r--r--drivers/gpu/msm/adreno_trace.h64
-rw-r--r--drivers/gpu/msm/kgsl.c268
-rw-r--r--drivers/gpu/msm/kgsl.h19
-rw-r--r--drivers/gpu/msm/kgsl_cffdump.c4
-rw-r--r--drivers/gpu/msm/kgsl_cffdump.h6
-rw-r--r--drivers/gpu/msm/kgsl_cmdbatch.h168
-rw-r--r--drivers/gpu/msm/kgsl_compat.h8
-rw-r--r--drivers/gpu/msm/kgsl_device.h14
-rw-r--r--drivers/gpu/msm/kgsl_drawobj.c (renamed from drivers/gpu/msm/kgsl_cmdbatch.c)642
-rw-r--r--drivers/gpu/msm/kgsl_drawobj.h198
-rw-r--r--drivers/gpu/msm/kgsl_iommu.c13
-rw-r--r--drivers/gpu/msm/kgsl_mmu.h2
-rw-r--r--drivers/gpu/msm/kgsl_pwrctrl.c45
-rw-r--r--drivers/gpu/msm/kgsl_pwrscale.c6
-rw-r--r--drivers/gpu/msm/kgsl_pwrscale.h4
-rw-r--r--drivers/gpu/msm/kgsl_trace.h44
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c6
-rw-r--r--drivers/iio/adc/qcom-rradc.c100
-rw-r--r--drivers/input/misc/Kconfig2
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/ots_pat9125/Kconfig14
-rw-r--r--drivers/input/misc/ots_pat9125/Makefile7
-rw-r--r--drivers/input/misc/ots_pat9125/pat9125_linux_driver.c627
-rw-r--r--drivers/input/misc/ots_pat9125/pixart_ots.c78
-rw-r--r--drivers/input/misc/ots_pat9125/pixart_ots.h58
-rw-r--r--drivers/input/misc/ots_pat9125/pixart_platform.h17
-rw-r--r--drivers/input/touchscreen/gt9xx/gt9xx.c236
-rw-r--r--drivers/input/touchscreen/gt9xx/gt9xx.h11
-rw-r--r--drivers/input/touchscreen/gt9xx/gt9xx_update.c13
-rw-r--r--drivers/iommu/arm-smmu.c41
-rw-r--r--drivers/iommu/dma-mapping-fast.c6
-rw-r--r--drivers/iommu/io-pgtable-arm.c4
-rw-r--r--drivers/iommu/iommu-debug.c22
-rw-r--r--drivers/iommu/msm_dma_iommu_mapping.c12
-rw-r--r--drivers/leds/leds-qpnp-flash-v2.c107
-rw-r--r--drivers/media/dvb-core/demux.h6
-rw-r--r--drivers/media/dvb-core/dmxdev.c51
-rw-r--r--drivers/media/dvb-core/dvb_demux.c16
-rw-r--r--drivers/media/dvb-core/dvb_net.c6
-rw-r--r--drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c15
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp40.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp44.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp46.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.c26
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c143
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c2
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h8
-rw-r--r--drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c4
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c14
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c8
-rw-r--r--drivers/media/platform/msm/vidc/msm_vdec.c59
-rw-r--r--drivers/media/platform/msm/vidc/msm_venc.c155
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc.c4
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.c39
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_dcvs.c16
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_debug.c8
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_internal.h2
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_res_parse.c9
-rw-r--r--drivers/media/platform/msm/vidc/venus_boot.c13
-rw-r--r--drivers/media/platform/msm/vidc/venus_hfi.c18
-rw-r--r--drivers/mfd/wcd934x-regmap.c46
-rw-r--r--drivers/mfd/wcd9xxx-regmap.h7
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_utils.c1
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_utils_aio.c20
-rw-r--r--drivers/mmc/host/sdhci-msm.c727
-rw-r--r--drivers/mmc/host/sdhci-msm.h4
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c90
-rw-r--r--drivers/net/wireless/ath/wil6210/ftm.c903
-rw-r--r--drivers/net/wireless/ath/wil6210/ftm.h512
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h19
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c32
-rw-r--r--drivers/net/wireless/cnss/cnss_pci.c10
-rw-r--r--drivers/platform/msm/gsi/gsi.c236
-rw-r--r--drivers/platform/msm/gsi/gsi.h21
-rw-r--r--drivers/platform/msm/gsi/gsi_dbg.c53
-rw-r--r--drivers/platform/msm/gsi/gsi_reg.h217
-rw-r--r--drivers/platform/msm/ipa/ipa_api.c12
-rw-r--r--drivers/platform/msm/ipa/ipa_clients/ipa_usb.c16
-rw-r--r--drivers/platform/msm/ipa/ipa_rm.c7
-rw-r--r--drivers/platform/msm/ipa/ipa_rm_i.h4
-rw-r--r--drivers/platform/msm/ipa/ipa_rm_resource.c15
-rw-r--r--drivers/platform/msm/ipa/ipa_rm_resource.h3
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa.c42
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_dp.c26
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_intf.c3
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c26
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c12
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa.c68
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_dp.c61
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c26
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_i.h3
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_intf.c4
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c21
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_rt.c7
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_utils.c7
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c6
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c18
-rw-r--r--drivers/platform/msm/msm_11ad/msm_11ad.c12
-rw-r--r--drivers/platform/msm/qpnp-revid.c9
-rw-r--r--drivers/power/power_supply_sysfs.c7
-rw-r--r--drivers/power/qcom-charger/bcl_peripheral.c2
-rw-r--r--drivers/power/qcom-charger/fg-core.h72
-rw-r--r--drivers/power/qcom-charger/fg-reg.h1
-rw-r--r--drivers/power/qcom-charger/fg-util.c55
-rw-r--r--drivers/power/qcom-charger/qpnp-fg-gen3.c1344
-rw-r--r--drivers/power/qcom-charger/qpnp-smb2.c244
-rw-r--r--drivers/power/qcom-charger/smb-lib.c1300
-rw-r--r--drivers/power/qcom-charger/smb-lib.h74
-rw-r--r--drivers/power/qcom-charger/smb-reg.h40
-rw-r--r--drivers/power/qcom-charger/smb1351-charger.c8
-rw-r--r--drivers/power/qcom-charger/smb138x-charger.c29
-rw-r--r--drivers/power/reset/msm-poweroff.c197
-rw-r--r--drivers/pwm/pwm-qpnp.c11
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c26
-rw-r--r--drivers/scsi/ufs/ufshcd.c94
-rw-r--r--drivers/scsi/ufs/ufshcd.h16
-rw-r--r--drivers/soc/qcom/Kconfig8
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/common_log.c2
-rw-r--r--drivers/soc/qcom/core_hang_detect.c24
-rw-r--r--drivers/soc/qcom/early_random.c63
-rw-r--r--drivers/soc/qcom/glink.c4
-rw-r--r--drivers/soc/qcom/glink_spi_xprt.c19
-rw-r--r--drivers/soc/qcom/icnss.c258
-rw-r--r--drivers/soc/qcom/msm_smem.c6
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_tal_glink.c1
-rw-r--r--drivers/soc/qcom/qdsp6v2/msm_audio_ion.c5
-rw-r--r--drivers/soc/qcom/qdsp6v2/voice_svc.c41
-rw-r--r--drivers/soc/qcom/qsee_ipc_irq_bridge.c12
-rw-r--r--drivers/soc/qcom/rpm-smd-debug.c2
-rw-r--r--drivers/soc/qcom/rpm-smd.c8
-rw-r--r--drivers/soc/qcom/service-notifier.c4
-rw-r--r--drivers/soc/qcom/watchdog_v2.c2
-rw-r--r--drivers/soc/qcom/wcd-dsp-glink.c242
-rw-r--r--drivers/thermal/cpu_cooling.c56
-rw-r--r--drivers/thermal/lmh_lite.c13
-rw-r--r--drivers/thermal/msm_lmh_dcvs.c76
-rw-r--r--drivers/thermal/msm_thermal.c2
-rw-r--r--drivers/tty/serial/msm_serial_hs.c25
-rw-r--r--drivers/usb/core/hcd.c3
-rw-r--r--drivers/usb/core/hub.c6
-rw-r--r--drivers/usb/dwc3/gadget.c22
-rw-r--r--drivers/usb/gadget/function/f_midi.c3
-rw-r--r--drivers/usb/gadget/function/f_ncm.c82
-rw-r--r--drivers/usb/gadget/function/f_qc_rndis.c6
-rw-r--r--drivers/usb/pd/policy_engine.c739
-rw-r--r--drivers/usb/pd/qpnp-pdphy.c2
-rw-r--r--drivers/video/fbdev/core/fbcmap.c8
-rw-r--r--drivers/video/fbdev/msm/mdss.h2
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.c331
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.h39
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_aux.c176
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_util.c114
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_util.h61
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_panel.c44
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_edid.c170
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_tx.c51
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_util.c39
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_util.h2
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c97
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.h7
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_ctl.c11
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_hwio.h4
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c84
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c30
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_layer.c14
-rw-r--r--drivers/video/fbdev/msm/mdss_panel.h77
-rw-r--r--drivers/video/fbdev/msm/mdss_smmu.c8
-rw-r--r--drivers/video/fbdev/msm/msm_ext_display.c18
-rw-r--r--fs/ext4/inode.c7
-rw-r--r--include/dt-bindings/clock/msm-clocks-cobalt.h8
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msmfalcon.h50
-rw-r--r--include/dt-bindings/clock/qcom,gpu-msmfalcon.h47
-rw-r--r--include/linux/cgroup_subsys.h4
-rw-r--r--include/linux/clk-provider.h3
-rw-r--r--include/linux/cpu_cooling.h16
-rw-r--r--include/linux/diagchar.h18
-rw-r--r--include/linux/iommu.h1
-rw-r--r--include/linux/leds-qpnp-flash.h3
-rw-r--r--include/linux/mfd/wcd934x/registers.h2
-rw-r--r--include/linux/msm_ext_display.h1
-rw-r--r--include/linux/msm_gsi.h10
-rw-r--r--include/linux/power_supply.h7
-rw-r--r--include/linux/qpnp/qpnp-revid.h1
-rw-r--r--include/linux/sched.h15
-rw-r--r--include/linux/sched/core_ctl.h (renamed from kernel/sched/core_ctl.h)7
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/types.h3
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/soc/qcom/icnss.h1
-rw-r--r--include/soc/qcom/smem.h2
-rw-r--r--include/sound/wcd-dsp-mgr.h18
-rw-r--r--include/trace/events/sched.h63
-rw-r--r--include/trace/events/trace_msm_low_power.h58
-rw-r--r--include/uapi/sound/wcd-dsp-glink.h2
-rw-r--r--init/Kconfig17
-rw-r--r--init/main.c9
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/irq/cpuhotplug.c3
-rw-r--r--kernel/sched/core.c89
-rw-r--r--kernel/sched/core_ctl.c33
-rw-r--r--kernel/sched/debug.c1
-rw-r--r--kernel/sched/fair.c70
-rw-r--r--kernel/sched/hmp.c722
-rw-r--r--kernel/sched/sched.h33
-rw-r--r--kernel/sched/tune.c224
-rw-r--r--kernel/smpboot.c2
-rw-r--r--kernel/sysctl.c12
-rw-r--r--lib/asn1_decoder.c16
-rw-r--r--net/core/dev.c3
-rw-r--r--net/ipv4/tcp_input.c15
-rw-r--r--net/wireless/db.txt2
-rw-r--r--sound/soc/codecs/msm_hdmi_codec_rx.c7
-rw-r--r--sound/soc/codecs/wcd-dsp-mgr.c62
-rw-r--r--sound/soc/codecs/wcd-spi.c176
-rw-r--r--sound/soc/codecs/wcd9335.c20
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-dsd.c33
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-dsd.h1
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c120
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h7
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x.c331
-rw-r--r--sound/soc/codecs/wcd9xxx-common-v2.c5
-rw-r--r--sound/soc/codecs/wcd9xxx-resmgr-v2.c2
-rw-r--r--sound/soc/msm/msmcobalt.c77
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c287
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h14
-rw-r--r--sound/usb/usb_audio_qmi_svc.c2
329 files changed, 17017 insertions, 5193 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/imem.txt b/Documentation/devicetree/bindings/arm/msm/imem.txt
index d1f8ce1e5ac8..a9d2a2456cfd 100644
--- a/Documentation/devicetree/bindings/arm/msm/imem.txt
+++ b/Documentation/devicetree/bindings/arm/msm/imem.txt
@@ -46,6 +46,12 @@ Required properties:
-compatible: "qcom,msm-imem-restart_reason"
-reg: start address and size of restart_reason region in imem
+Download Mode Type:
+-------------------
+Required properties:
+-compatible: "qcom,msm-imem-dload-type"
+-reg: start address and size of dload type region in imem
+
Download Mode:
--------------
Required properties:
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_hang_detect.txt b/Documentation/devicetree/bindings/arm/msm/msm_hang_detect.txt
index 7f23d9a3c6e8..1700d588fd46 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_hang_detect.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_hang_detect.txt
@@ -25,6 +25,7 @@ The device tree parameters for the core hang detection are:
Required properties:
- compatible : "qcom,core-hang-detect"
+- label: unique name used to create the sysfs entry
- qcom,threshold-arr :
Array of APCS_ALIAS*_CORE_HANG_THRESHOLD register address
for each core.
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
new file mode 100644
index 000000000000..9f8ea0d6ef8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
@@ -0,0 +1,23 @@
+Qualcomm Technologies, Inc Graphics Clock & Reset Controller Binding
+--------------------------------------------------------------------
+
+Required properties :
+- compatible : shall contain only one of the following:
+
+ "qcom,gpucc-msmfalcon"
+
+- reg : shall contain base register location and length
+- #clock-cells : shall contain 1
+- #reset-cells : shall contain 1
+
+Optional properties :
+- #power-domain-cells : shall contain 1
+
+Example:
+ clock-controller@4000000 {
+ compatible = "qcom,gpucc-msmfalcon";
+ reg = <0x5065000 0x10000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index 90abf0305319..68b8f09238e0 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -249,6 +249,35 @@ Optional properties:
60 = 60 frames per second (default)
- qcom,mdss-dsi-panel-clockrate: A 64 bit value specifies the panel clock speed in Hz.
0 = default value.
+- qcom,mdss-mdp-kickoff-threshold: This property can be used to define a region
+ (in terms of scanlines) where the hardware is allowed
+ to trigger a data transfer from MDP to DSI.
+ If this property is used, the region must be defined setting
+ two values, the low and the high thresholds:
+ <low_threshold high_threshold>
+ Where following condition must be met:
+ low_threshold < high_threshold
+ These values will be used by the driver in such way that if
+ the Driver receives a request to kickoff a transfer (MDP to DSI),
+ the transfer will be triggered only if the following condition
+ is satisfied:
+ low_threshold < scanline < high_threshold
+ If the condition is not met, then the driver will delay the
+ transfer by the time defined in the following property:
+ "qcom,mdss-mdp-kickoff-delay".
+ So in order to use this property, the delay property must
+ be defined as well and greater than 0.
+- qcom,mdss-mdp-kickoff-delay: This property defines the delay in microseconds that
+ the driver will delay before triggering an MDP transfer if the
+ thresholds defined by the following property are not met:
+ "qcom,mdss-mdp-kickoff-threshold".
+ So in order to use this property, the threshold property must
+ be defined as well. Note that this delay cannot be zero
+ and also should not be greater than the fps window.
+ i.e. For 60fps value should not exceed 16666 uS.
- qcom,mdss-mdp-transfer-time-us: Specifies the dsi transfer time for command mode
panels in microseconds. Driver uses this number to adjust
the clock rate according to the expected transfer time.
@@ -568,6 +597,8 @@ Example:
qcom,mdss-dsi-dma-trigger = <0>;
qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-panel-clockrate = <424000000>;
+ qcom,mdss-mdp-kickoff-threshold = <11 2430>;
+ qcom,mdss-mdp-kickoff-delay = <1000>;
qcom,mdss-mdp-transfer-time-us = <12500>;
qcom,mdss-dsi-panel-timings = [7d 25 1d 00 37 33
22 27 1e 03 04 00];
diff --git a/Documentation/devicetree/bindings/gpu/adreno-iommu.txt b/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
index de88a6eba7a5..b399145ea8a2 100644
--- a/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
@@ -36,8 +36,6 @@ Optional properties:
for secure buffer allocation
- qcom,secure_align_mask: A mask for determining how secure buffers need to
be aligned
-- qcom,coherent-htw: A boolean specifying if coherent hardware table walks should
- be enabled.
- List of sub nodes, one for each of the translation context banks supported.
The driver uses the names of these nodes to determine how they are used,
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt b/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
index 721a4f72563e..1ab49edfe30c 100644
--- a/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
@@ -41,6 +41,10 @@ The channel list supported by the RRADC driver is available in the enum rradc_ch
located at drivers/iio/adc/qcom-rradc.c. Clients can use this index from the enum
as the channel number while requesting ADC reads.
+Optional property:
+- qcom,pmic-revid : Phandle pointing to the revision peripheral node. Use it to query the
+ PMIC fabrication ID for applying the appropriate temperature
+ compensation parameters.
Example:
/* RRADC node */
diff --git a/Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt b/Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt
new file mode 100644
index 000000000000..d9caa295cc6e
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt
@@ -0,0 +1,58 @@
+PixArt pat9125 rotating switch
+
+The Pixart's PAT9125 controller is connected to the host processor via I2C.
+It detects the rotation when user rotates the switch and generates interrupt
+to the Host processor. The host processor reads the direction and number of
+steps over I2C and passes the data to the rest of the system.
+
+Required properties:
+ - compatible : should be "pixart,pat9125".
+ - reg : i2c slave address of the device.
+ - interrupt-parent : parent of interrupt.
+ - interrupts : interrupt to indicate motion of the rotating switch.
+ - vdd-supply : Power supply needed to power up the device.
+ - vld-supply : Power source required to power up I2C bus.
+
+Optional properties:
+ - pixart,inverse-x : boolean, use this to invert the x data before sending it to input framework
+ - pixart,inverse-y : boolean, use this to invert the y data before sending it to input framework
+ - pixart,press-enabled : boolean, use this to enable detection of pressing the button
+ - pinctrl-names : This should be defined if a target uses pinctrl framework.
+ See "pinctrl" in Documentation/devicetree/bindings/pinctrl/msm-pinctrl.txt
+ It should specify the names of the configs that pinctrl can
+ install in driver.
+ Following are the pinctrl configs that can be installed:
+ "pmx_rot_switch_active" : Active configuration of pins,
+ it should specify active config
+ defined in pin groups of
+ interrupt gpio.
+ "pmx_rot_switch_suspend" : Disabled configuration of
+ pins, it should specify sleep
+ config defined in pin groups
+ of interrupt gpio.
+ "pmx_rot_switch_release" : Release configuration of
+ pins, it should specify release
+ config defined in pin groups of
+ interrupt gpio.
+ - pixart,irq-gpio : This should be defined if a target doesn't use pinctrl framework.
+ irq gpio, which is to provide interrupts to host, same as "interrupts" node.
+
+Required properties if 'pixart,press-enabled' DT property is defined:
+ - pixart,press-keycode : keycode to be sent when press is detected by the driver.
+
+Example:
+ pixart_pat9125@75 {
+ compatible = "pixart,pat9125";
+ reg = <0x75>;
+ interrupt-parent = <&msm_gpio>;
+ interrupts = <98 0x2008>;
+ vdd-supply = <&pm8110_l5>;
+ vld-supply = <&pm8110_l17>;
+ pixart,irq-gpio = <&msm_gpio 98 0x2008>;
+ pinctrl-names = "pmx_rot_switch_active",
+ "pmx_rot_switch_suspend",
+ "pmx_rot_switch_release";
+ pinctrl-0 = <&pix_int_active>;
+ pinctrl-1 = <&pix_int_suspend>;
+ pinctrl-2 = <&pix_release>;
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt b/Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt
index 4c676fa66e62..ff8fb76166a3 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/gt9xx/gt9xx.txt
@@ -34,7 +34,7 @@ Optional properties:
It is a four tuple consisting of min x,
min y, max x and max y values.
- goodix,i2c-pull-up : To specify pull up is required.
- - goodix,no-force-update : To specify force update is allowed.
+ - goodix,force-update : To specify force update is allowed.
- goodix,enable-power-off : Power off touchscreen during suspend.
- goodix,button-map : Button map of key codes. The number of key codes
depend on panel.
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
index bd05c8bebfc8..8365762e520f 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
@@ -38,18 +38,28 @@ Optional properties:
Default value is 4500000 uA.
- qcom,rparasitic-uohm : Integer property for flash current predictive mitigation indicating
parasitic component of battery resistance. Default value is 0 uOhm.
-- qcom,lmh-ocv-threshold-uv : Required property for flash current preemptive mitigation.
+- qcom,lmh-ocv-threshold-uv : Required property for flash current preemptive LMH mitigation.
Default value is 3700000 uV.
-- qcom,lmh-rbatt-threshold-uohm : Required property for flash current preemptive mitigation.
+- qcom,lmh-rbatt-threshold-uohm : Required property for flash current preemptive LMH mitigation.
Default value is 400000 uOhm.
-- qcom,lmh-mitigation-sel : Optional property to configure flash current preemptive mitigation.
+- qcom,lmh-mitigation-sel : Optional property to configure flash current preemptive LMH mitigation.
Accepted values are:
0: MITIGATION_DISABLED
1: MITIGATION_BY_ILED_THRESHOLD
2: MITIGATION_BY_SW
Default value is 2.
-- qcom,lmh-level : Optional property to configure flash current preemptive mitigation.
+- qcom,chgr-mitigation-sel : Optional property to configure flash current preemptive charger mitigation.
+ Accepted values are:
+ 0: MITIGATION_DISABLED
+ 1: MITIGATION_BY_ILED_THRESHOLD
+ 2: MITIGATION_BY_SW
+ Default value is 2.
+- qcom,lmh-level : Optional property to configure flash current preemptive LMH mitigation.
Accepted values are 0, 1, and 3. Default value is 0.
+- qcom,iled-thrsh-ma : Optional property to configure the led current threshold at which HW
+ preemptive mitigation is triggered. Unit is mA. Default value is 1000.
+ Accepted values are in the range 0 - 3100, with steps of 100.
+ 0 disables autonomous HW mitigation.
- qcom,thermal-derate-en : Boolean property to enable flash current thermal mitigation.
- qcom,thermal-derate-current : Array of current limits for thermal mitigation. Required if
qcom,thermal-derate-en is specified. Unit is mA. Format is
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 3fc5d6cda7c9..0b46fd3d8ebf 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -4,6 +4,9 @@ Secure Digital Host Controller provides standard host interface to SD/MMC/SDIO c
Required properties:
- compatible : should be "qcom,sdhci-msm"
+ For SDCC version 5.0.0, MCI registers are removed from SDCC interface
+ and some registers are moved to HC. New compatible string is added to
+ support this change - "qcom,sdhci-msm-v5".
- reg : should contain SDHC, SD Core register map.
- reg-names : indicates various resources passed to driver (via reg property) by name.
Required "reg-names" are "hc_mem" and "core_mem"
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index a8db893f6709..80f2d8f43e35 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -36,7 +36,6 @@ Optional:
compatible "qcom,ipa-smmu-wlan-cb"
- ipa_smmu_uc: uc SMMU device
compatible "qcom,ipa-smmu-uc-cb"
-- qcom,smmu-disable-htw: boolean value to turn off SMMU page table caching
- qcom,use-a2-service: determine if A2 service will be used
- qcom,use-ipa-tethering-bridge: determine if tethering bridge will be used
- qcom,use-ipa-bamdma-a2-bridge: determine if a2/ipa hw bridge will be used
diff --git a/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt b/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt
index 93312df2a43b..babc4523a29a 100644
--- a/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt
+++ b/Documentation/devicetree/bindings/platform/msm/qpnp-revid.txt
@@ -6,6 +6,10 @@ Required properties:
- compatible : should be "qcom,qpnp-revid"
- reg : offset and length of the PMIC peripheral register map.
+Optional property:
+- qcom,fab-id-valid: Use this property when support to read Fab
+ identification from REV ID peripheral is available.
+
Example:
qcom,revid@100 {
compatible = "qcom,qpnp-revid";
diff --git a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
index bd358593fcb3..caabcd347a72 100644
--- a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
@@ -87,7 +87,8 @@ First Level Node - FG Gen3 device
Value type: <u32>
Definition: Percentage of monotonic SOC increase upon which the delta
SOC interrupt will be triggered. If this property is not
- specified, then the default value will be 1.
+ specified, then the default value will be 1. Possible
+ values are in the range of 0 to 12.
- qcom,fg-recharge-soc-thr
Usage: optional
@@ -145,6 +146,112 @@ First Level Node - FG Gen3 device
Value type: <bool>
Definition: Enables the cycle counter feature.
+- qcom,fg-force-load-profile
+ Usage: optional
+ Value type: <bool>
+ Definition: If set, battery profile will be force loaded if the profile
+ loaded earlier by bootloader doesn't match with the profile
+ available in the device tree.
+
+- qcom,cl-start-capacity
+ Usage: optional
+ Value type: <u32>
+ Definition: Battery SOC threshold to start the capacity learning.
+ If this is not specified, then the default value used
+ will be 15.
+
+- qcom,cl-min-temp
+ Usage: optional
+ Value type: <u32>
+ Definition: Lower limit of battery temperature to start the capacity
+ learning. If this is not specified, then the default value
+ used will be 150 (15C). Unit is in decidegC.
+
+- qcom,cl-max-temp
+ Usage: optional
+ Value type: <u32>
+ Definition: Upper limit of battery temperature to start the capacity
+ learning. If this is not specified, then the default value
+ used will be 450 (45C). Unit is in decidegC.
+
+- qcom,cl-max-increment
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum capacity increment allowed per capacity learning
+ cycle. If this is not specified, then the default value
+ used will be 5 (0.5%). Unit is in decipercentage.
+
+- qcom,cl-max-decrement
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum capacity decrement allowed per capacity learning
+ cycle. If this is not specified, then the default value
+ used will be 100 (10%). Unit is in decipercentage.
+
+- qcom,cl-min-limit
+ Usage: optional
+ Value type: <u32>
+ Definition: Minimum limit that the capacity cannot go below in a
+ capacity learning cycle. If this is not specified, then
+ the default value is 0. Unit is in decipercentage.
+
+- qcom,cl-max-limit
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum limit that the capacity cannot go above in a
+ capacity learning cycle. If this is not specified, then
+ the default value is 0. Unit is in decipercentage.
+
+- qcom,fg-jeita-hyst-temp
+ Usage: optional
+ Value type: <u32>
+ Definition: Hysteresis applied to Jeita temperature comparison.
+ Possible values are:
+ 0 - No hysteresis
+ 1,2,3 - Value in Celsius.
+
+- qcom,fg-batt-temp-delta
+ Usage: optional
+ Value type: <u32>
+ Definition: Battery temperature delta interrupt threshold. Possible
+ values are: 2, 4, 6 and 10. Unit is in Kelvin.
+
+- qcom,hold-soc-while-full
+ Usage: optional
+ Value type: <bool>
+ Definition: A boolean property that when defined holds SOC at 100% when
+ the battery is full.
+
+- qcom,ki-coeff-soc-dischg
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: Array of monotonic SOC threshold values to change the ki
+ coefficient for medium discharge current during discharge.
+ This should be defined in the ascending order and in the
+ range of 0-100. Array limit is set to 3.
+
+- qcom,ki-coeff-med-dischg
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: Array of ki coefficient values for medium discharge current
+ during discharge. These values will be applied when the
+ monotonic SOC goes below the SOC threshold specified under
+ qcom,ki-coeff-soc-dischg. Array limit is set to 3. This
+ property should be specified if qcom,ki-coeff-soc-dischg
+ is specified to make it fully functional. Value has no
+ unit. Allowed range is 0 to 62200 in micro units.
+
+- qcom,ki-coeff-hi-dischg
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: Array of ki coefficient values for high discharge current
+ during discharge. These values will be applied when the
+ monotonic SOC goes below the SOC threshold specified under
+ qcom,ki-coeff-soc-dischg. Array limit is set to 3. This
+ property should be specified if qcom,ki-coeff-soc-dischg
+ is specified to make it fully functional. Value has no
+ unit. Allowed range is 0 to 62200 in micro units.
+
==========================================================
Second Level Nodes - Peripherals managed by FG Gen3 driver
==========================================================
@@ -175,6 +282,9 @@ pmicobalt_fg: qpnp,fg {
qcom,pmic-revid = <&pmicobalt_revid>;
io-channels = <&pmicobalt_rradc 3>;
io-channel-names = "rradc_batt_id";
+ qcom,ki-coeff-soc-dischg = <30 60 90>;
+ qcom,ki-coeff-med-dischg = <800 1000 1400>;
+ qcom,ki-coeff-hi-dischg = <1200 1500 2100>;
status = "okay";
qcom,fg-batt-soc@4000 {
diff --git a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
index 21404dfc4b7b..82386ba9b082 100644
--- a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
@@ -53,6 +53,12 @@ Charger specific properties:
Definition: Specifies the USB input current limit in micro-amps.
If the value is not present, 1.5Amps is used as default.
+- qcom,usb-ocl-ua
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the OTG output current limit in micro-amps.
+ If the value is not present, 1.5Amps is used as default.
+
- qcom,dc-icl-ua
Usage: optional
Value type: <u32>
@@ -104,6 +110,25 @@ Charger specific properties:
will use io-channel-names to match IIO input names
with IIO specifiers.
+- qcom,float-option
+ Usage: optional
+ Value type: <u32>
+ Definition: Configures how the charger behaves when a float charger is
+ detected by APSD
+ 1 - Treat as a DCP
+ 2 - Treat as an SDP
+ 3 - Disable charging
+ 4 - Suspend USB input
+
+- qcom,hvdcp-disable
+ Usage: optional
+ Value type: <empty>
+ Definition: Specifies whether HVDCP charging is to be enabled.
+ If this property is not specified, HVDCP will be enabled.
+ If this property is specified, hvdcp 2.0 detection will still
+ happen but the adapter won't be asked to switch to a higher
+ voltage point.
+
=============================================
Second Level Nodes - SMB2 Charger Peripherals
=============================================
@@ -137,7 +162,6 @@ pmicobalt_charger: qcom,qpnp-smb2 {
io-channels = <&pmic_rradc 0>;
io-channel-names = "rradc_batt_id";
- qcom,suspend-input;
dpdm-supply = <&qusb_phy0>;
qcom,step-soc-thresholds = <60 70 80 90>;
diff --git a/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt b/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
index c784a01d6411..8cb513b5605f 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
@@ -15,6 +15,7 @@ Required device bindings:
- reg-names: Name for the above register.
"qpnp-lpg-channel-base" = physical base address of the
controller's LPG channel register.
+- qcom,lpg-lut-size: LPG LUT size.
- qcom,channel-id: channel Id for the PWM.
- qcom,supported-sizes: Supported PWM sizes.
Following three pwm sizes lists are supported by PWM/LPG controllers.
diff --git a/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt b/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
index f5c6651affea..cd1386512bd3 100644
--- a/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
+++ b/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
@@ -40,6 +40,9 @@ Optional properties:
- vconn-supply: Regulator that enables VCONN source output. This will
be supplied on the USB CC line that is not used for
communication when Ra resistance is detected.
+- qcom,vconn-uses-external-source: Indicates whether VCONN supply is sourced
+ from an external regulator. If omitted, then it is
+ assumed it is connected to VBUS.
Example:
qcom,qpnp-pdphy@1700 {
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 1d7e54f68ee4..91412a10bf65 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -175,6 +175,7 @@ parade Parade Technologies Inc.
pericom Pericom Technology Inc.
phytec PHYTEC Messtechnik GmbH
picochip Picochip Ltd
+pixart PixArt Imaging Inc
plathome Plat'Home Co., Ltd.
plda PLDA
pixcir PIXCIR MICROELECTRONICS Co., Ltd
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index ae85dcdcb7df..d2e43b053d9b 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -776,7 +776,7 @@ __armv7_mmu_cache_on:
orrne r0, r0, #1 @ MMU enabled
movne r1, #0xfffffffd @ domain 0 = client
bic r6, r6, #1 << 31 @ 32-bit translation system
- bic r6, r6, #3 << 0 @ use only ttbr0
+ bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
mcr p15, 0, r0, c7, c5, 4 @ ISB
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
index 95a8e80ccdbd..9ad9e4adce00 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
@@ -43,7 +43,7 @@
qcom,mdss-dsi-t-clk-pre = <0x24>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-dsi-te-pin-select = <1>;
qcom,mdss-dsi-wr-mem-start = <0x2c>;
qcom,mdss-dsi-wr-mem-continue = <0x3c>;
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
index fd11be721dbb..6b549a4af6eb 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
@@ -76,7 +76,7 @@
qcom,mdss-dsi-t-clk-pre = <0x24>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,compression-mode = "dsc";
qcom,config-select = <&dsi_nt35597_dsc_video_config0>;
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
index b6f19b78ea70..1e42d0846acf 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -59,33 +59,33 @@
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
qcom,ulps-enabled;
- qcom,mdss-dsi-on-command = [15 01 00 00 10 00 02 ff 10
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 ba 03
- 15 01 00 00 10 00 02 e5 01
- 15 01 00 00 10 00 02 35 00
- 15 01 00 00 10 00 02 bb 10
- 15 01 00 00 10 00 02 b0 03
- 15 01 00 00 10 00 02 ff e0
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 6b 3d
- 15 01 00 00 10 00 02 6c 3d
- 15 01 00 00 10 00 02 6d 3d
- 15 01 00 00 10 00 02 6e 3d
- 15 01 00 00 10 00 02 6f 3d
- 15 01 00 00 10 00 02 35 02
- 15 01 00 00 10 00 02 36 72
- 15 01 00 00 10 00 02 37 10
- 15 01 00 00 10 00 02 08 c0
- 15 01 00 00 10 00 02 ff 24
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 c6 06
- 15 01 00 00 10 00 02 ff 10
- 05 01 00 00 a0 00 02 11 00
- 05 01 00 00 a0 00 02 29 00];
+ qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 ba 03
+ 15 01 00 00 00 00 02 e5 01
+ 15 01 00 00 00 00 02 35 00
+ 15 01 00 00 00 00 02 bb 10
+ 15 01 00 00 00 00 02 b0 03
+ 15 01 00 00 00 00 02 ff e0
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 6b 3d
+ 15 01 00 00 00 00 02 6c 3d
+ 15 01 00 00 00 00 02 6d 3d
+ 15 01 00 00 00 00 02 6e 3d
+ 15 01 00 00 00 00 02 6f 3d
+ 15 01 00 00 00 00 02 35 02
+ 15 01 00 00 00 00 02 36 72
+ 15 01 00 00 00 00 02 37 10
+ 15 01 00 00 00 00 02 08 c0
+ 15 01 00 00 00 00 02 ff 24
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 c6 06
+ 15 01 00 00 00 00 02 ff 10
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 32 00 02 29 00];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 0a 00 02 28 00
+ 05 01 00 00 3c 00 02 10 00];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
index 367384a8c3e5..82413bfbca89 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
@@ -29,30 +29,30 @@
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-on-command = [15 01 00 00 10 00 02 ff 10
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 ba 03
- 15 01 00 00 10 00 02 e5 01
- 15 01 00 00 10 00 02 35 00
- 15 01 00 00 10 00 02 bb 03
- 15 01 00 00 10 00 02 b0 03
- 39 01 00 00 10 00 06 3b 03 08 08 64 9a
- 15 01 00 00 10 00 02 ff e0
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 6b 3d
- 15 01 00 00 10 00 02 6c 3d
- 15 01 00 00 10 00 02 6d 3d
- 15 01 00 00 10 00 02 6e 3d
- 15 01 00 00 10 00 02 6f 3d
- 15 01 00 00 10 00 02 35 02
- 15 01 00 00 10 00 02 36 72
- 15 01 00 00 10 00 02 37 10
- 15 01 00 00 10 00 02 08 c0
- 15 01 00 00 10 00 02 ff 10
- 05 01 00 00 a0 00 02 11 00
- 05 01 00 00 a0 00 02 29 00];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 ba 03
+ 15 01 00 00 00 00 02 e5 01
+ 15 01 00 00 00 00 02 35 00
+ 15 01 00 00 00 00 02 bb 03
+ 15 01 00 00 00 00 02 b0 03
+ 39 01 00 00 00 00 06 3b 03 08 08 64 9a
+ 15 01 00 00 00 00 02 ff e0
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 6b 3d
+ 15 01 00 00 00 00 02 6c 3d
+ 15 01 00 00 00 00 02 6d 3d
+ 15 01 00 00 00 00 02 6e 3d
+ 15 01 00 00 00 00 02 6f 3d
+ 15 01 00 00 00 00 02 35 02
+ 15 01 00 00 00 00 02 36 72
+ 15 01 00 00 00 00 02 37 10
+ 15 01 00 00 00 00 02 08 c0
+ 15 01 00 00 00 00 02 ff 10
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 32 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 0a 00 02 28 00
+ 05 01 00 00 3c 00 02 10 00];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <0>;
@@ -69,7 +69,7 @@
qcom,mdss-dsi-t-clk-pre = <0x2d>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-dsi-min-refresh-rate = <55>;
qcom,mdss-dsi-max-refresh-rate = <60>;
qcom,mdss-dsi-pan-enable-dynamic-fps;
diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
index 90df1d0c1ac0..69067f5f1cc7 100644
--- a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
+++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
@@ -13,6 +13,7 @@
qcom,ascent_3450mah {
/* #Ascent_860_82209_0000_3450mAh_averaged_MasterSlave_Jul20th2016*/
qcom,max-voltage-uv = <4350000>;
+ qcom,fg-cc-cv-threshold-mv = <4340>;
qcom,nom-batt-capacity-mah = <3450>;
qcom,batt-id-kohm = <60>;
qcom,battery-beta = <3435>;
diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
index 2c1edde56d6a..c3f23b75fa9c 100644
--- a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
+++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
@@ -13,6 +13,7 @@
qcom,itech_3000mah {
/* #Itech_B00826LF_3000mAh_ver1660_averaged_MasterSlave_Jul20th2016*/
qcom,max-voltage-uv = <4350000>;
+ qcom,fg-cc-cv-threshold-mv = <4340>;
qcom,nom-batt-capacity-mah = <3000>;
qcom,batt-id-kohm = <100>;
qcom,battery-beta = <3450>;
diff --git a/arch/arm/boot/dts/qcom/msm-arm-smmu-falcon.dtsi b/arch/arm/boot/dts/qcom/msm-arm-smmu-falcon.dtsi
new file mode 100644
index 000000000000..e4824418409b
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm-arm-smmu-falcon.dtsi
@@ -0,0 +1,204 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/qcom,gcc-msmfalcon.h>
+#include <dt-bindings/clock/qcom,mmcc-msmfalcon.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&soc {
+ anoc2_smmu: arm,smmu-anoc2@16c0000 {
+ compatible = "qcom,smmu-v2";
+ reg = <0x16c0000 0x40000>;
+ #iommu-cells = <1>;
+ qcom,register-save;
+ qcom,skip-init;
+ #global-interrupts = <2>;
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 374 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 376 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 377 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 378 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 442 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 443 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clock_rpmcc RPM_AGGR2_NOC_CLK>;
+ clock-names = "smmu_aggr2_noc_clk";
+ #clock-cells = <1>;
+ };
+
+ lpass_q6_smmu: arm,smmu-lpass_q6@5100000 {
+ status = "disabled";
+ compatible = "qcom,smmu-v2";
+ reg = <0x5100000 0x40000>;
+ #iommu-cells = <1>;
+ qcom,register-save;
+ qcom,skip-init;
+ #global-interrupts = <2>;
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>;
+ vdd-supply = <&gdsc_hlos1_vote_lpass_adsp>;
+ clocks = <&clock_gcc HLOS1_VOTE_LPASS_ADSP_SMMU_CLK>;
+ clock-names = "lpass_q6_smmu_clk";
+ #clock-cells = <1>;
+ };
+
+ mmss_bimc_smmu: arm,smmu-mmss@cd00000 {
+ status = "disabled";
+ compatible = "qcom,smmu-v2";
+ reg = <0xcd00000 0x40000>;
+ #iommu-cells = <1>;
+ qcom,register-save;
+ qcom,skip-init;
+ #global-interrupts = <2>;
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 276 IRQ_TYPE_LEVEL_HIGH>;
+ vdd-supply = <&gdsc_bimc_smmu>;
+ clocks = <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_gcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>;
+ clock-names = "mmss_mnoc_ahb_clk",
+ "mmssnoc_axi_clk",
+ "mmss_bimc_smmu_ahb_clk",
+ "mmss_bimc_smmu_axi_clk";
+ #clock-cells = <1>;
+ qcom,bus-master-id = <MSM_BUS_MNOC_BIMC_MAS>;
+ };
+
+ kgsl_smmu: arm,smmu-kgsl@5040000 {
+ status = "disabled";
+ compatible = "qcom,smmu-v2";
+ reg = <0x5040000 0x10000>;
+ #iommu-cells = <1>;
+ qcom,dynamic;
+ qcom,register-save;
+ qcom,skip-init;
+ #global-interrupts = <2>;
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 350 IRQ_TYPE_LEVEL_HIGH>;
+ vdd-supply = <&gdsc_gpu_cx>;
+ clocks = <&clock_gcc GCC_GPU_CFG_AHB_CLK>,
+ <&clock_gcc GCC_BIMC_GFX_CLK>,
+ <&clock_gcc GCC_GPU_BIMC_GFX_CLK>;
+ clock-names = "gcc_gpu_cfg_ahb_clk",
+ "gcc_bimc_gfx_clk",
+ "gcc_gpu_bimc_gfx_clk";
+ #clock-cells = <1>;
+ };
+
+ turing_q6_smmu: arm,smmu-turing_q6@5180000 {
+ status = "disabled";
+ compatible = "qcom,smmu-v2";
+ reg = <0x5180000 0x40000>;
+ #iommu-cells = <1>;
+ qcom,register-save;
+ qcom,skip-init;
+ #global-interrupts = <2>;
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 533 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 534 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 535 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 536 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 537 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 538 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 539 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 540 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 541 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 542 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 543 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 544 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 545 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 546 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 547 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 548 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 549 IRQ_TYPE_LEVEL_HIGH>;
+ vdd-supply = <&gdsc_hlos1_vote_turing_adsp>;
+ clocks = <&clock_gcc HLOS1_VOTE_TURING_ADSP_SMMU_CLK>;
+ clock-names = "turing_q6_smmu_clk";
+ #clock-cells = <1>;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm-arm-smmu-impl-defs-falcon.dtsi b/arch/arm/boot/dts/qcom/msm-arm-smmu-impl-defs-falcon.dtsi
new file mode 100644
index 000000000000..f060f2d7008c
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm-arm-smmu-impl-defs-falcon.dtsi
@@ -0,0 +1,409 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&kgsl_smmu {
+ attach-impl-defs = <0x6000 0x2378>,
+ <0x6060 0x1055>,
+ <0x678c 0x8>,
+ <0x6794 0x28>,
+ <0x6800 0x6>,
+ <0x6900 0x3ff>,
+ <0x6924 0x204>,
+ <0x6928 0x11000>,
+ <0x6930 0x800>,
+ <0x6960 0xffffffff>,
+ <0x6b64 0x1a5551>,
+ <0x6b68 0x9a82a382>;
+};
+
+&lpass_q6_smmu {
+ attach-impl-defs = <0x6000 0x2378>,
+ <0x6060 0x1055>,
+ <0x6070 0xe0>,
+ <0x6074 0xe0>,
+ <0x6078 0xe0>,
+ <0x607c 0xe0>,
+ <0x60f0 0xc0>,
+ <0x60f4 0xc8>,
+ <0x60f8 0xd0>,
+ <0x60fc 0xd8>,
+ <0x6170 0x0>,
+ <0x6174 0x30>,
+ <0x6178 0x60>,
+ <0x617c 0x90>,
+ <0x6270 0x0>,
+ <0x6274 0x2>,
+ <0x6278 0x4>,
+ <0x627c 0x6>,
+ <0x62f0 0x8>,
+ <0x62f4 0xe>,
+ <0x62f8 0x14>,
+ <0x62fc 0x1a>,
+ <0x6370 0x20>,
+ <0x6374 0x40>,
+ <0x6378 0x60>,
+ <0x637c 0x80>,
+ <0x6784 0x0>,
+ <0x678c 0x10>,
+ <0x67a0 0x0>,
+ <0x67a4 0x0>,
+ <0x67a8 0x20>,
+ <0x67b0 0x0>,
+ <0x67b4 0x8>,
+ <0x67b8 0xc8>,
+ <0x67d0 0x4>,
+ <0x67dc 0x8>,
+ <0x67e0 0x8>,
+ <0x6800 0x6>,
+ <0x6900 0x3ff>,
+ <0x6924 0x202>,
+ <0x6928 0x10a00>,
+ <0x6930 0x500>,
+ <0x6960 0xffffffff>,
+ <0x6b64 0x121151>,
+ <0x6b68 0xea800080>,
+ <0x6c00 0x0>,
+ <0x6c04 0x0>,
+ <0x6c08 0x0>,
+ <0x6c0c 0x0>,
+ <0x6c10 0x1>,
+ <0x6c14 0x1>,
+ <0x6c18 0x1>,
+ <0x6c1c 0x1>,
+ <0x6c20 0x2>,
+ <0x6c24 0x2>,
+ <0x6c28 0x2>,
+ <0x6c2c 0x2>,
+ <0x6c30 0x3>,
+ <0x6c34 0x3>,
+ <0x6c38 0x3>,
+ <0x6c3c 0x3>;
+};
+
+&turing_q6_smmu {
+ attach-impl-defs = <0x6000 0x2378>,
+ <0x6060 0x1055>,
+ <0x6070 0xe0>,
+ <0x6074 0xe0>,
+ <0x6078 0xe0>,
+ <0x607c 0xe0>,
+ <0x60f0 0xc0>,
+ <0x60f4 0xc8>,
+ <0x60f8 0xd0>,
+ <0x60fc 0xd8>,
+ <0x6170 0x0>,
+ <0x6174 0x30>,
+ <0x6178 0x60>,
+ <0x617c 0x90>,
+ <0x6270 0x0>,
+ <0x6274 0x2>,
+ <0x6278 0x4>,
+ <0x627c 0x6>,
+ <0x62f0 0x8>,
+ <0x62f4 0xe>,
+ <0x62f8 0x14>,
+ <0x62fc 0x1a>,
+ <0x6370 0x20>,
+ <0x6374 0x40>,
+ <0x6378 0x60>,
+ <0x637c 0x80>,
+ <0x6784 0x0>,
+ <0x678c 0x10>,
+ <0x67a0 0x0>,
+ <0x67a4 0x0>,
+ <0x67a8 0x20>,
+ <0x67b0 0x0>,
+ <0x67b4 0x8>,
+ <0x67b8 0xc8>,
+ <0x67d0 0x4>,
+ <0x67dc 0x8>,
+ <0x67e0 0x8>,
+ <0x6800 0x6>,
+ <0x6900 0x3ff>,
+ <0x6924 0x202>,
+ <0x6928 0x10a00>,
+ <0x6930 0x500>,
+ <0x6960 0xffffffff>,
+ <0x6b64 0x121151>,
+ <0x6b68 0xea800080>,
+ <0x6c00 0x0>,
+ <0x6c04 0x0>,
+ <0x6c08 0x0>,
+ <0x6c0c 0x0>,
+ <0x6c10 0x1>,
+ <0x6c14 0x1>,
+ <0x6c18 0x1>,
+ <0x6c1c 0x1>,
+ <0x6c20 0x2>,
+ <0x6c24 0x2>,
+ <0x6c28 0x2>,
+ <0x6c2c 0x2>,
+ <0x6c30 0x3>,
+ <0x6c34 0x3>,
+ <0x6c38 0x3>,
+ <0x6c3c 0x3>;
+};
+
+&mmss_bimc_smmu {
+ attach-impl-defs = <0x6000 0x2378>,
+ <0x6060 0x1055>,
+ <0x678c 0x28>,
+ <0x6794 0xe0>,
+ <0x6800 0x6>,
+ <0x6900 0x3ff>,
+ <0x6924 0x204>,
+ <0x6928 0x11002>,
+ <0x6930 0x800>,
+ <0x6960 0xffffffff>,
+ <0x6964 0xffffffff>,
+ <0x6968 0xffffffff>,
+ <0x696c 0xffffffff>,
+ <0x6b48 0x330330>,
+ <0x6b4c 0x81>,
+ <0x6b50 0x3333>,
+ <0x6b54 0x3333>,
+ <0x6b64 0x1a5555>,
+ <0x6b68 0xbaaa892a>,
+ <0x6b70 0x10100202>,
+ <0x6b74 0x10100202>,
+ <0x6b78 0x10100000>,
+ <0x6b80 0x20042004>,
+ <0x6b84 0x20042004>;
+};
+
+&anoc2_smmu {
+ attach-impl-defs = <0x6000 0x2378>,
+ <0x6060 0x1055>,
+ <0x6070 0xf>,
+ <0x6074 0x23>,
+ <0x6078 0x37>,
+ <0x607c 0x39>,
+ <0x6080 0x3f>,
+ <0x6084 0x6f>,
+ <0x6088 0x74>,
+ <0x608c 0x92>,
+ <0x6090 0xb0>,
+ <0x6094 0xf0>,
+ <0x6098 0xf0>,
+ <0x609c 0xf0>,
+ <0x60f0 0x0>,
+ <0x60f4 0x1>,
+ <0x60f8 0x3>,
+ <0x60fc 0x4>,
+ <0x6100 0x6>,
+ <0x6104 0x8>,
+ <0x6108 0x9>,
+ <0x610c 0xb>,
+ <0x6110 0xd>,
+ <0x6114 0xf>,
+ <0x6118 0xf>,
+ <0x611c 0xf>,
+ <0x6170 0x0>,
+ <0x6174 0x0>,
+ <0x6178 0x0>,
+ <0x617c 0x0>,
+ <0x6180 0x0>,
+ <0x6184 0x0>,
+ <0x6188 0x0>,
+ <0x618c 0x0>,
+ <0x6190 0x0>,
+ <0x6194 0x0>,
+ <0x6198 0x0>,
+ <0x619c 0x0>,
+ <0x6270 0x0>,
+ <0x6274 0x1>,
+ <0x6278 0x2>,
+ <0x627c 0x4>,
+ <0x6280 0x4>,
+ <0x6284 0x6>,
+ <0x6288 0x6>,
+ <0x628c 0xa>,
+ <0x6290 0xc>,
+ <0x6294 0xc>,
+ <0x6298 0xc>,
+ <0x629c 0xc>,
+ <0x62f0 0xc>,
+ <0x62f4 0x12>,
+ <0x62f8 0x18>,
+ <0x62fc 0x1a>,
+ <0x6300 0x1d>,
+ <0x6304 0x23>,
+ <0x6308 0x24>,
+ <0x630c 0x28>,
+ <0x6310 0x2c>,
+ <0x6314 0x30>,
+ <0x6318 0x30>,
+ <0x631c 0x30>,
+ <0x6370 0x30>,
+ <0x6374 0x35>,
+ <0x6378 0x3a>,
+ <0x637c 0x3e>,
+ <0x6380 0x46>,
+ <0x6384 0x50>,
+ <0x6388 0x55>,
+ <0x638c 0x5d>,
+ <0x6390 0x67>,
+ <0x6394 0x80>,
+ <0x6398 0x80>,
+ <0x639c 0x80>,
+ <0x678c 0x12>,
+ <0x6794 0x32>,
+ <0x67a0 0x0>,
+ <0x67a4 0xe1>,
+ <0x67a8 0xf0>,
+ <0x67b0 0x0>,
+ <0x67b4 0xc>,
+ <0x67b8 0x9c>,
+ <0x67d0 0x0>,
+ <0x67dc 0x4>,
+ <0x67e0 0x8>,
+ <0x6800 0x6>,
+ <0x6900 0x3ff>,
+ <0x6b48 0x330330>,
+ <0x6b4c 0x81>,
+ <0x6b50 0x1313>,
+ <0x6b64 0x121155>,
+ <0x6b68 0xcaa84920>,
+ <0x6b70 0xc0c0000>,
+ <0x6b74 0x8080000>,
+ <0x6b78 0x8080000>,
+ <0x6b80 0x20002000>,
+ <0x6b84 0x20002000>,
+ <0x6c00 0x5>,
+ <0x6c04 0x0>,
+ <0x6c08 0x5>,
+ <0x6c0c 0x0>,
+ <0x6c10 0x5>,
+ <0x6c14 0x0>,
+ <0x6c18 0x5>,
+ <0x6c1c 0x0>,
+ <0x6c20 0x5>,
+ <0x6c24 0x0>,
+ <0x6c28 0x0>,
+ <0x6c2c 0x0>,
+ <0x6c30 0x0>,
+ <0x6c34 0x0>,
+ <0x6c38 0x0>,
+ <0x6c3c 0x0>,
+ <0x6c40 0x0>,
+ <0x6c44 0x0>,
+ <0x6c48 0x0>,
+ <0x6c4c 0x0>,
+ <0x6c50 0x0>,
+ <0x6c54 0x0>,
+ <0x6c58 0x0>,
+ <0x6c5c 0x0>,
+ <0x6c60 0x0>,
+ <0x6c64 0x0>,
+ <0x6c68 0x0>,
+ <0x6c6c 0x0>,
+ <0x6c70 0x0>,
+ <0x6c74 0x0>,
+ <0x6c78 0x0>,
+ <0x6c7c 0x0>,
+ <0x6c80 0x0>,
+ <0x6c84 0x0>,
+ <0x6c88 0x0>,
+ <0x6c8c 0x0>,
+ <0x6c90 0x0>,
+ <0x6c94 0x0>,
+ <0x6c98 0x0>,
+ <0x6c9c 0x0>,
+ <0x6ca0 0x0>,
+ <0x6ca4 0x0>,
+ <0x6ca8 0x0>,
+ <0x6cac 0x0>,
+ <0x6cb0 0x0>,
+ <0x6cb4 0x0>,
+ <0x6cb8 0x0>,
+ <0x6cbc 0x0>,
+ <0x6cc0 0x0>,
+ <0x6cc4 0x0>,
+ <0x6cc8 0x0>,
+ <0x6ccc 0x0>,
+ <0x6cd0 0x0>,
+ <0x6cd4 0x0>,
+ <0x6cd8 0x0>,
+ <0x6cdc 0x0>,
+ <0x6ce0 0x0>,
+ <0x6ce4 0x0>,
+ <0x6ce8 0x0>,
+ <0x6cec 0x0>,
+ <0x6cf0 0x0>,
+ <0x6cf4 0x0>,
+ <0x6cf8 0x0>,
+ <0x6cfc 0x0>,
+ <0x6d00 0x3>,
+ <0x6d04 0x4>,
+ <0x6d08 0x4>,
+ <0x6d0c 0x0>,
+ <0x6d10 0x8>,
+ <0x6d14 0x8>,
+ <0x6d18 0x3>,
+ <0x6d1c 0x2>,
+ <0x6d20 0x4>,
+ <0x6d24 0x0>,
+ <0x6d28 0x4>,
+ <0x6d2c 0x0>,
+ <0x6d30 0x7>,
+ <0x6d34 0x0>,
+ <0x6d38 0x6>,
+ <0x6d3c 0x0>,
+ <0x6d40 0x0>,
+ <0x6d44 0x1>,
+ <0x6d48 0x4>,
+ <0x6d4c 0x0>,
+ <0x6d50 0x4>,
+ <0x6d54 0x0>,
+ <0x6d58 0x4>,
+ <0x6d5c 0x0>,
+ <0x6d60 0x0>,
+ <0x6d64 0x0>,
+ <0x6d68 0x0>,
+ <0x6d6c 0x0>,
+ <0x6d70 0x0>,
+ <0x6d74 0x0>,
+ <0x6d78 0x0>,
+ <0x6d7c 0x0>,
+ <0x6d80 0x0>,
+ <0x6d84 0x0>,
+ <0x6d88 0x0>,
+ <0x6d8c 0x0>,
+ <0x6d90 0x0>,
+ <0x6d94 0x0>,
+ <0x6d98 0x0>,
+ <0x6d9c 0x0>,
+ <0x6da0 0x0>,
+ <0x6da4 0x0>,
+ <0x6da8 0x0>,
+ <0x6dac 0x0>,
+ <0x6db0 0x0>,
+ <0x6db4 0x0>,
+ <0x6db8 0x0>,
+ <0x6dbc 0x0>,
+ <0x6dc0 0x0>,
+ <0x6dc4 0x0>,
+ <0x6dc8 0x0>,
+ <0x6dcc 0x0>,
+ <0x6dd0 0x0>,
+ <0x6dd4 0x0>,
+ <0x6dd8 0x0>,
+ <0x6ddc 0x0>,
+ <0x6de0 0x0>,
+ <0x6de4 0x0>,
+ <0x6de8 0x0>,
+ <0x6dec 0x0>,
+ <0x6df0 0x0>,
+ <0x6df4 0x0>,
+ <0x6df8 0x0>,
+ <0x6dfc 0x0>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm-gdsc-falcon.dtsi b/arch/arm/boot/dts/qcom/msm-gdsc-falcon.dtsi
new file mode 100644
index 000000000000..6550ddcad86c
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm-gdsc-falcon.dtsi
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ /* GCC GDSCs */
+ gdsc_usb30: qcom,gdsc@10f004 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_usb30";
+ reg = <0x10f004 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_ufs: qcom,gdsc@175004 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_ufs";
+ reg = <0x175004 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_hlos1_vote_lpass_adsp: qcom,gdsc@17d034 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_hlos1_vote_lpass_adsp";
+ reg = <0x17d034 0x4>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ status = "disabled";
+ };
+
+ gdsc_hlos1_vote_turing_adsp: qcom,gdsc@17d04c {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_hlos1_vote_turing_adsp";
+ reg = <0x17d04c 0x4>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ status = "disabled";
+ };
+
+ gdsc_hlos2_vote_turing_adsp: qcom,gdsc@17e04c {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_hlos2_vote_turing_adsp";
+ reg = <0x17e04c 0x4>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ status = "disabled";
+ };
+
+ /* MMSS GDSCs */
+ bimc_smmu_hw_ctrl: syscon@c8ce024 {
+ compatible = "syscon";
+ reg = <0xc8ce024 0x4>;
+ };
+
+ gdsc_bimc_smmu: qcom,gdsc@c8ce020 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_bimc_smmu";
+ reg = <0xc8ce020 0x4>;
+ hw-ctrl-addr = <&bimc_smmu_hw_ctrl>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ status = "disabled";
+ };
+
+ gdsc_venus: qcom,gdsc@c8c1024 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_venus";
+ reg = <0xc8c1024 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_venus_core0: qcom,gdsc@c8c1040 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_venus_core0";
+ reg = <0xc8c1040 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_camss_top: qcom,gdsc@c8c34a0 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_camss_top";
+ reg = <0xc8c34a0 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_vfe0: qcom,gdsc@c8c3664 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_vfe0";
+ reg = <0xc8c3664 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_vfe1: qcom,gdsc@c8c3674 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_vfe1";
+ reg = <0xc8c3674 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_cpp: qcom,gdsc@c8c36d4 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_cpp";
+ reg = <0xc8c36d4 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_mdss: qcom,gdsc@c8c2304 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_mdss";
+ reg = <0xc8c2304 0x4>;
+ status = "disabled";
+ };
+
+ /* GPU GDSCs */
+ gpu_cx_hw_ctrl: syscon@5066008 {
+ compatible = "syscon";
+ reg = <0x5066008 0x4>;
+ };
+
+ gdsc_gpu_cx: qcom,gdsc@5066004 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_gpu_cx";
+ reg = <0x5066004 0x4>;
+ hw-ctrl-addr = <&gpu_cx_hw_ctrl>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <2000>;
+ status = "disabled";
+ };
+
+ /* GPU GX GDSCs */
+ gpu_gx_domain_addr: syscon@5065130 {
+ compatible = "syscon";
+ reg = <0x5065130 0x4>;
+ };
+
+ gpu_gx_sw_reset: syscon@5066090 {
+ compatible = "syscon";
+ reg = <0x5066090 0x4>;
+ };
+
+ gdsc_gpu_gx: qcom,gdsc@5066094 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_gpu_gx";
+ reg = <0x5066094 0x4>;
+ domain-addr = <&gpu_gx_domain_addr>;
+ sw-reset = <&gpu_gx_sw_reset>;
+ qcom,retain-periph;
+ qcom,reset-aon-logic;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
index 8a8782f5f8b3..28d230dfb6bf 100644
--- a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
@@ -23,6 +23,7 @@
pmicobalt_revid: qcom,revid@100 {
compatible = "qcom,qpnp-revid";
reg = <0x100 0x100>;
+ qcom,fab-id-valid;
};
qcom,power-on@800 {
@@ -310,6 +311,7 @@
#address-cells = <1>;
#size-cells = <0>;
#io-channel-cells = <1>;
+ qcom,pmic-revid = <&pmicobalt_revid>;
};
pmicobalt_fg: qpnp,fg {
@@ -385,6 +387,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <1>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <0>;
@@ -398,6 +401,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <2>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <1>;
@@ -411,6 +415,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <3>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <2>;
@@ -423,6 +428,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <4>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <3>;
@@ -435,6 +441,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <5>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <4>;
@@ -447,6 +454,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <6>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <5>;
diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi
index 912bdd88be68..7e88f524367f 100644
--- a/arch/arm/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996.dtsi
@@ -1815,6 +1815,11 @@
reg = <0x10 8>;
};
+ dload_type@18 {
+ compatible = "qcom,msm-imem-dload-type";
+ reg = <0x18 4>;
+ };
+
restart_reason@65c {
compatible = "qcom,msm-imem-restart_reason";
reg = <0x65c 4>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi
index a1d80075abe0..3681f3d34b0c 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-audio.dtsi
@@ -85,14 +85,15 @@
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
- <&pcm_noirq>;
+ <&pcm_noirq>, <&cpe3>;
asoc-platform-names = "msm-pcm-dsp.0", "msm-pcm-dsp.1",
"msm-pcm-dsp.2", "msm-voip-dsp",
"msm-pcm-voice", "msm-pcm-loopback",
"msm-compress-dsp", "msm-pcm-hostless",
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-cpe-lsm",
- "msm-compr-dsp", "msm-pcm-dsp-noirq";
+ "msm-compr-dsp", "msm-pcm-dsp-noirq",
+ "msm-cpe-lsm.3";
asoc-cpu = <&dai_hdmi>, <&dai_dp>,
<&dai_mi2s0>, <&dai_mi2s1>,
<&dai_mi2s2>, <&dai_mi2s3>,
@@ -244,6 +245,12 @@
cpe: qcom,msm-cpe-lsm {
compatible = "qcom,msm-cpe-lsm";
+ qcom,msm-cpe-lsm-id = <1>;
+ };
+
+ cpe3: qcom,msm-cpe-lsm@3 {
+ compatible = "qcom,msm-cpe-lsm";
+ qcom,msm-cpe-lsm-id = <3>;
};
qcom,wcd-dsp-mgr {
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-blsp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-blsp.dtsi
index 929a079c64c3..a660ea06795e 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-blsp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-blsp.dtsi
@@ -736,8 +736,10 @@
clocks = <&clock_gcc clk_gcc_blsp1_uart3_apps_clk>,
<&clock_gcc clk_gcc_blsp1_ahb_clk>;
pinctrl-names = "sleep", "default";
- pinctrl-0 = <&blsp1_uart3_sleep>;
- pinctrl-1 = <&blsp1_uart3_active>;
+ pinctrl-0 = <&blsp1_uart3_tx_sleep>, <&blsp1_uart3_rxcts_sleep>,
+ <&blsp1_uart3_rfr_sleep>;
+ pinctrl-1 = <&blsp1_uart3_tx_active>,
+ <&blsp1_uart3_rxcts_active>, <&blsp1_uart3_rfr_active>;
qcom,msm-bus,name = "buart3";
qcom,msm-bus,num-cases = <2>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi
index a37fa26b1055..27e537c9c702 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-camera.dtsi
@@ -391,6 +391,7 @@
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_camss_ahb_clk>,
<&clock_mmss clk_mmss_camss_top_ahb_clk>,
+ <&clock_mmss clk_cpp_clk_src>,
<&clock_mmss clk_mmss_camss_cpp_clk>,
<&clock_mmss clk_mmss_camss_cpp_ahb_clk>,
<&clock_mmss clk_mmss_camss_cpp_axi_clk>,
@@ -400,10 +401,11 @@
clock-names = "mmssnoc_axi_clk",
"mnoc_ahb_clk",
"camss_ahb_clk", "camss_top_ahb_clk",
+ "cpp_src_clk",
"cpp_core_clk", "camss_cpp_ahb_clk",
"camss_cpp_axi_clk", "micro_iface_clk",
"mmss_smmu_axi_clk", "cpp_vbif_ahb_clk";
- qcom,clock-rates = <0 0 0 0 200000000 0 0 0 0 0>;
+ qcom,clock-rates = <0 0 0 0 200000000 200000000 0 0 0 0 0>;
qcom,min-clock-rate = <200000000>;
qcom,bus-master = <1>;
qcom,vbif-qos-setting = <0x20 0x10000000>,
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
index 085ca0187ee6..fcceac6e2469 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
@@ -21,6 +21,8 @@
qca,bt-vdd-pa-supply = <&pmcobalt_l17_pin_ctrl>;
qca,bt-vdd-ldo-supply = <&pmcobalt_l25_pin_ctrl>;
qca,bt-chip-pwd-supply = <&pmicobalt_bob_pin1>;
+ clocks = <&clock_gcc clk_rf_clk2>;
+ clock-names = "rf_clk2";
qca,bt-vdd-io-voltage-level = <1352000 1352000>;
qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
index d973bc5ed84f..0278cbde90ce 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
@@ -82,6 +82,7 @@
qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,cmd-sync-wait-broadcast;
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-check-mode = "bta_check";
};
@@ -90,6 +91,7 @@
qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,cmd-sync-wait-broadcast;
};
&dsi_dual_nt35597_truly_video {
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
index 99402e3033ed..7948dc3489cb 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
@@ -22,6 +22,8 @@
qca,bt-vdd-pa-supply = <&pmcobalt_l17_pin_ctrl>;
qca,bt-vdd-ldo-supply = <&pmcobalt_l25_pin_ctrl>;
qca,bt-chip-pwd-supply = <&pmicobalt_bob_pin1>;
+ clocks = <&clock_gcc clk_rf_clk2>;
+ clock-names = "rf_clk2";
qca,bt-vdd-io-voltage-level = <1352000 1352000>;
qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
@@ -287,9 +289,11 @@
reg = <0x1000 0x700>;
io-channels = <&smb138x_tadc 2>,
- <&smb138x_tadc 12>;
+ <&smb138x_tadc 12>,
+ <&smb138x_tadc 3>;
io-channel-names = "charger_temp",
- "charger_temp_max";
+ "charger_temp_max",
+ "batt_i";
};
};
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
index 3975bc5d16f5..e5fd988dccce 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
@@ -1223,29 +1223,83 @@
};
};
- blsp1_uart3_active: blsp1_uart3_active {
- mux {
- pins = "gpio45", "gpio46", "gpio47", "gpio48";
- function = "blsp_uart3_a";
+ blsp1_uart3: blsp1_uart3 {
+ blsp1_uart3_tx_active: blsp1_uart3_tx_active {
+ mux {
+ pins = "gpio45";
+ function = "blsp_uart3_a";
+ };
+
+ config {
+ pins = "gpio45";
+ drive-strength = <2>;
+ bias-disable;
+ };
};
- config {
- pins = "gpio45", "gpio46", "gpio47", "gpio48";
- drive-strength = <2>;
- bias-disable;
+ blsp1_uart3_tx_sleep: blsp1_uart3_tx_sleep {
+ mux {
+ pins = "gpio45";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio45";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
};
- };
- blsp1_uart3_sleep: blsp1_uart3_sleep {
- mux {
- pins = "gpio45", "gpio46", "gpio47", "gpio48";
- function = "gpio";
+ blsp1_uart3_rxcts_active: blsp1_uart3_rxcts_active {
+ mux {
+ pins = "gpio46", "gpio47";
+ function = "blsp_uart3_a";
+ };
+
+ config {
+ pins = "gpio46", "gpio47";
+ drive-strength = <2>;
+ bias-disable;
+ };
};
- config {
- pins = "gpio45", "gpio46", "gpio47", "gpio48";
- drive-strength = <2>;
- bias-pull-up;
+ blsp1_uart3_rxcts_sleep: blsp1_uart3_rxcts_sleep {
+ mux {
+ pins = "gpio46", "gpio47";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio46", "gpio47";
+ drive-strength = <2>;
+ bias-no-pull;
+ };
+ };
+
+ blsp1_uart3_rfr_active: blsp1_uart3_rfr_active {
+ mux {
+ pins = "gpio48";
+ function = "blsp_uart3_a";
+ };
+
+ config {
+ pins = "gpio48";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart3_rfr_sleep: blsp1_uart3_rfr_sleep {
+ mux {
+ pins = "gpio48";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio48";
+ drive-strength = <2>;
+ bias-no-pull;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
index 6018124caf68..c6d7defbf35c 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
@@ -24,7 +24,7 @@
qcom,vctl-port = <0x0>;
qcom,phase-port = <0x1>;
qcom,saw2-avs-ctl = <0x1010031>;
- qcom,saw2-avs-limit = <0x4000208>;
+ qcom,saw2-avs-limit = <0x4580458>;
qcom,pfm-port = <0x2>;
};
@@ -40,7 +40,7 @@
qcom,vctl-port = <0x0>;
qcom,phase-port = <0x1>;
qcom,saw2-avs-ctl = <0x1010031>;
- qcom,saw2-avs-limit = <0x4000208>;
+ qcom,saw2-avs-limit = <0x4580458>;
qcom,pfm-port = <0x2>;
};
@@ -279,6 +279,52 @@
qcom,sleep-stats-version = <2>;
};
+ qcom,rpm-rail-stats@200000 {
+ compatible = "qcom,rpm-rail-stats";
+ reg = <0x200000 0x100>,
+ <0x29000c 0x4>;
+ reg-names = "phys_addr_base",
+ "offset_addr";
+ };
+
+ qcom,rpm-log@200000 {
+ compatible = "qcom,rpm-log";
+ reg = <0x200000 0x4000>,
+ <0x290018 0x4>;
+ qcom,rpm-addr-phys = <0x200000>;
+ qcom,offset-version = <4>;
+ qcom,offset-page-buffer-addr = <36>;
+ qcom,offset-log-len = <40>;
+ qcom,offset-log-len-mask = <44>;
+ qcom,offset-page-indices = <56>;
+ };
+
+ qcom,rpm-master-stats@778150 {
+ compatible = "qcom,rpm-master-stats";
+ reg = <0x778150 0x5000>;
+ qcom,masters = "APSS", "MPSS", "ADSP", "SLPI";
+ qcom,master-stats-version = <2>;
+ qcom,master-offset = <4096>;
+ };
+
+ rpm_msg_ram: memory@0x200000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0x200000 0x1000>,
+ <0x290000 0x1000>;
+ };
+
+ rpm_code_ram: rpm-memory@0x778000 {
+ compatible = "qcom,rpm-code-ram";
+ reg = <0x778000 0x5000>;
+ };
+
+ qcom,system-stats {
+ compatible = "qcom,system-stats";
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+ qcom,rpm-code-ram = <&rpm_code_ram>;
+ qcom,masters = "APSS", "MPSS", "ADSP", "SLPI";
+ };
+
qcom,mpm@7781b8 {
compatible = "qcom,mpm-v2";
reg = <0x7781b8 0x1000>, /* MSM_RPM_MPM_BASE 4K */
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts b/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts
index e53912071502..ee6a58a41b4f 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts
+++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dts
@@ -21,3 +21,32 @@
compatible = "qcom,msmcobalt-qrd", "qcom,msmcobalt", "qcom,qrd";
qcom,board-id = <0x02000b 0x80>;
};
+
+&soc {
+ sound-tavil {
+ qcom,model = "msmcobalt-qvr-tavil-snd-card";
+ qcom,audio-routing =
+ "RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC1", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic1",
+ "DMIC2", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic2",
+ "DMIC4", "MIC BIAS4",
+ "MIC BIAS4", "Digital Mic4",
+ "SpkrLeft IN", "SPK1 OUT";
+
+ qcom,msm-mbhc-hphl-swh = <1>;
+ /delete-property/ qcom,us-euro-gpios;
+ /delete-property/ qcom,hph-en0-gpio;
+	/delete-property/ qcom,hph-en1-gpio;
+
+ qcom,wsa-max-devs = <1>;
+ qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi
index c028ea0eeab3..f8069856f3d8 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd-vr1.dtsi
@@ -61,7 +61,7 @@
50000000 100000000 200000000>;
qcom,bus-speed-mode = "SDR12", "SDR25", "SDR50", "DDR50", "SDR104";
- cd-gpios = <&tlmm 95 0x1>;
+ cd-gpios = <&tlmm 95 0x0>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
index c2d45ec3ef07..2a61cccad273 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
@@ -537,6 +537,28 @@
qcom,enable-time = <500>;
};
};
+
+ qcom,pmcobalt@1 {
+ pmcobalt_s10: regulator@2f00 {
+ compatible = "qcom,qpnp-regulator";
+ reg = <0x2f00 0x100>;
+ regulator-name = "pmcobalt_s10";
+ regulator-min-microvolt = <572000>;
+ regulator-max-microvolt = <1112000>;
+ qcom,enable-time = <500>;
+ regulator-always-on;
+ };
+
+ pmcobalt_s13: regulator@3800 {
+ compatible = "qcom,qpnp-regulator";
+ reg = <0x3800 0x100>;
+ regulator-name = "pmcobalt_s13";
+ regulator-min-microvolt = <572000>;
+ regulator-max-microvolt = <1112000>;
+ qcom,enable-time = <500>;
+ regulator-always-on;
+ };
+ };
};
/* Stub regulators */
@@ -590,6 +612,9 @@
qcom,cpr-panic-reg-name-list =
"PWR_CPRH_STATUS", "APCLUS0_L2_SAW4_PMIC_STS";
+ qcom,cpr-aging-ref-voltage = <1112000>;
+ vdd-supply = <&pmcobalt_s10>;
+
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
@@ -712,6 +737,13 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <22>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
};
};
};
@@ -752,6 +784,9 @@
qcom,cpr-panic-reg-name-list =
"PERF_CPRH_STATUS", "APCLUS1_L2_SAW4_PMIC_STS";
+ qcom,cpr-aging-ref-voltage = <1112000>;
+ vdd-supply = <&pmcobalt_s13>;
+
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
@@ -894,6 +929,13 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <25>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
};
};
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
index b255fca6a691..8da491cea8dc 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
@@ -227,9 +227,20 @@
qcom,max-bandwidth-per-pipe-kbps = <4700000>;
};
+&pmcobalt_s10 {
+ regulator-min-microvolt = <568000>;
+ regulator-max-microvolt = <1056000>;
+};
+
+&pmcobalt_s13 {
+ regulator-min-microvolt = <568000>;
+ regulator-max-microvolt = <1056000>;
+};
+
&apc0_cpr {
compatible = "qcom,cprh-msmcobalt-v2-kbss-regulator";
qcom,cpr-corner-switch-delay-time = <1042>;
+ qcom,cpr-aging-ref-voltage = <1056000>;
};
&apc0_pwrcl_vreg {
@@ -315,18 +326,18 @@
1900800000>;
qcom,cpr-ro-scaling-factor =
- <4001 4019 3747 3758 3564 3480 2336
- 2247 3442 3147 2136 4156 4028 3030
- 3727 3198>,
- <4001 4019 3747 3758 3564 3480 2336
- 2247 3442 3147 2136 4156 4028 3030
- 3727 3198>,
- <3704 3601 3465 3567 3356 3473 2686
- 2773 3049 2932 2235 3816 3800 3097
- 2966 2808>,
- <2974 3092 3288 3329 2905 3096 3119
- 3225 2865 3140 2892 3592 3408 3576
- 1559 1392>;
+ <2595 2794 2577 2762 2471 2674 2199
+ 2553 3189 3255 3192 2962 3054 2982
+ 2042 2945>,
+ <2595 2794 2577 2762 2471 2674 2199
+ 2553 3189 3255 3192 2962 3054 2982
+ 2042 2945>,
+ <2391 2550 2483 2638 2382 2564 2259
+ 2555 2766 3041 2988 2935 2873 2688
+ 2013 2784>,
+ <2066 2153 2300 2434 2220 2386 2288
+ 2465 2028 2511 2487 2734 2554 2117
+ 1892 2377>;
qcom,cpr-open-loop-voltage-fuse-adjustment =
/* Speed bin 0 */
@@ -371,11 +382,16 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-ref-corner = <22 22>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
};
&apc1_cpr {
compatible = "qcom,cprh-msmcobalt-v2-kbss-regulator";
qcom,cpr-corner-switch-delay-time = <1042>;
+ qcom,cpr-aging-ref-voltage = <1056000>;
};
&apc1_perfcl_vreg {
@@ -471,18 +487,18 @@
2112000000 2208000000>;
qcom,cpr-ro-scaling-factor =
- <4001 4019 3747 3758 3564 3480 2336
- 2247 3442 3147 2136 4156 4028 3030
- 3727 3190>,
- <4001 4019 3747 3758 3564 3480 2336
- 2247 3442 3147 2136 4156 4028 3030
- 3727 3198>,
- <3704 3601 3465 3567 3356 3473 2686
- 2773 3049 2932 2235 3816 3800 3097
- 2966 2808>,
- <2974 3092 3288 3329 2905 3096 3119
- 3225 2865 3140 2892 3592 3408 3576
- 1559 1392>;
+ <2857 3057 2828 2952 2699 2798 2446
+ 2631 2629 2578 2244 3344 3289 3137
+ 3164 2655>,
+ <2857 3057 2828 2952 2699 2798 2446
+ 2631 2629 2578 2244 3344 3289 3137
+ 3164 2655>,
+ <2603 2755 2676 2777 2573 2685 2465
+ 2610 2312 2423 2243 3104 3022 3036
+ 2740 2303>,
+ <1901 2016 2096 2228 2034 2161 2077
+ 2188 1565 1870 1925 2235 2205 2413
+ 1762 1478>;
qcom,cpr-open-loop-voltage-fuse-adjustment =
/* Speed bin 0 */
@@ -527,6 +543,10 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-ref-corner = <30 26>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
};
&pm8005_s1 {
@@ -536,6 +556,7 @@
&gfx_cpr {
compatible = "qcom,cpr4-msmcobalt-v2-mmss-regulator";
+ qcom,cpr-aging-ref-voltage = <1024000>;
};
&gfx_vreg {
@@ -604,11 +625,32 @@
0 0 3487 0 3280 1896 1874 0>;
qcom,cpr-open-loop-voltage-fuse-adjustment =
- < 100000 0 0 0>;
+ < 100000 0 0 0>,
+ < 100000 0 0 0>,
+ < 85000 (-15000) (-15000) (-15000)>,
+ < 85000 (-15000) (-15000) (-15000)>,
+ < 85000 (-15000) (-15000) (-15000)>,
+ < 85000 (-15000) (-15000) (-15000)>,
+ < 85000 (-15000) (-15000) (-15000)>,
+ < 85000 (-15000) (-15000) (-15000)>;
qcom,cpr-closed-loop-voltage-adjustment =
< 96000 18000 4000 0
- 0 13000 9000 0>;
+ 0 13000 9000 0>,
+ < 96000 18000 4000 0
+ 0 13000 9000 0>,
+ < 81000 3000 (-11000) (-15000)
+ (-15000) (-2000) (-6000) (-15000)>,
+ < 81000 3000 (-11000) (-15000)
+ (-15000) (-2000) (-6000) (-15000)>,
+ < 81000 3000 (-11000) (-15000)
+ (-15000) (-2000) (-6000) (-15000)>,
+ < 81000 3000 (-11000) (-15000)
+ (-15000) (-2000) (-6000) (-15000)>,
+ < 81000 3000 (-11000) (-15000)
+ (-15000) (-2000) (-6000) (-15000)>,
+ < 81000 3000 (-11000) (-15000)
+ (-15000) (-2000) (-6000) (-15000)>;
qcom,cpr-floor-to-ceiling-max-range =
<50000 50000 50000 50000 50000 50000 70000 70000>;
@@ -622,7 +664,7 @@
qcom,cpr-aging-max-voltage-adjustment = <15000>;
qcom,cpr-aging-ref-corner = <8>;
qcom,cpr-aging-ro-scaling-factor = <2950>;
- qcom,allow-aging-voltage-adjustment = <0>;
+ qcom,allow-aging-voltage-adjustment = <0 0 1 1 1 1 1 1>;
};
&qusb_phy0 {
@@ -649,12 +691,11 @@
qcom,load-freq-tbl =
/* Encoders */
<1105920 533000000 0x55555555>, /* 4kx2304@30 */ /*TURBO*/
- < 979200 444000000 0x55555555>, /* 1080p@120,1440p@60,
+ <1036800 444000000 0x55555555>, /* 720p@240, 1080p@120,1440p@60,
* UHD@30 */ /*NOMINAL*/
- < 939700 355200000 0x55555555>, /* 4kx2304@24 */ /*SVSL1*/
- < 489600 269330000 0x55555555>, /* 1080p@60, 2560x1440@30 */
- /* SVS */
- < 432000 200000000 0x55555555>, /* 720p@120, 1080p@30 */
+ < 829440 355200000 0x55555555>, /* UHD/4096x2160@30 SVSL1 */
+ < 489600 269330000 0x55555555>, /* 1080p@60, 720p@120 SVS */
+ < 345600 200000000 0x55555555>, /* 2560x1440@24, 1080p@30 */
/* SVS2 */
/* Decoders */
@@ -665,7 +706,7 @@
<1675472 355200000 0xffffffff>, /* 4kx2304@44 */ /*SVSL1*/
<1105920 269330000 0xffffffff>, /* UHD/4k2304@30, 1080p@120 */
/* SVS */
- < 864000 200000000 0xffffffff>; /* 720p@240, 1080p@60 */
+ < 829440 200000000 0xffffffff>; /* 720p@120, 1080p@60 */
/* SVS2 */
qcom,imem-ab-tbl =
@@ -674,6 +715,35 @@
<355200000 3570000>,/* imem @ svs freq 171 Mhz */
<444000000 6750000>,/* imem @ nom freq 323 Mhz */
<533000000 8490000>;/* imem @ turbo freq 406 Mhz */
+
+ qcom,dcvs-tbl = /* minLoad LoadLow LoadHigh CodecCheck */
+ /* Decode */
+ /* Load > Nominal, Nominal <-> Turbo Eg.3840x2160@60 */
+ <1728000 1728000 2211840 0x3f00000c>,
+ /* Encoder */
+ /* Load > Nominal, Nominal <-> Turbo Eg. 4kx2304@30 */
+ <1036800 1036800 1105920 0x04000004>,
+ /* Load > SVSL1, SVSL1<-> Nominal Eg. 3840x2160@30 */
+ < 829440 829440 1036800 0x04000004>,
+ /* Load > SVS , SVS <-> SVSL1 Eg. 4kx2304@24 */
+ < 489600 489600 829440 0x04000004>;
+
+ qcom,dcvs-limit = /* Min Frame size, Min MBs/sec */
+ <32400 30>, /* Encoder 3840x2160@30 */
+ <32400 60>; /* Decoder 3840x2160@60 */
+
+};
+
+&soc {
+ /* Gold L2 SAW */
+ qcom,spm@178120000 {
+ qcom,saw2-avs-limit = <0x4200420>;
+ };
+
+ /* Silver L2 SAW */
+ qcom,spm@179120000 {
+ qcom,saw2-avs-limit = <0x4200420>;
+ };
};
/* GPU overrides */
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index 2600fa25b73f..b21e2dcf8c1a 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -1135,13 +1135,21 @@
qcom,firmware-name = "ipa_fws";
};
- qcom,chd {
+ qcom,chd_silver {
compatible = "qcom,core-hang-detect";
+ label = "silver";
qcom,threshold-arr = <0x179880b0 0x179980b0
- 0x179a80b0 0x179b80b0 0x178880b0 0x178980b0
- 0x178a80b0 0x178b80b0>;
+ 0x179a80b0 0x179b80b0>;
qcom,config-arr = <0x179880b8 0x179980b8
- 0x179a80b8 0x179b80b8 0x178880b8 0x178980b8
+ 0x179a80b8 0x179b80b8>;
+ };
+
+ qcom,chd_gold {
+ compatible = "qcom,core-hang-detect";
+ label = "gold";
+ qcom,threshold-arr = <0x178880b0 0x178980b0
+ 0x178a80b0 0x178b80b0>;
+ qcom,config-arr = <0x178880b8 0x178980b8
0x178a80b8 0x178b80b8>;
};
@@ -1192,41 +1200,49 @@
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 2>;
qcom,secure-context-bank;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb1 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 8>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb2 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 9>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb3 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 10>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb4 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 11>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb6 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 5>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb7 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 6>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb8 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 7>;
+ dma-coherent;
};
};
@@ -2694,6 +2710,11 @@
reg = <0x10 8>;
};
+ dload_type@18 {
+ compatible = "qcom,msm-imem-dload-type";
+ reg = <0x18 4>;
+ };
+
restart_reason@65c {
compatible = "qcom,msm-imem-restart_reason";
reg = <0x65c 4>;
@@ -2876,11 +2897,6 @@
vdd-3.3-ch0-supply = <&pmcobalt_l25_pin_ctrl>;
qcom,vdd-0.8-cx-mx-config = <800000 800000>;
qcom,vdd-3.3-ch0-config = <3104000 3312000>;
- qcom,msm-bus,name = "msm-icnss";
- qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,num-paths = <1>;
- qcom,msm-bus,vectors-KBps = <81 10065 0 0>,
- <81 10065 0 16000>;
qcom,icnss-vadc = <&pmcobalt_vadc>;
qcom,icnss-adc_tm = <&pmcobalt_adc_tm>;
};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi
index 11f602d842bc..cb5fce378b6c 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-bus.dtsi
@@ -39,8 +39,8 @@
qcom,qos-off = <4096>;
qcom,base-offset = <16384>;
clock-names = "bus_clk", "bus_a_clk";
- clocks = <&clock_gcc RPM_AGGRE2_NOC_CLK>,
- <&clock_gcc RPM_AGGRE2_NOC_A_CLK>;
+ clocks = <&clock_rpmcc RPM_AGGR2_NOC_CLK>,
+ <&clock_rpmcc RPM_AGGR2_NOC_A_CLK>;
};
fab_bimc: fab-bimc {
@@ -52,8 +52,8 @@
qcom,bypass-qos-prg;
qcom,util-fact = <153>;
clock-names = "bus_clk", "bus_a_clk";
- clocks = <&clock_gcc RPM_BIMC_MSMBUS_CLK>,
- <&clock_gcc RPM_BIMC_MSMBUS_A_CLK>;
+ clocks = <&clock_rpmcc BIMC_MSMBUS_CLK>,
+ <&clock_rpmcc BIMC_MSMBUS_A_CLK>;
};
fab_cnoc: fab-cnoc {
@@ -64,8 +64,8 @@
qcom,bypass-qos-prg;
qcom,bus-type = <1>;
clock-names = "bus_clk", "bus_a_clk";
- clocks = <&clock_gcc RPM_CNOC_MSMBUS_CLK>,
- <&clock_gcc RPM_CNOC_MSMBUS_A_CLK>;
+ clocks = <&clock_rpmcc CNOC_MSMBUS_CLK>,
+ <&clock_rpmcc CNOC_MSMBUS_A_CLK>;
};
fab_gnoc: fab-gnoc {
@@ -87,8 +87,8 @@
qcom,base-offset = <20480>;
qcom,util-fact = <154>;
clock-names = "bus_clk", "bus_a_clk";
- clocks = <&clock_gcc RPM_MMSSNOC_AXI_CLK>,
- <&clock_gcc RPM_MMSSNOC_AXI_A_CLK>;
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_rpmcc MMSSNOC_AXI_A_CLK>;
};
fab_snoc: fab-snoc {
@@ -101,8 +101,8 @@
qcom,qos-off = <4096>;
qcom,base-offset = <24576>;
clock-names = "bus_clk", "bus_a_clk";
- clocks = <&clock_gcc RPM_SNOC_MSMBUS_CLK>,
- <&clock_gcc RPM_SNOC_MSMBUS_A_CLK>;
+ clocks = <&clock_rpmcc SNOC_MSMBUS_CLK>,
+ <&clock_rpmcc SNOC_MSMBUS_A_CLK>;
};
fab_mnoc_ahb: fab-mnoc-ahb {
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi
index b60d4013dad8..3826b00bf09e 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-coresight.dtsi
@@ -30,8 +30,8 @@
coresight-name = "coresight-tmc-etr";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "core_a_clk";
port{
@@ -80,8 +80,8 @@
coresight-ctis = <&cti0 &cti8>;
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "core_a_clk";
ports{
@@ -115,8 +115,8 @@
coresight-name = "coresight-funnel-merg";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "core_a_clk";
ports {
@@ -150,8 +150,8 @@
coresight-name = "coresight-funnel-in0";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "core_a_clk";
ports {
@@ -193,8 +193,8 @@
coresight-name = "coresight-stm";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "core_a_clk";
port{
@@ -211,8 +211,8 @@
coresight-name = "coresight-cti0";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -223,8 +223,8 @@
coresight-name = "coresight-cti1";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -235,8 +235,8 @@
coresight-name = "coresight-cti2";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -247,8 +247,8 @@
coresight-name = "coresight-cti3";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -259,8 +259,8 @@
coresight-name = "coresight-cti4";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -271,8 +271,8 @@
coresight-name = "coresight-cti5";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -283,8 +283,8 @@
coresight-name = "coresight-cti6";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -295,8 +295,8 @@
coresight-name = "coresight-cti7";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -307,8 +307,8 @@
coresight-name = "coresight-cti8";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -319,8 +319,8 @@
coresight-name = "coresight-cti9";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -331,8 +331,8 @@
coresight-name = "coresight-cti10";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -343,8 +343,8 @@
coresight-name = "coresight-cti11";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -355,8 +355,8 @@
coresight-name = "coresight-cti12";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -367,8 +367,8 @@
coresight-name = "coresight-cti13";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -379,8 +379,8 @@
coresight-name = "coresight-cti14";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -391,8 +391,8 @@
coresight-name = "coresight-cti15";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
};
@@ -405,8 +405,8 @@
coresight-name = "coresight-funnel-qatb";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "apb_pclk", "core_a_clk";
ports {
@@ -451,8 +451,8 @@
<5 32>,
<9 64>;
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
ports {
@@ -483,8 +483,8 @@
coresight-name = "coresight-tpdm-dcc";
- clocks = <&clock_gcc RPM_QDSS_CLK>,
- <&clock_gcc RPM_QDSS_A_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
clock-names = "core_clk", "core_a_clk";
port{
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi
index d28d09c2a527..e8c66871425d 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-pinctrl.dtsi
@@ -48,5 +48,69 @@
output-low;
};
};
+
+ /* SDC pin type */
+ sdc1_clk_on: sdc1_clk_on {
+ config {
+ pins = "sdc1_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <16>; /* 16 MA */
+ };
+ };
+
+ sdc1_clk_off: sdc1_clk_off {
+ config {
+ pins = "sdc1_clk";
+ bias-disable; /* NO pull */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc1_cmd_on: sdc1_cmd_on {
+ config {
+ pins = "sdc1_cmd";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc1_cmd_off: sdc1_cmd_off {
+ config {
+ pins = "sdc1_cmd";
+ num-grp-pins = <1>;
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc1_data_on: sdc1_data_on {
+ config {
+ pins = "sdc1_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <10>; /* 10 MA */
+ };
+ };
+
+ sdc1_data_off: sdc1_data_off {
+ config {
+ pins = "sdc1_data";
+ bias-pull-up; /* pull up */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ sdc1_rclk_on: sdc1_rclk_on {
+ config {
+ pins = "sdc1_rclk";
+ bias-pull-down; /* pull down */
+ };
+ };
+
+ sdc1_rclk_off: sdc1_rclk_off {
+ config {
+ pins = "sdc1_rclk";
+ bias-pull-down; /* pull down */
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-rumi.dts b/arch/arm/boot/dts/qcom/msmfalcon-rumi.dts
index 0d694a6cd9fa..f0ba8b115120 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-rumi.dts
+++ b/arch/arm/boot/dts/qcom/msmfalcon-rumi.dts
@@ -27,3 +27,29 @@
pinctrl-names = "default";
pinctrl-0 = <&uart_console_active>;
};
+
+&sdhc_1 {
+ /* device core power supply */
+ vdd-supply = <&pmfalcon_l4b>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <200 570000>;
+
+ /* device communication power supply */
+ vdd-io-supply = <&pmfalcon_l8a>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <200 325000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
+
+ qcom,clk-rates = <400000 20000000 25000000 50000000 192000000
+ 384000000>;
+
+ qcom,nonremovable;
+ qcom,bus-speed-mode = "HS400_1p8v", "HS200_1p8v", "DDR_1p8v";
+
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-sim.dts b/arch/arm/boot/dts/qcom/msmfalcon-sim.dts
index eaaa1b407425..085419b7e108 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-sim.dts
+++ b/arch/arm/boot/dts/qcom/msmfalcon-sim.dts
@@ -27,3 +27,29 @@
pinctrl-names = "default";
pinctrl-0 = <&uart_console_active>;
};
+
+&sdhc_1 {
+ /* device core power supply */
+ vdd-supply = <&pmfalcon_l4b>;
+ qcom,vdd-voltage-level = <2950000 2950000>;
+ qcom,vdd-current-level = <200 570000>;
+
+ /* device communication power supply */
+ vdd-io-supply = <&pmfalcon_l8a>;
+ qcom,vdd-io-always-on;
+ qcom,vdd-io-lpm-sup;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ qcom,vdd-io-current-level = <200 325000>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on &sdc1_rclk_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off &sdc1_rclk_off>;
+
+ qcom,clk-rates = <400000 20000000 25000000 50000000 192000000
+ 384000000>;
+
+ qcom,nonremovable;
+ qcom,bus-speed-mode = "HS400_1p8v", "HS200_1p8v", "DDR_1p8v";
+
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi
index bdbcd9d7b6f9..e93067e3697c 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi
@@ -172,4 +172,50 @@
compatible = "qcom,smp2pgpio_test_smp2p_5_out";
gpios = <&smp2pgpio_smp2p_5_out 0 0>;
};
+
+ /* ssr - inbound entry from lpass */
+ smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - outbound entry to lpass */
+ smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - inbound entry from turing */
+ smp2pgpio_ssr_smp2p_5_in: qcom,smp2pgpio-ssr-smp2p-5-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <5>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - outbound entry to turing */
+ smp2pgpio_ssr_smp2p_5_out: qcom,smp2pgpio-ssr-smp2p-5-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <5>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon.dtsi b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
index 67748d6683c0..8e8c407734eb 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
@@ -14,6 +14,7 @@
#include <dt-bindings/clock/qcom,gcc-msmfalcon.h>
#include <dt-bindings/clock/qcom,gpu-msmfalcon.h>
#include <dt-bindings/clock/qcom,mmcc-msmfalcon.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/regulator/qcom,rpm-smd-regulator.h>
@@ -25,6 +26,7 @@
aliases {
serial0 = &uartblsp1dm1;
+ sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
};
chosen {
@@ -135,6 +137,22 @@
};
};
+ clocks {
+ xo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ clock-output-names = "sleep_clk";
+ };
+ };
+
soc: soc { };
reserved-memory {
@@ -360,19 +378,50 @@
};
};
- clock_gcc: qcom,dummycc {
+ clock_rpmcc: qcom,rpmcc {
+ compatible = "qcom,rpmcc-msmfalcon", "qcom,rpmcc";
+ #clock-cells = <1>;
+ };
+
+ clock_gcc: clock-controller@100000 {
compatible = "qcom,dummycc";
+ clock-output-names = "gcc_clocks";
#clock-cells = <1>;
+ #reset-cells = <1>;
};
- clock_mmss: qcom,dummycc {
+ clock_mmss: clock-controller@c8c0000 {
compatible = "qcom,dummycc";
+ clock-output-names = "mmss_clocks";
#clock-cells = <1>;
+ #reset-cells = <1>;
};
- clock_gfx: qcom,dummycc {
+ clock_gfx: clock-controller@5065000 {
compatible = "qcom,dummycc";
+ clock-output-names = "gfx_clocks";
#clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ sdhc_1: sdhci@c0c4000 {
+ compatible = "qcom,sdhci-msm-v5";
+ reg = <0xc0c4000 0x1000>, <0xc0c5000 0x1000>;
+ reg-names = "hc_mem", "cmdq_mem";
+
+ interrupts = <0 129 0>, <0 227 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+
+ qcom,bus-width = <8>;
+ qcom,large-address-bus;
+
+ qcom,devfreq,freq-table = <50000000 200000000>;
+
+ clocks = <&clock_gcc GCC_SDCC1_AHB_CLK>,
+ <&clock_gcc GCC_SDCC1_APPS_CLK>;
+ clock-names = "iface_clk", "core_clk";
+
+ status = "disabled";
};
qcom,ipc-spinlock@1f40000 {
@@ -398,7 +447,7 @@
<0x10b4000 0x800>;
reg-names = "dcc-base", "dcc-ram-base";
- clocks = <&clock_gcc RPM_QDSS_CLK>;
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>;
clock-names = "dcc_clk";
};
@@ -620,12 +669,97 @@
memory-region = <&venus_fw_mem>;
status = "ok";
};
+
+ qcom,icnss@18800000 {
+ status = "disabled";
+ compatible = "qcom,icnss";
+ reg = <0x18800000 0x800000>,
+ <0x10ac000 0x20>;
+ reg-names = "membase", "mpm_config";
+ interrupts = <0 413 0>, /* CE0 */
+ <0 414 0>, /* CE1 */
+ <0 415 0>, /* CE2 */
+ <0 416 0>, /* CE3 */
+ <0 417 0>, /* CE4 */
+ <0 418 0>, /* CE5 */
+ <0 420 0>, /* CE6 */
+ <0 421 0>, /* CE7 */
+ <0 422 0>, /* CE8 */
+ <0 423 0>, /* CE9 */
+ <0 424 0>, /* CE10 */
+ <0 425 0>; /* CE11 */
+ qcom,wlan-msa-memory = <0x100000>;
+ };
+
+ qcom,lpass@15700000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0x15700000 0x00100>;
+ interrupts = <0 162 1>;
+
+ vdd_cx-supply = <&pmfalcon_s3b_level>;
+ qcom,proxy-reg-names = "vdd_cx";
+ qcom,vdd_cx-uV-uA = <RPM_SMD_REGULATOR_LEVEL_TURBO 100000>;
+
+ clocks = <&clock_rpmcc CXO_PIL_LPASS_CLK>;
+ clock-names = "xo";
+ qcom,proxy-clock-names = "xo";
+
+ qcom,pas-id = <1>;
+ qcom,proxy-timeout-ms = <10000>;
+ qcom,smem-id = <423>;
+ qcom,sysmon-id = <1>;
+ qcom,ssctl-instance-id = <0x14>;
+ qcom,firmware-name = "adsp";
+ memory-region = <&adsp_fw_mem>;
+
+ /* GPIO inputs from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+ status = "ok";
+ };
+
+ qcom,turing@1a300000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0x1a300000 0x00100>;
+ interrupts = <0 518 1>;
+
+ vdd_cx-supply = <&pmfalcon_s3b_level>;
+ qcom,proxy-reg-names = "vdd_cx";
+ qcom,vdd_cx-uV-uA = <RPM_SMD_REGULATOR_LEVEL_TURBO 100000>;
+
+ clocks = <&clock_rpmcc CXO_PIL_CDSP_CLK>;
+ clock-names = "xo";
+ qcom,proxy-clock-names = "xo";
+
+ qcom,pas-id = <18>;
+ qcom,proxy-timeout-ms = <10000>;
+ qcom,smem-id = <423>;
+ qcom,sysmon-id = <7>;
+ qcom,ssctl-instance-id = <0x17>;
+ qcom,firmware-name = "cdsp";
+ memory-region = <&cdsp_fw_mem>;
+
+ /* GPIO inputs from turing */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_5_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_5_in 2 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_5_in 1 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_5_in 3 0>;
+
+ /* GPIO output to turing */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_5_out 0 0>;
+ status = "ok";
+ };
};
#include "msmfalcon-ion.dtsi"
#include "msmfalcon-bus.dtsi"
#include "msmfalcon-regulator.dtsi"
-#include "msm-gdsc-cobalt.dtsi"
+#include "msm-gdsc-falcon.dtsi"
&gdsc_usb30 {
clock-names = "core_clk";
@@ -649,6 +783,14 @@
status = "ok";
};
+&gdsc_hlos1_vote_turing_adsp {
+ status = "ok";
+};
+
+&gdsc_hlos2_vote_turing_adsp {
+ status = "ok";
+};
+
&gdsc_venus {
status = "ok";
};
@@ -699,5 +841,8 @@
&gdsc_gpu_cx {
status = "ok";
};
+
#include "msm-pmfalcon.dtsi"
#include "msm-pm2falcon.dtsi"
+#include "msm-arm-smmu-falcon.dtsi"
+#include "msm-arm-smmu-impl-defs-falcon.dtsi"
diff --git a/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi b/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi
new file mode 100644
index 000000000000..f6deef335844
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msmtriton-ion.dtsi
@@ -0,0 +1,52 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,ion {
+ compatible = "qcom,msm-ion";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ system_heap: qcom,ion-heap@25 {
+ reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
+ };
+
+ system_contig_heap: qcom,ion-heap@21 {
+ reg = <21>;
+ qcom,ion-heap-type = "SYSTEM_CONTIG";
+ };
+
+ qcom,ion-heap@22 { /* ADSP HEAP */
+ reg = <22>;
+ memory-region = <&adsp_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
+ qcom,ion-heap@27 { /* QSEECOM HEAP */
+ reg = <27>;
+ memory-region = <&qseecom_mem>;
+ qcom,ion-heap-type = "DMA";
+ };
+
+ qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
+ reg = <10>;
+ memory-region = <&secure_display_memory>;
+ qcom,ion-heap-type = "HYP_CMA";
+ };
+
+ qcom,ion-heap@9 {
+ reg = <9>;
+ qcom,ion-heap-type = "SYSTEM_SECURE";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi b/arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi
index 695a4f3b63c7..1a72414de094 100644
--- a/arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi
+++ b/arch/arm/boot/dts/qcom/msmtriton-smp2p.dtsi
@@ -133,4 +133,27 @@
compatible = "qcom,smp2pgpio-sleepstate-out";
gpios = <&smp2pgpio_sleepstate_2_out 0 0>;
};
+
+ /* ssr - inbound entry from lpass */
+ smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - outbound entry to lpass */
+ smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msmtriton.dtsi b/arch/arm/boot/dts/qcom/msmtriton.dtsi
index 3f0d4cc48696..083c14af7839 100644
--- a/arch/arm/boot/dts/qcom/msmtriton.dtsi
+++ b/arch/arm/boot/dts/qcom/msmtriton.dtsi
@@ -14,7 +14,9 @@
#include <dt-bindings/clock/qcom,gcc-msmfalcon.h>
#include <dt-bindings/clock/qcom,gpu-msmfalcon.h>
#include <dt-bindings/clock/qcom,mmcc-msmfalcon.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/regulator/qcom,rpm-smd-regulator.h>
/ {
model = "Qualcomm Technologies, Inc. MSMTRITON";
@@ -134,6 +136,22 @@
};
};
+ clocks {
+ xo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+
+ sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32764>;
+ clock-output-names = "sleep_clk";
+ };
+ };
+
soc: soc { };
reserved-memory {
@@ -159,6 +177,14 @@
reg = <0x0 0x92a00000 0x0 0x1e00000>;
};
+ venus_fw_mem: venus_fw_region {
+ compatible = "shared-dma-pool";
+ alloc-ranges = <0x0 0x80000000 0x0 0x20000000>;
+ reusable;
+ alignment = <0x0 0x400000>;
+ size = <0x0 0x800000>;
+ };
+
adsp_mem: adsp_region {
compatible = "shared-dma-pool";
alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
@@ -308,19 +334,30 @@
};
};
- clock_gcc: qcom,dummycc {
+ clock_rpmcc: qcom,rpmcc {
+ compatible = "qcom,rpmcc-msmfalcon", "qcom,rpmcc";
+ #clock-cells = <1>;
+ };
+
+ clock_gcc: clock-controller@100000 {
compatible = "qcom,dummycc";
+ clock-output-names = "gcc_clocks";
#clock-cells = <1>;
+ #reset-cells = <1>;
};
- clock_mmss: qcom,dummycc {
+ clock_mmss: clock-controller@c8c0000 {
compatible = "qcom,dummycc";
+ clock-output-names = "mmss_clocks";
#clock-cells = <1>;
+ #reset-cells = <1>;
};
- clock_gfx: qcom,dummycc {
+ clock_gfx: clock-controller@5065000 {
compatible = "qcom,dummycc";
+ clock-output-names = "gfx_clocks";
#clock-cells = <1>;
+ #reset-cells = <1>;
};
qcom,ipc-spinlock@1f40000 {
@@ -490,4 +527,163 @@
qcom,xprt-version = <1>;
qcom,fragmented-data;
};
+
+ qcom,icnss@18800000 {
+ status = "disabled";
+ compatible = "qcom,icnss";
+ reg = <0x18800000 0x800000>,
+ <0x10ac000 0x20>;
+ reg-names = "membase", "mpm_config";
+ interrupts = <0 413 0>, /* CE0 */
+ <0 414 0>, /* CE1 */
+ <0 415 0>, /* CE2 */
+ <0 416 0>, /* CE3 */
+ <0 417 0>, /* CE4 */
+ <0 418 0>, /* CE5 */
+ <0 420 0>, /* CE6 */
+ <0 421 0>, /* CE7 */
+ <0 422 0>, /* CE8 */
+ <0 423 0>, /* CE9 */
+ <0 424 0>, /* CE10 */
+ <0 425 0>; /* CE11 */
+ qcom,wlan-msa-memory = <0x100000>;
+ };
+
+ qcom,lpass@15700000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0x15700000 0x00100>;
+ interrupts = <0 162 1>;
+
+ vdd_cx-supply = <&pmfalcon_s3b_level>;
+ qcom,proxy-reg-names = "vdd_cx";
+ qcom,vdd_cx-uV-uA = <RPM_SMD_REGULATOR_LEVEL_TURBO 100000>;
+
+ clocks = <&clock_rpmcc CXO_PIL_LPASS_CLK>;
+ clock-names = "xo";
+ qcom,proxy-clock-names = "xo";
+
+ qcom,pas-id = <1>;
+ qcom,proxy-timeout-ms = <10000>;
+ qcom,smem-id = <423>;
+ qcom,sysmon-id = <1>;
+ qcom,ssctl-instance-id = <0x14>;
+ qcom,firmware-name = "adsp";
+ memory-region = <&adsp_fw_mem>;
+
+ /* GPIO inputs from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+ status = "ok";
+ };
+
+ qcom,venus@cce0000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0xcce0000 0x4000>;
+
+ vdd-supply = <&gdsc_venus>;
+ qcom,proxy-reg-names = "vdd";
+
+ clocks = <&clock_mmss MMSS_VIDEO_CORE_CLK>,
+ <&clock_mmss MMSS_VIDEO_AHB_CLK>,
+ <&clock_mmss MMSS_VIDEO_AXI_CLK>;
+ clock-names = "core_clk","iface_clk",
+ "bus_clk";
+ qcom,proxy-clock-names = "core_clk",
+ "iface_clk","bus_clk";
+
+ qcom,msm-bus,name = "pil-venus";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <63 512 0 0>,
+ <63 512 0 304000>;
+
+ qcom,pas-id = <9>;
+ qcom,proxy-timeout-ms = <100>;
+ qcom,firmware-name = "venus";
+ memory-region = <&venus_fw_mem>;
+ status = "ok";
+ };
+};
+
+#include "msmtriton-ion.dtsi"
+#include "msmfalcon-regulator.dtsi"
+#include "msm-gdsc-falcon.dtsi"
+
+&gdsc_usb30 {
+ clock-names = "core_clk";
+ clocks = <&clock_gcc GCC_USB30_MASTER_CLK>;
+ status = "ok";
+};
+
+&gdsc_ufs {
+ status = "ok";
+};
+
+&gdsc_bimc_smmu {
+ clock-names = "bus_clk";
+ clocks = <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>;
+ proxy-supply = <&gdsc_bimc_smmu>;
+ qcom,proxy-consumer-enable;
+ status = "ok";
+};
+
+&gdsc_hlos1_vote_lpass_adsp {
+ status = "ok";
+};
+
+&gdsc_venus {
+ status = "ok";
+};
+
+&gdsc_venus_core0 {
+ qcom,support-hw-trigger;
+ status = "ok";
+};
+
+&gdsc_camss_top {
+ status = "ok";
+};
+
+&gdsc_vfe0 {
+ parent-supply = <&gdsc_camss_top>;
+ status = "ok";
+};
+
+&gdsc_vfe1 {
+ parent-supply = <&gdsc_camss_top>;
+ status = "ok";
+};
+
+&gdsc_cpp {
+ parent-supply = <&gdsc_camss_top>;
+ status = "ok";
+};
+
+&gdsc_mdss {
+ clock-names = "bus_clk", "rot_clk";
+ clocks = <&clock_mmss MMSS_MDSS_AXI_CLK>,
+ <&clock_mmss MMSS_MDSS_ROT_CLK>;
+ proxy-supply = <&gdsc_mdss>;
+ qcom,proxy-consumer-enable;
+ status = "ok";
+};
+
+&gdsc_gpu_gx {
+ clock-names = "bimc_core_clk", "core_clk", "core_root_clk";
+ clocks = <&clock_gcc GCC_GPU_BIMC_GFX_CLK>,
+ <&clock_gfx GPUCC_GFX3D_CLK>,
+ <&clock_gfx GFX3D_CLK_SRC>;
+ qcom,force-enable-root-clk;
+ parent-supply = <&gfx_vreg_corner>;
+ status = "ok";
+};
+
+&gdsc_gpu_cx {
+ status = "ok";
};
diff --git a/arch/arm/configs/msmfalcon_defconfig b/arch/arm/configs/msmfalcon_defconfig
index 64da50bb55b2..069603eefe48 100644
--- a/arch/arm/configs/msmfalcon_defconfig
+++ b/arch/arm/configs/msmfalcon_defconfig
@@ -336,7 +336,6 @@ CONFIG_FB_MSM=y
CONFIG_FB_MSM_MDSS=y
CONFIG_FB_MSM_MDSS_WRITEBACK=y
CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
-CONFIG_FB_MSM_MDSS_DP_PANEL=y
CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
@@ -420,7 +419,7 @@ CONFIG_IPA3=y
CONFIG_RMNET_IPA3=y
CONFIG_GPIO_USB_DETECT=y
CONFIG_USB_BAM=y
-CONFIG_MSM_MDSS_PLL=y
+CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_ARM_SMMU=y
CONFIG_IOMMU_DEBUG=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 036d6aa5c062..0bda100dfb5a 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -13,6 +13,7 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SCHED_HMP=y
CONFIG_SCHED_HMP_CSTATE_AWARE=y
@@ -21,6 +22,7 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
@@ -319,6 +321,7 @@ CONFIG_QPNP_SMB2=y
CONFIG_SMB138X_CHARGER=y
CONFIG_QPNP_QNOVO=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
CONFIG_LIMITS_MONITOR=y
CONFIG_LIMITS_LITE_HW=y
CONFIG_THERMAL_MONITOR=y
@@ -435,6 +438,7 @@ CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_GSI=y
@@ -532,6 +536,7 @@ CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
CONFIG_QSEE_IPC_IRQ_BRIDGE=y
CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 77f0129776a3..3568fe4ed29f 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -13,6 +13,7 @@ CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SCHED_HMP=y
@@ -21,6 +22,7 @@ CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
+CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
@@ -322,6 +324,7 @@ CONFIG_QPNP_SMB2=y
CONFIG_SMB138X_CHARGER=y
CONFIG_QPNP_QNOVO=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
CONFIG_LIMITS_MONITOR=y
CONFIG_LIMITS_LITE_HW=y
CONFIG_THERMAL_MONITOR=y
@@ -437,6 +440,7 @@ CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_GSI=y
@@ -551,6 +555,7 @@ CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
CONFIG_QSEE_IPC_IRQ_BRIDGE=y
CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm64/configs/msmfalcon-perf_defconfig b/arch/arm64/configs/msmfalcon-perf_defconfig
index 1bc352704893..5d271cad0aad 100644
--- a/arch/arm64/configs/msmfalcon-perf_defconfig
+++ b/arch/arm64/configs/msmfalcon-perf_defconfig
@@ -473,6 +473,7 @@ CONFIG_RMNET_IPA3=y
CONFIG_GPIO_USB_DETECT=y
CONFIG_SEEMP_CORE=y
CONFIG_USB_BAM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_ARM_SMMU=y
diff --git a/arch/arm64/configs/msmfalcon_defconfig b/arch/arm64/configs/msmfalcon_defconfig
index 34f0da3c37a4..707bc68c825f 100644
--- a/arch/arm64/configs/msmfalcon_defconfig
+++ b/arch/arm64/configs/msmfalcon_defconfig
@@ -483,6 +483,7 @@ CONFIG_RMNET_IPA3=y
CONFIG_GPIO_USB_DETECT=y
CONFIG_SEEMP_CORE=y
CONFIG_USB_BAM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST=y
@@ -519,6 +520,7 @@ CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_IRQ_HELPER=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
CONFIG_MSM_GLADIATOR_ERP_V2=y
CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 24165784b803..32441df2270e 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -27,19 +27,24 @@
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>
+#include <linux/msm_rtb.h>
#ifdef CONFIG_PID_IN_CONTEXTIDR
static inline void contextidr_thread_switch(struct task_struct *next)
{
+ pid_t pid = task_pid_nr(next);
asm(
" msr contextidr_el1, %0\n"
" isb"
:
- : "r" (task_pid_nr(next)));
+ : "r" (pid));
+ uncached_logk(LOGK_CTXID, (void *)(u64)pid);
+
}
#else
static inline void contextidr_thread_switch(struct task_struct *next)
{
+ uncached_logk(LOGK_CTXID, (void *)(u64)task_pid_nr(next));
}
#endif
diff --git a/drivers/base/regmap/regmap-swr.c b/drivers/base/regmap/regmap-swr.c
index 027cbfc505ab..1641c374b189 100644
--- a/drivers/base/regmap/regmap-swr.c
+++ b/drivers/base/regmap/regmap-swr.c
@@ -28,11 +28,16 @@ static int regmap_swr_gather_write(void *context,
struct device *dev = context;
struct swr_device *swr = to_swr_device(dev);
struct regmap *map = dev_get_regmap(dev, NULL);
- size_t addr_bytes = map->format.reg_bytes;
+ size_t addr_bytes;
size_t val_bytes;
int i, ret = 0;
u16 reg_addr = 0;
+ if (map == NULL) {
+ dev_err(dev, "%s: regmap is NULL\n", __func__);
+ return -EINVAL;
+ }
+ addr_bytes = map->format.reg_bytes;
if (swr == NULL) {
dev_err(dev, "%s: swr device is NULL\n", __func__);
return -EINVAL;
@@ -154,10 +159,15 @@ static int regmap_swr_read(void *context,
struct device *dev = context;
struct swr_device *swr = to_swr_device(dev);
struct regmap *map = dev_get_regmap(dev, NULL);
- size_t addr_bytes = map->format.reg_bytes;
+ size_t addr_bytes;
int ret = 0;
u16 reg_addr = 0;
+ if (map == NULL) {
+ dev_err(dev, "%s: regmap is NULL\n", __func__);
+ return -EINVAL;
+ }
+ addr_bytes = map->format.reg_bytes;
if (swr == NULL) {
dev_err(dev, "%s: swr is NULL\n", __func__);
return -EINVAL;
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 13116f010e89..67c1207d35be 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -163,6 +163,7 @@ struct fastrpc_smmu {
int enabled;
int faults;
int secure;
+ int coherent;
};
struct fastrpc_session_ctx {
@@ -1129,6 +1130,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
for (oix = 0; oix < inbufs + outbufs; ++oix) {
int i = ctx->overps[oix]->raix;
struct fastrpc_mmap *map = ctx->maps[i];
+ if (ctx->fl->sctx->smmu.coherent)
+ continue;
if (map && map->uncached)
continue;
if (rpra[i].buf.len && ctx->overps[oix]->mstart)
@@ -1141,7 +1144,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
rpra[inh + i].buf.len = ctx->lpra[inh + i].buf.len;
rpra[inh + i].h = ctx->lpra[inh + i].h;
}
- dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
+ if (!ctx->fl->sctx->smmu.coherent)
+ dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
bail:
return err;
}
@@ -1372,13 +1376,15 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
goto bail;
}
- inv_args_pre(ctx);
- if (FASTRPC_MODE_SERIAL == mode)
- inv_args(ctx);
+ if (!fl->sctx->smmu.coherent) {
+ inv_args_pre(ctx);
+ if (mode == FASTRPC_MODE_SERIAL)
+ inv_args(ctx);
+ }
VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
if (err)
goto bail;
- if (FASTRPC_MODE_PARALLEL == mode)
+ if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
inv_args(ctx);
wait:
if (kernel)
@@ -2275,7 +2281,6 @@ static int fastrpc_cb_probe(struct device *dev)
const char *name;
unsigned int start = 0x80000000;
int err = 0, i;
- int disable_htw = 1;
int secure_vmid = VMID_CP_PIXEL;
VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
@@ -2302,6 +2307,8 @@ static int fastrpc_cb_probe(struct device *dev)
sess = &chan->session[chan->sesscount];
sess->smmu.cb = iommuspec.args[0];
sess->used = 0;
+ sess->smmu.coherent = of_property_read_bool(dev->of_node,
+ "dma-coherent");
sess->smmu.secure = of_property_read_bool(dev->of_node,
"qcom,secure-context-bank");
if (sess->smmu.secure)
@@ -2311,9 +2318,6 @@ static int fastrpc_cb_probe(struct device *dev)
start, 0x7fffffff)));
if (err)
goto bail;
- iommu_domain_set_attr(sess->smmu.mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
iommu_set_fault_handler(sess->smmu.mapping->domain,
fastrpc_smmu_fault_handler, sess);
if (sess->smmu.secure)
@@ -2341,7 +2345,6 @@ static int fastrpc_cb_legacy_probe(struct device *dev)
unsigned int *range = 0, range_size = 0;
unsigned int *sids = 0, sids_size = 0;
int err = 0, ret = 0, i;
- int disable_htw = 1;
VERIFY(err, 0 != (domains_child_node = of_get_child_by_name(
dev->of_node,
@@ -2395,9 +2398,6 @@ static int fastrpc_cb_legacy_probe(struct device *dev)
range[0], range[1])));
if (err)
goto bail;
- iommu_domain_set_attr(first_sess->smmu.mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
VERIFY(err, !arm_iommu_attach_device(first_sess->dev,
first_sess->smmu.mapping));
if (err)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index ed763c7e98fc..1c625764133d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -547,6 +547,26 @@ void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
/*
+ * Aggregate the rate of all child nodes which are enabled and exclude the
+ * child node which requests for clk_aggregate_rate.
+ */
+unsigned long clk_aggregate_rate(struct clk_hw *hw,
+ const struct clk_core *parent)
+{
+ struct clk_core *child;
+ unsigned long aggre_rate = 0;
+
+ hlist_for_each_entry(child, &parent->children, child_node) {
+ if (child->enable_count &&
+ strcmp(child->name, hw->init->name))
+ aggre_rate = max(child->rate, aggre_rate);
+ }
+
+ return aggre_rate;
+}
+EXPORT_SYMBOL_GPL(clk_aggregate_rate);
+
+/*
* Helper for finding best parent to provide a given frequency. This can be used
* directly as a determine_rate callback (e.g. for a mux), or from a more
* complex clock that may combine a mux with other operations.
diff --git a/drivers/clk/msm/clock-gcc-cobalt.c b/drivers/clk/msm/clock-gcc-cobalt.c
index 05272118af16..46e791b3cb99 100644
--- a/drivers/clk/msm/clock-gcc-cobalt.c
+++ b/drivers/clk/msm/clock-gcc-cobalt.c
@@ -2374,7 +2374,7 @@ static struct mux_clk gcc_debug_mux = {
{ &debug_cpu_clk.c, 0x00c0 },
{ &snoc_clk.c, 0x0000 },
{ &cnoc_clk.c, 0x000e },
- { &bimc_clk.c, 0x00a9 },
+ { &bimc_clk.c, 0x014e },
{ &gcc_mmss_sys_noc_axi_clk.c, 0x001f },
{ &gcc_mmss_noc_cfg_ahb_clk.c, 0x0020 },
{ &gcc_usb30_master_clk.c, 0x003e },
diff --git a/drivers/clk/msm/clock-mmss-cobalt.c b/drivers/clk/msm/clock-mmss-cobalt.c
index 873dd40d3a44..9c1cdf967fb1 100644
--- a/drivers/clk/msm/clock-mmss-cobalt.c
+++ b/drivers/clk/msm/clock-mmss-cobalt.c
@@ -399,6 +399,8 @@ static struct clk_freq_tbl ftbl_cpp_clk_src[] = {
static struct clk_freq_tbl ftbl_cpp_clk_src_vq[] = {
F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 384000000, mmpll4_pll_out, 2, 0, 0),
+ F_MM( 404000000, mmpll0_pll_out, 2, 0, 0),
F_MM( 480000000, mmpll7_pll_out, 2, 0, 0),
F_MM( 576000000, mmpll10_pll_out, 1, 0, 0),
F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
@@ -1112,8 +1114,8 @@ static struct rcg_clk dp_pixel_clk_src = {
.parent = &ext_dp_phy_pll_vco.c,
.ops = &clk_ops_rcg_dp,
.flags = CLKFLAG_NO_RATE_CACHE,
- VDD_DIG_FMAX_MAP3(LOWER, 148380, LOW, 296740,
- NOMINAL, 593470),
+ VDD_DIG_FMAX_MAP3(LOWER, 154000000, LOW, 337500000,
+ NOMINAL, 675000000),
CLK_INIT(dp_pixel_clk_src.c),
},
};
@@ -2703,7 +2705,6 @@ static void msm_mmsscc_hamster_fixup(void)
csi2phytimer_clk_src.c.fmax[VDD_DIG_LOW_L1] = 269333333;
mdp_clk_src.c.fmax[VDD_DIG_LOW_L1] = 330000000;
- dp_pixel_clk_src.c.fmax[VDD_DIG_LOWER] = 154000000;
extpclk_clk_src.c.fmax[VDD_DIG_LOW] = 312500000;
extpclk_clk_src.c.fmax[VDD_DIG_LOW_L1] = 375000000;
rot_clk_src.c.fmax[VDD_DIG_LOW_L1] = 330000000;
@@ -2736,8 +2737,6 @@ static void msm_mmsscc_v2_fixup(void)
csi1_clk_src.c.fmax[VDD_DIG_NOMINAL] = 480000000;
csi2_clk_src.c.fmax[VDD_DIG_NOMINAL] = 480000000;
csi3_clk_src.c.fmax[VDD_DIG_NOMINAL] = 480000000;
-
- dp_pixel_clk_src.c.fmax[VDD_DIG_LOWER] = 148380000;
}
int msm_mmsscc_cobalt_probe(struct platform_device *pdev)
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 969486e441bb..a119c0b27321 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -183,7 +183,9 @@ enum clk_osm_trace_packet_id {
#define DROOP_UNSTALL_TIMER_CTRL_REG 0x10AC
#define DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG 0x10B0
#define DROOP_WAIT_TO_RELEASE_TIMER_CTRL1_REG 0x10B4
+#define OSM_PLL_SW_OVERRIDE_EN 0x10C0
+#define PLL_SW_OVERRIDE_DROOP_EN BIT(0)
#define DCVS_DROOP_TIMER_CTRL 0x10B8
#define SEQ_MEM_ADDR 0x500
#define SEQ_CFG_BR_ADDR 0x170
@@ -200,6 +202,8 @@ enum clk_osm_trace_packet_id {
#define TRACE_CTRL_EN_MASK BIT(0)
#define TRACE_CTRL_ENABLE 1
#define TRACE_CTRL_DISABLE 0
+#define TRACE_CTRL_ENABLE_WDOG_STATUS BIT(30)
+#define TRACE_CTRL_ENABLE_WDOG_STATUS_MASK BIT(30)
#define TRACE_CTRL_PACKET_TYPE_MASK BVAL(2, 1, 3)
#define TRACE_CTRL_PACKET_TYPE_SHIFT 1
#define TRACE_CTRL_PERIODIC_TRACE_EN_MASK BIT(3)
@@ -219,6 +223,11 @@ enum clk_osm_trace_packet_id {
#define PERFCL_EFUSE_SHIFT 29
#define PERFCL_EFUSE_MASK 0x7
+#define MSMCOBALTV1_PWRCL_BOOT_RATE 1478400000
+#define MSMCOBALTV1_PERFCL_BOOT_RATE 1536000000
+#define MSMCOBALTV2_PWRCL_BOOT_RATE 1555200000
+#define MSMCOBALTV2_PERFCL_BOOT_RATE 1728000000
+
static void __iomem *virt_base;
static void __iomem *debug_base;
@@ -1767,7 +1776,7 @@ static void clk_osm_setup_fsms(struct clk_osm *c)
val = clk_osm_read_reg(c,
DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
- val |= BVAL(31, 16, clk_osm_count_ns(c, 500));
+ val |= BVAL(31, 16, clk_osm_count_ns(c, 250));
clk_osm_write_reg(c, val,
DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
}
@@ -1784,7 +1793,7 @@ static void clk_osm_setup_fsms(struct clk_osm *c)
val = clk_osm_read_reg(c,
DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
- val |= BVAL(15, 0, clk_osm_count_ns(c, 500));
+ val |= BVAL(15, 0, clk_osm_count_ns(c, 250));
clk_osm_write_reg(c, val,
DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
}
@@ -1798,7 +1807,7 @@ static void clk_osm_setup_fsms(struct clk_osm *c)
if (c->wfx_fsm_en || c->ps_fsm_en || c->droop_fsm_en) {
clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG);
- clk_osm_write_reg(c, clk_osm_count_ns(c, 250),
+ clk_osm_write_reg(c, clk_osm_count_ns(c, 5),
DROOP_RELEASE_TIMER_CTRL);
clk_osm_write_reg(c, clk_osm_count_ns(c, 500),
DCVS_DROOP_TIMER_CTRL);
@@ -1807,6 +1816,11 @@ static void clk_osm_setup_fsms(struct clk_osm *c)
BVAL(6, 0, 0x8);
clk_osm_write_reg(c, val, DROOP_CTRL_REG);
}
+
+ /* Enable the PLL Droop Override */
+ val = clk_osm_read_reg(c, OSM_PLL_SW_OVERRIDE_EN);
+ val |= PLL_SW_OVERRIDE_DROOP_EN;
+ clk_osm_write_reg(c, val, OSM_PLL_SW_OVERRIDE_EN);
}
static void clk_osm_do_additional_setup(struct clk_osm *c,
@@ -2680,6 +2694,18 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
return rc;
}
+ if (msmcobalt_v2) {
+ /* Enable OSM WDOG registers */
+ clk_osm_masked_write_reg(&pwrcl_clk,
+ TRACE_CTRL_ENABLE_WDOG_STATUS,
+ TRACE_CTRL,
+ TRACE_CTRL_ENABLE_WDOG_STATUS_MASK);
+ clk_osm_masked_write_reg(&perfcl_clk,
+ TRACE_CTRL_ENABLE_WDOG_STATUS,
+ TRACE_CTRL,
+ TRACE_CTRL_ENABLE_WDOG_STATUS_MASK);
+ }
+
/*
* The hmss_gpll0 clock runs at 300 MHz. Ensure it is at the correct
* frequency before enabling OSM. LUT index 0 is always sourced from
@@ -2693,18 +2719,22 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
}
clk_prepare_enable(&sys_apcsaux_clk_gcc.c);
- /* Set 300MHz index */
- rc = clk_set_rate(&pwrcl_clk.c, init_rate);
+ /* Set boot rate */
+ rc = clk_set_rate(&pwrcl_clk.c, msmcobalt_v1 ?
+ MSMCOBALTV1_PWRCL_BOOT_RATE :
+ MSMCOBALTV2_PWRCL_BOOT_RATE);
if (rc) {
- dev_err(&pdev->dev, "Unable to set init rate on pwr cluster, rc=%d\n",
+ dev_err(&pdev->dev, "Unable to set boot rate on pwr cluster, rc=%d\n",
rc);
clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
return rc;
}
- rc = clk_set_rate(&perfcl_clk.c, init_rate);
+ rc = clk_set_rate(&perfcl_clk.c, msmcobalt_v1 ?
+ MSMCOBALTV1_PERFCL_BOOT_RATE :
+ MSMCOBALTV2_PERFCL_BOOT_RATE);
if (rc) {
- dev_err(&pdev->dev, "Unable to set init rate on perf cluster, rc=%d\n",
+ dev_err(&pdev->dev, "Unable to set boot rate on perf cluster, rc=%d\n",
rc);
clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
return rc;
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
index 9a080e4ee39b..a574a9cd2b5a 100644
--- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
@@ -18,6 +18,7 @@
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/clk/msm-clock-generic.h>
+#include <linux/usb/usbpd.h>
#include "mdss-pll.h"
#include "mdss-dp-pll.h"
@@ -172,9 +173,27 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
{
u32 res = 0;
struct mdss_pll_resources *dp_res = vco->priv;
+ u8 orientation, ln_cnt;
+ u32 spare_value;
+
+ spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+ ln_cnt = spare_value & 0x0F;
+ orientation = (spare_value & 0xF0) >> 4;
+ pr_debug("%s: spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
+ __func__, spare_value, ln_cnt, orientation);
+
+ if (ln_cnt != 4) {
+ if (orientation == ORIENTATION_CC2)
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_PD_CTL, 0x2d);
+ else
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_PD_CTL, 0x35);
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_PD_CTL, 0x3d);
+ }
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_PD_CTL, 0x3d);
/* Make sure the PHY register writes are done */
wmb();
MDSS_PLL_REG_W(dp_res->pll_base,
@@ -314,8 +333,13 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
/* Make sure the PLL register writes are done */
wmb();
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_MODE, 0x58);
+ if (orientation == ORIENTATION_CC2)
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_MODE, 0x48);
+ else
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_MODE, 0x58);
+
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_TX0_TX1_LANE_CTL, 0x05);
MDSS_PLL_REG_W(dp_res->phy_base,
@@ -427,6 +451,12 @@ static int dp_pll_enable(struct clk *c)
u32 status;
struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
struct mdss_pll_resources *dp_res = vco->priv;
+ u8 orientation, ln_cnt;
+ u32 spare_value, bias_en, drvr_en;
+
+ spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+ ln_cnt = spare_value & 0x0F;
+ orientation = (spare_value & 0xF0) >> 4;
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_CFG, 0x01);
@@ -474,18 +504,45 @@ static int dp_pll_enable(struct clk *c)
pr_debug("%s: PLL is locked\n", __func__);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ if (ln_cnt == 1) {
+ bias_en = 0x3e;
+ drvr_en = 0x13;
+ } else {
+ bias_en = 0x3f;
+ drvr_en = 0x10;
+ }
+
+ if (ln_cnt != 4) {
+ if (orientation == ORIENTATION_CC1) {
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x3f);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x10);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ drvr_en);
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x3f);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
+ drvr_en);
+ }
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x10);
+ drvr_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
+ drvr_en);
+ }
+
MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_TX_POL_INV,
0x0a);
@@ -615,7 +672,7 @@ int dp_vco_prepare(struct clk *c)
rc = dp_pll_enable(c);
if (rc) {
mdss_pll_resource_enable(dp_pll_res, false);
- pr_err("ndx=%d failed to enable dsi pll\n",
+ pr_err("ndx=%d failed to enable dp pll\n",
dp_pll_res->index);
goto error;
}
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
index d89545b38e64..28f21ed1fe0d 100644
--- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
@@ -41,6 +41,7 @@
#define DP_PHY_TX0_TX1_LANE_CTL 0x0068
#define DP_PHY_TX2_TX3_LANE_CTL 0x0084
+#define DP_PHY_SPARE0 0x00A8
#define DP_PHY_STATUS 0x00BC
/* Tx registers */
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c b/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
index 1228d925761b..4b2d8bba0940 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
@@ -1016,19 +1016,19 @@ static struct clk_mux_ops mdss_mux_ops = {
* | vco_clk |
* +-------+-------+
* |
- * +--------------------------------------+
- * | |
- * +-------v-------+ |
- * | bitclk_src | |
- * | DIV(1..15) | |
- * +-------+-------+ |
- * | |
- * +--------------------+ |
- * Shadow Path | | |
- * + +-------v-------+ +------v------+ +------v-------+
- * | | byteclk_src | |post_bit_div | |post_vco_div |
- * | | DIV(8) | |DIV(1,2) | |DIV(1,4) |
- * | +-------+-------+ +------+------+ +------+-------+
+ * +----------------------+------------------+
+ * | | |
+ * +-------v-------+ +-------v-------+ +-------v-------+
+ * | bitclk_src | | post_vco_div1 | | post_vco_div4 |
+ * | DIV(1..15) | +-------+-------+ +-------+-------+
+ * +-------+-------+ | |
+ * | +------------+ |
+ * +--------------------+ | |
+ * Shadow Path | | | |
+ * + +-------v-------+ +------v------+ +---v-----v------+
+ * | | byteclk_src | |post_bit_div | \ post_vco_mux /
+ * | | DIV(8) | |DIV(1,2) | \ /
+ * | +-------+-------+ +------+------+ +---+------+
* | | | |
* | | +------+ +----+
* | +--------+ | |
@@ -1085,19 +1085,51 @@ static struct div_clk dsi0pll_bitclk_src = {
}
};
-static struct div_clk dsi0pll_post_vco_div = {
+static struct div_clk dsi0pll_post_vco_div1 = {
.data = {
.div = 1,
.min_div = 1,
+ .max_div = 1,
+ },
+ .ops = &clk_post_vco_div_ops,
+ .c = {
+ .parent = &dsi0pll_vco_clk.c,
+ .dbg_name = "dsi0pll_post_vco_div1",
+ .ops = &clk_ops_post_vco_div_c,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_post_vco_div1.c),
+ }
+};
+
+static struct div_clk dsi0pll_post_vco_div4 = {
+ .data = {
+ .div = 4,
+ .min_div = 4,
.max_div = 4,
},
.ops = &clk_post_vco_div_ops,
.c = {
.parent = &dsi0pll_vco_clk.c,
- .dbg_name = "dsi0pll_post_vco_div",
+ .dbg_name = "dsi0pll_post_vco_div4",
.ops = &clk_ops_post_vco_div_c,
.flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi0pll_post_vco_div.c),
+ CLK_INIT(dsi0pll_post_vco_div4.c),
+ }
+};
+
+static struct mux_clk dsi0pll_post_vco_mux = {
+ .num_parents = 2,
+ .parents = (struct clk_src[]) {
+ {&dsi0pll_post_vco_div1.c, 0},
+ {&dsi0pll_post_vco_div4.c, 1},
+ },
+ .ops = &mdss_mux_ops,
+ .c = {
+ .parent = &dsi0pll_post_vco_div1.c,
+ .dbg_name = "dsi0pll_post_vco_mux",
+ .ops = &clk_ops_gen_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_post_vco_mux.c),
}
};
@@ -1121,7 +1153,7 @@ static struct mux_clk dsi0pll_pclk_src_mux = {
.num_parents = 2,
.parents = (struct clk_src[]) {
{&dsi0pll_post_bit_div.c, 0},
- {&dsi0pll_post_vco_div.c, 1},
+ {&dsi0pll_post_vco_mux.c, 1},
},
.ops = &mdss_mux_ops,
.c = {
@@ -1222,19 +1254,51 @@ static struct div_clk dsi1pll_bitclk_src = {
}
};
-static struct div_clk dsi1pll_post_vco_div = {
+static struct div_clk dsi1pll_post_vco_div1 = {
.data = {
.div = 1,
.min_div = 1,
+ .max_div = 1,
+ },
+ .ops = &clk_post_vco_div_ops,
+ .c = {
+ .parent = &dsi1pll_vco_clk.c,
+ .dbg_name = "dsi1pll_post_vco_div1",
+ .ops = &clk_ops_post_vco_div_c,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_post_vco_div1.c),
+ }
+};
+
+static struct div_clk dsi1pll_post_vco_div4 = {
+ .data = {
+ .div = 4,
+ .min_div = 4,
.max_div = 4,
},
.ops = &clk_post_vco_div_ops,
.c = {
.parent = &dsi1pll_vco_clk.c,
- .dbg_name = "dsi1pll_post_vco_div",
+ .dbg_name = "dsi1pll_post_vco_div4",
.ops = &clk_ops_post_vco_div_c,
.flags = CLKFLAG_NO_RATE_CACHE,
- CLK_INIT(dsi1pll_post_vco_div.c),
+ CLK_INIT(dsi1pll_post_vco_div4.c),
+ }
+};
+
+static struct mux_clk dsi1pll_post_vco_mux = {
+ .num_parents = 2,
+ .parents = (struct clk_src[]) {
+ {&dsi1pll_post_vco_div1.c, 0},
+ {&dsi1pll_post_vco_div4.c, 1},
+ },
+ .ops = &mdss_mux_ops,
+ .c = {
+ .parent = &dsi1pll_post_vco_div1.c,
+ .dbg_name = "dsi1pll_post_vco_mux",
+ .ops = &clk_ops_gen_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_post_vco_mux.c),
}
};
@@ -1258,7 +1322,7 @@ static struct mux_clk dsi1pll_pclk_src_mux = {
.num_parents = 2,
.parents = (struct clk_src[]) {
{&dsi1pll_post_bit_div.c, 0},
- {&dsi1pll_post_vco_div.c, 1},
+ {&dsi1pll_post_vco_mux.c, 1},
},
.ops = &mdss_mux_ops,
.c = {
@@ -1338,7 +1402,9 @@ static struct clk_lookup mdss_dsi_pll0cc_cobalt[] = {
CLK_LIST(dsi0pll_pclk_src),
CLK_LIST(dsi0pll_pclk_src_mux),
CLK_LIST(dsi0pll_post_bit_div),
- CLK_LIST(dsi0pll_post_vco_div),
+ CLK_LIST(dsi0pll_post_vco_mux),
+ CLK_LIST(dsi0pll_post_vco_div1),
+ CLK_LIST(dsi0pll_post_vco_div4),
CLK_LIST(dsi0pll_bitclk_src),
CLK_LIST(dsi0pll_vco_clk),
};
@@ -1349,7 +1415,9 @@ static struct clk_lookup mdss_dsi_pll1cc_cobalt[] = {
CLK_LIST(dsi1pll_pclk_src),
CLK_LIST(dsi1pll_pclk_src_mux),
CLK_LIST(dsi1pll_post_bit_div),
- CLK_LIST(dsi1pll_post_vco_div),
+ CLK_LIST(dsi1pll_post_vco_mux),
+ CLK_LIST(dsi1pll_post_vco_div1),
+ CLK_LIST(dsi1pll_post_vco_div4),
CLK_LIST(dsi1pll_bitclk_src),
CLK_LIST(dsi1pll_vco_clk),
};
@@ -1407,7 +1475,9 @@ int dsi_pll_clock_register_cobalt(struct platform_device *pdev,
dsi0pll_pclk_src.priv = pll_res;
dsi0pll_pclk_src_mux.priv = pll_res;
dsi0pll_post_bit_div.priv = pll_res;
- dsi0pll_post_vco_div.priv = pll_res;
+ dsi0pll_post_vco_mux.priv = pll_res;
+ dsi0pll_post_vco_div1.priv = pll_res;
+ dsi0pll_post_vco_div4.priv = pll_res;
dsi0pll_bitclk_src.priv = pll_res;
dsi0pll_vco_clk.priv = pll_res;
@@ -1421,7 +1491,9 @@ int dsi_pll_clock_register_cobalt(struct platform_device *pdev,
dsi1pll_pclk_src.priv = pll_res;
dsi1pll_pclk_src_mux.priv = pll_res;
dsi1pll_post_bit_div.priv = pll_res;
- dsi1pll_post_vco_div.priv = pll_res;
+ dsi1pll_post_vco_mux.priv = pll_res;
+ dsi1pll_post_vco_div1.priv = pll_res;
+ dsi1pll_post_vco_div4.priv = pll_res;
dsi1pll_bitclk_src.priv = pll_res;
dsi1pll_vco_clk.priv = pll_res;
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 5b9ce12c1e02..e39686ca4feb 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -155,6 +155,7 @@ config MSM_MMCC_8996
config MSM_GCC_FALCON
tristate "MSMFALCON Global Clock Controller"
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
---help---
Support for the global clock controller on Qualcomm Technologies, Inc
@@ -162,6 +163,16 @@ config MSM_GCC_FALCON
Say Y if you want to use peripheral devices such as UART, SPI, I2C,
USB, UFS, SD/eMMC, PCIe, etc.
+config MSM_GPUCC_FALCON
+ tristate "MSMFALCON Graphics Clock Controller"
+ select MSM_GCC_FALCON
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the graphics clock controller on Qualcomm Technologies, Inc
+ MSMfalcon devices.
+	  Say Y if you want to support graphics controller devices which will
+	  be required to enable those devices.
+
config QCOM_HFPLL
tristate "High-Frequency PLL (HFPLL) Clock Controller"
depends on COMMON_CLK_QCOM
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index af58f206bc4a..adebefd63e71 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -11,7 +11,7 @@ clk-qcom-y += clk-regmap-divider.o
clk-qcom-y += clk-regmap-mux.o
clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o
clk-qcom-y += clk-hfpll.o
-clk-qcom-y += reset.o
+clk-qcom-y += reset.o clk-voter.o
clk-qcom-y += clk-dummy.o
clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o gdsc-regulator.o
@@ -29,6 +29,7 @@ obj-$(CONFIG_MSM_GCC_FALCON) += gcc-msmfalcon.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_MSM_GPUCC_FALCON) += gpucc-msmfalcon.o
obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
obj-$(CONFIG_KRAITCC) += krait-cc.o
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 933a208392bd..6d12ddb3e245 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -935,6 +935,8 @@ static int clk_gfx3d_src_set_rate_and_parent(struct clk_hw *hw,
}
const struct clk_ops clk_gfx3d_src_ops = {
+ .enable = clk_rcg2_enable,
+ .disable = clk_rcg2_disable,
.is_enabled = clk_rcg2_is_enabled,
.get_parent = clk_rcg2_get_parent,
.set_parent = clk_rcg2_set_parent,
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index ac007ec667bb..612e7b37a8d0 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -29,6 +29,8 @@
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/mfd/qcom-rpm.h>
+#include "clk-voter.h"
+
#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773
#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
#define QCOM_RPM_SMD_KEY_RATE 0x007a484b
@@ -603,7 +605,7 @@ DEFINE_CLK_SMD_RPM(msmfalcon, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
DEFINE_CLK_SMD_RPM(msmfalcon, cnoc_periph_clk, cnoc_periph_a_clk,
QCOM_SMD_RPM_BUS_CLK, 0);
DEFINE_CLK_SMD_RPM(msmfalcon, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
-DEFINE_CLK_SMD_RPM(msmfalcon, mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk,
+DEFINE_CLK_SMD_RPM(msmfalcon, mmssnoc_axi_clk, mmssnoc_axi_a_clk,
QCOM_SMD_RPM_MMAXI_CLK, 0);
DEFINE_CLK_SMD_RPM(msmfalcon, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
DEFINE_CLK_SMD_RPM(msmfalcon, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
@@ -624,6 +626,27 @@ DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msmfalcon, ln_bb_clk2_pin,
ln_bb_clk2_pin_ao, 0x2);
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msmfalcon, ln_bb_clk3_pin,
ln_bb_clk3_pin_ao, 0x3);
+/* Voter clocks */
+static DEFINE_CLK_VOTER(bimc_msmbus_clk, bimc_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_msmbus_a_clk, bimc_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(cnoc_msmbus_clk, cnoc_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(cnoc_msmbus_a_clk, cnoc_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_msmbus_clk, snoc_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_msmbus_a_clk, snoc_a_clk, LONG_MAX);
+static DEFINE_CLK_VOTER(cnoc_periph_keepalive_a_clk, cnoc_periph_a_clk,
+ LONG_MAX);
+static DEFINE_CLK_VOTER(mcd_ce1_clk, ce1_clk, 85710000);
+static DEFINE_CLK_VOTER(qcedev_ce1_clk, ce1_clk, 85710000);
+static DEFINE_CLK_VOTER(qcrypto_ce1_clk, ce1_clk, 85710000);
+static DEFINE_CLK_VOTER(qseecom_ce1_clk, ce1_clk, 85710000);
+static DEFINE_CLK_VOTER(scm_ce1_clk, ce1_clk, 85710000);
+
+static DEFINE_CLK_BRANCH_VOTER(cxo_dwc3_clk, cxo);
+static DEFINE_CLK_BRANCH_VOTER(cxo_lpm_clk, cxo);
+static DEFINE_CLK_BRANCH_VOTER(cxo_otg_clk, cxo);
+static DEFINE_CLK_BRANCH_VOTER(cxo_pil_lpass_clk, cxo);
+static DEFINE_CLK_BRANCH_VOTER(cxo_pil_cdsp_clk, cxo);
+
static struct clk_hw *msmfalcon_clks[] = {
[RPM_XO_CLK_SRC] = &msmfalcon_cxo.hw,
[RPM_XO_A_CLK_SRC] = &msmfalcon_cxo_a.hw,
@@ -639,8 +662,8 @@ static struct clk_hw *msmfalcon_clks[] = {
[RPM_AGGR2_NOC_A_CLK] = &msmfalcon_aggre2_noc_a_clk.hw,
[RPM_CNOC_CLK] = &msmfalcon_cnoc_clk.hw,
[RPM_CNOC_A_CLK] = &msmfalcon_cnoc_a_clk.hw,
- [RPM_MMAXI_CLK] = &msmfalcon_mmssnoc_axi_rpm_clk.hw,
- [RPM_MMAXI_A_CLK] = &msmfalcon_mmssnoc_axi_rpm_a_clk.hw,
+ [RPM_MMAXI_CLK] = &msmfalcon_mmssnoc_axi_clk.hw,
+ [RPM_MMAXI_A_CLK] = &msmfalcon_mmssnoc_axi_a_clk.hw,
[RPM_IPA_CLK] = &msmfalcon_ipa_clk.hw,
[RPM_IPA_A_CLK] = &msmfalcon_ipa_a_clk.hw,
[RPM_CE1_CLK] = &msmfalcon_ce1_clk.hw,
@@ -661,6 +684,25 @@ static struct clk_hw *msmfalcon_clks[] = {
[RPM_LN_BB_CLK3_PIN_AO] = &msmfalcon_ln_bb_clk3_pin_ao.hw,
[RPM_CNOC_PERIPH_CLK] = &msmfalcon_cnoc_periph_clk.hw,
[RPM_CNOC_PERIPH_A_CLK] = &msmfalcon_cnoc_periph_a_clk.hw,
+
+ /* Voter Clocks */
+ [BIMC_MSMBUS_CLK] = &bimc_msmbus_clk.hw,
+ [BIMC_MSMBUS_A_CLK] = &bimc_msmbus_a_clk.hw,
+ [CNOC_MSMBUS_CLK] = &cnoc_msmbus_clk.hw,
+ [CNOC_MSMBUS_A_CLK] = &cnoc_msmbus_a_clk.hw,
+ [MCD_CE1_CLK] = &mcd_ce1_clk.hw,
+ [QCEDEV_CE1_CLK] = &qcedev_ce1_clk.hw,
+ [QCRYPTO_CE1_CLK] = &qcrypto_ce1_clk.hw,
+ [QSEECOM_CE1_CLK] = &qseecom_ce1_clk.hw,
+ [SCM_CE1_CLK] = &scm_ce1_clk.hw,
+ [SNOC_MSMBUS_CLK] = &snoc_msmbus_clk.hw,
+ [SNOC_MSMBUS_A_CLK] = &snoc_msmbus_a_clk.hw,
+ [CXO_DWC3_CLK] = &cxo_dwc3_clk.hw,
+ [CXO_LPM_CLK] = &cxo_lpm_clk.hw,
+ [CXO_OTG_CLK] = &cxo_otg_clk.hw,
+ [CXO_PIL_LPASS_CLK] = &cxo_pil_lpass_clk.hw,
+ [CXO_PIL_CDSP_CLK] = &cxo_pil_cdsp_clk.hw,
+ [CNOC_PERIPH_KEEPALIVE_A_CLK] = &cnoc_periph_keepalive_a_clk.hw,
};
static const struct rpm_smd_clk_desc rpm_clk_msmfalcon = {
@@ -757,9 +799,14 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
/* Keep an active vote on CXO in case no other driver votes for it */
if (is_8996)
clk_prepare_enable(msm8996_cxo_a.hw.clk);
- else if (is_falcon)
+ else if (is_falcon) {
clk_prepare_enable(msmfalcon_cxo_a.hw.clk);
+ /* Hold an active set vote for the cnoc_periph resource */
+ clk_set_rate(cnoc_periph_keepalive_a_clk.hw.clk, 19200000);
+ clk_prepare_enable(cnoc_periph_keepalive_a_clk.hw.clk);
+ }
+
dev_info(&pdev->dev, "Registered RPM clocks\n");
return 0;
diff --git a/drivers/clk/qcom/clk-voter.c b/drivers/clk/qcom/clk-voter.c
new file mode 100644
index 000000000000..d3409b9e6b64
--- /dev/null
+++ b/drivers/clk/qcom/clk-voter.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+
+#include "clk-voter.h"
+
+static int voter_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int ret = 0;
+ struct clk_voter *v = to_clk_voter(hw);
+ unsigned long cur_rate, new_rate, other_rate = 0;
+
+ if (v->is_branch)
+ return ret;
+
+ if (v->enabled) {
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+
+ /*
+ * Get the aggregate rate without this clock's vote and update
+ * if the new rate is different than the current rate.
+ */
+ other_rate = clk_aggregate_rate(hw, parent->core);
+
+ cur_rate = max(other_rate, clk_get_rate(hw->clk));
+ new_rate = max(other_rate, rate);
+
+ if (new_rate != cur_rate) {
+ ret = clk_set_rate(parent->clk, new_rate);
+ if (ret)
+ return ret;
+ }
+ }
+ v->rate = rate;
+
+ return ret;
+}
+
+static int voter_clk_prepare(struct clk_hw *hw)
+{
+ int ret = 0;
+ unsigned long cur_rate;
+ struct clk_hw *parent;
+ struct clk_voter *v = to_clk_voter(hw);
+
+ parent = clk_hw_get_parent(hw);
+
+ if (v->is_branch) {
+ v->enabled = true;
+ return ret;
+ }
+
+ /*
+ * Increase the rate if this clock is voting for a higher rate
+ * than the current rate.
+ */
+ cur_rate = clk_aggregate_rate(hw, parent->core);
+
+ if (v->rate > cur_rate) {
+ ret = clk_set_rate(parent->clk, v->rate);
+ if (ret)
+ return ret;
+ }
+ v->enabled = true;
+
+ return ret;
+}
+
+static void voter_clk_unprepare(struct clk_hw *hw)
+{
+ unsigned long cur_rate, new_rate;
+ struct clk_hw *parent;
+ struct clk_voter *v = to_clk_voter(hw);
+
+
+ parent = clk_hw_get_parent(hw);
+
+ /*
+ * Decrease the rate if this clock was the only one voting for
+ * the highest rate.
+ */
+ v->enabled = false;
+ if (v->is_branch)
+ return;
+
+ new_rate = clk_aggregate_rate(hw, parent->core);
+ cur_rate = max(new_rate, v->rate);
+
+ if (new_rate < cur_rate)
+ clk_set_rate(parent->clk, new_rate);
+}
+
+static int voter_clk_is_enabled(struct clk_hw *hw)
+{
+ struct clk_voter *v = to_clk_voter(hw);
+
+ return v->enabled;
+}
+
+static long voter_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return clk_hw_round_rate(clk_hw_get_parent(hw), rate);
+}
+
+static unsigned long voter_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_voter *v = to_clk_voter(hw);
+
+ return v->rate;
+}
+
+struct clk_ops clk_ops_voter = {
+ .prepare = voter_clk_prepare,
+ .unprepare = voter_clk_unprepare,
+ .set_rate = voter_clk_set_rate,
+ .is_enabled = voter_clk_is_enabled,
+ .round_rate = voter_clk_round_rate,
+ .recalc_rate = voter_clk_recalc_rate,
+};
diff --git a/drivers/clk/qcom/clk-voter.h b/drivers/clk/qcom/clk-voter.h
new file mode 100644
index 000000000000..27092ae7d131
--- /dev/null
+++ b/drivers/clk/qcom/clk-voter.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __QCOM_CLK_VOTER_H__
+#define __QCOM_CLK_VOTER_H__
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+struct clk_voter {
+ int is_branch;
+ bool enabled;
+ struct clk_hw hw;
+ unsigned long rate;
+};
+
+extern struct clk_ops clk_ops_voter;
+
+#define to_clk_voter(_hw) container_of(_hw, struct clk_voter, hw)
+
+#define __DEFINE_CLK_VOTER(clk_name, _parent_name, _default_rate, _is_branch) \
+ struct clk_voter clk_name = { \
+ .is_branch = (_is_branch), \
+ .rate = _default_rate, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_ops_voter, \
+ .name = #clk_name, \
+ .parent_names = (const char *[]){ #_parent_name }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define DEFINE_CLK_VOTER(clk_name, _parent_name, _default_rate) \
+ __DEFINE_CLK_VOTER(clk_name, _parent_name, _default_rate, 0)
+
+#define DEFINE_CLK_BRANCH_VOTER(clk_name, _parent_name) \
+ __DEFINE_CLK_VOTER(clk_name, _parent_name, 1000, 1)
+
+#endif
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index f7c226ab4307..423e975dffee 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -177,7 +177,7 @@ EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk);
int qcom_cc_really_probe(struct platform_device *pdev,
const struct qcom_cc_desc *desc, struct regmap *regmap)
{
- int i, ret;
+ int i = 0, ret, j = 0;
struct device *dev = &pdev->dev;
struct clk *clk;
struct clk_onecell_data *data;
@@ -187,8 +187,10 @@ int qcom_cc_really_probe(struct platform_device *pdev,
struct gdsc_desc *scd;
size_t num_clks = desc->num_clks;
struct clk_regmap **rclks = desc->clks;
+ struct clk_hw **hw_clks = desc->hwclks;
- cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
+ cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) *
+ (num_clks + desc->num_hwclks),
GFP_KERNEL);
if (!cc)
return -ENOMEM;
@@ -196,17 +198,32 @@ int qcom_cc_really_probe(struct platform_device *pdev,
clks = cc->clks;
data = &cc->data;
data->clks = clks;
- data->clk_num = num_clks;
+ data->clk_num = num_clks + desc->num_hwclks;
- for (i = 0; i < num_clks; i++) {
- if (!rclks[i]) {
+ for (i = 0; i < desc->num_hwclks; i++) {
+ if (!hw_clks[i]) {
clks[i] = ERR_PTR(-ENOENT);
continue;
}
- clk = devm_clk_register_regmap(dev, rclks[i]);
+ clk = devm_clk_register(dev, hw_clks[i]);
if (IS_ERR(clk))
return PTR_ERR(clk);
clks[i] = clk;
+ pr_debug("Index for hw_clocks %d added %s\n", i,
+ __clk_get_name(clk));
+ }
+
+ for (j = i; j < num_clks; j++) {
+ if (!rclks[j]) {
+ clks[j] = ERR_PTR(-ENOENT);
+ continue;
+ }
+ clk = devm_clk_register_regmap(dev, rclks[j]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[j] = clk;
+ pr_debug("Index for Regmap clocks %d added %s\n", j,
+ __clk_get_name(clk));
}
ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 10cabca921be..e3f450533470 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -25,7 +25,9 @@ struct parent_map;
struct qcom_cc_desc {
const struct regmap_config *config;
struct clk_regmap **clks;
+ struct clk_hw **hwclks;
size_t num_clks;
+ size_t num_hwclks;
const struct qcom_reset_map *resets;
size_t num_resets;
struct gdsc **gdscs;
diff --git a/drivers/clk/qcom/gcc-msmfalcon.c b/drivers/clk/qcom/gcc-msmfalcon.c
index d353cc9ade73..2cbc9dff047b 100644
--- a/drivers/clk/qcom/gcc-msmfalcon.c
+++ b/drivers/clk/qcom/gcc-msmfalcon.c
@@ -2527,6 +2527,32 @@ static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = {
},
};
+static struct clk_branch hlos1_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7d048,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .clkr = {
+ .enable_reg = 0x7d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "hlos1_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos2_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7e048,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .clkr = {
+ .enable_reg = 0x7e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "hlos2_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_fixed_factor gcc_ce1_ahb_m_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_ce1_ahb_m_clk",
@@ -2683,6 +2709,10 @@ static struct clk_regmap *gcc_falcon_clocks[] = {
[GCC_UFS_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_ice_core_hw_ctl_clk.clkr,
[GCC_UFS_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_aux_hw_ctl_clk.clkr,
[GCC_UFS_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_unipro_core_hw_ctl_clk.clkr,
+ [HLOS1_VOTE_TURING_ADSP_SMMU_CLK] =
+ &hlos1_vote_turing_adsp_smmu_clk.clkr,
+ [HLOS2_VOTE_TURING_ADSP_SMMU_CLK] =
+ &hlos2_vote_turing_adsp_smmu_clk.clkr,
};
static const struct qcom_reset_map gcc_falcon_resets[] = {
@@ -2709,6 +2739,8 @@ static const struct qcom_cc_desc gcc_falcon_desc = {
.config = &gcc_falcon_regmap_config,
.clks = gcc_falcon_clocks,
.num_clks = ARRAY_SIZE(gcc_falcon_clocks),
+ .hwclks = gcc_msmfalcon_hws,
+ .num_hwclks = ARRAY_SIZE(gcc_msmfalcon_hws),
.resets = gcc_falcon_resets,
.num_resets = ARRAY_SIZE(gcc_falcon_resets),
};
@@ -2735,13 +2767,6 @@ static int gcc_falcon_probe(struct platform_device *pdev)
*/
regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
- /* register hardware clocks */
- for (i = 0; i < ARRAY_SIZE(gcc_msmfalcon_hws); i++) {
- clk = devm_clk_register(&pdev->dev, gcc_msmfalcon_hws[i]);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
- }
-
vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
if (IS_ERR(vdd_dig.regulator[0])) {
if (!(PTR_ERR(vdd_dig.regulator[0]) == -EPROBE_DEFER))
diff --git a/drivers/clk/qcom/gpucc-msmfalcon.c b/drivers/clk/qcom/gpucc-msmfalcon.c
new file mode 100644
index 000000000000..f194abb471cd
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-msmfalcon.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <dt-bindings/clock/qcom,gpu-msmfalcon.h>
+
+#include "clk-alpha-pll.h"
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "vdd-level-falcon.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+#define F_GFX(f, s, h, m, n, sf) { (f), (s), (2 * (h) - 1), (m), (n), (sf) }
+
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_DIG_NUM, 1, vdd_corner, NULL);
+static DEFINE_VDD_REGS_INIT(vdd_gfx, 1);
+
+enum {
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_PLL0_PLL_OUT_MAIN,
+ P_GPU_PLL1_PLL_OUT_MAIN,
+ P_XO,
+};
+
+static const struct parent_map gpucc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpucc_parent_names_0[] = {
+ "cxo_a",
+ "gcc_gpu_gpll0_clk",
+ "gcc_gpu_gpll0_div_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gpucc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_GPU_PLL0_PLL_OUT_MAIN, 1 },
+ { P_GPU_PLL1_PLL_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpucc_parent_names_1[] = {
+ "xo",
+ "gpu_pll0_pll_out_main",
+ "gpu_pll1_pll_out_main",
+ "gcc_gpu_gpll0_clk",
+ "core_bi_pll_test_se",
+};
+
+static struct pll_vco gpu_vco[] = {
+ { 1000000000, 2000000000, 0 },
+ { 500000000, 1000000000, 2 },
+ { 250000000, 500000000, 3 },
+};
+
+/* 800MHz configuration */
+static const struct pll_config gpu_pll0_config = {
+ .l = 0x29,
+ .config_ctl_val = 0x4001055b,
+ .alpha = 0xaaaaab00,
+ .alpha_u = 0xaa,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = 0x3 << 20,
+ .main_output_mask = 0x1,
+};
+
+static struct pll_vco_data pll_data[] = {
+ /* Frequency post-div */
+ { 640000000, 0x1 },
+};
+
+static struct clk_alpha_pll gpu_pll0_pll_out_main = {
+ .offset = 0x0,
+ .vco_table = gpu_vco,
+ .num_vco = ARRAY_SIZE(gpu_vco),
+ .vco_data = pll_data,
+ .num_vco_data = ARRAY_SIZE(pll_data),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_pll0_pll_out_main",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ VDD_GPU_PLL_FMAX_MAP6(
+ MIN, 266000000,
+ LOWER, 432000000,
+ LOW, 640000000,
+ LOW_L1, 800000000,
+ NOMINAL, 1020000000,
+ HIGH, 1500000000),
+ },
+ },
+};
+
+static struct clk_alpha_pll gpu_pll1_pll_out_main = {
+ .offset = 0x40,
+ .vco_table = gpu_vco,
+ .num_vco = ARRAY_SIZE(gpu_vco),
+ .vco_data = pll_data,
+ .num_vco_data = ARRAY_SIZE(pll_data),
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_pll1_pll_out_main",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ VDD_GPU_PLL_FMAX_MAP6(
+ MIN, 266000000,
+ LOWER, 432000000,
+ LOW, 640000000,
+ LOW_L1, 800000000,
+ NOMINAL, 1020000000,
+ HIGH, 1500000000),
+ },
+ },
+};
+
+/* GFX clock init data */
+static struct clk_init_data gpu_clks_init[] = {
+ [0] = {
+ .name = "gfx3d_clk_src",
+ .parent_names = gpucc_parent_names_1,
+ .num_parents = 3,
+ .ops = &clk_gfx3d_src_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ [1] = {
+ .name = "gpucc_gfx3d_clk",
+ .parent_names = (const char *[]){
+ "gfx3d_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ .vdd_class = &vdd_gfx,
+ },
+};
+
+/*
+ * Frequencies and PLL configuration
+ * The PLL source would be to ping-pong between GPU-PLL0
+ * and GPU-PLL1.
+ * ====================================================
+ * | F | PLL SRC Freq | PLL postdiv | RCG Div |
+ * ====================================================
+ * | 160000000 | 640000000 | 2 | 2 |
+ * | 266000000 | 532000000 | 1 | 2 |
+ * | 370000000 | 740000000 | 1 | 2 |
+ * | 465000000 | 930000000 | 1 | 2 |
+ * | 588000000 | 1176000000 | 1 | 2 |
+ * | 647000000 | 1294000000 | 1 | 2 |
+ * | 750000000 | 1500000000 | 1 | 2 |
+ * ====================================================
+*/
+
+static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
+ F_GFX( 19200000, 0, 1, 0, 0, 0),
+ F_GFX(160000000, 0, 2, 0, 0, 640000000),
+ F_GFX(266000000, 0, 2, 0, 0, 532000000),
+ F_GFX(370000000, 0, 2, 0, 0, 740000000),
+ F_GFX(465000000, 0, 2, 0, 0, 930000000),
+ F_GFX(588000000, 0, 2, 0, 0, 1176000000),
+ F_GFX(647000000, 0, 2, 0, 0, 1294000000),
+ F_GFX(750000000, 0, 2, 0, 0, 1500000000),
+ { }
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x1070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .freq_tbl = ftbl_gfx3d_clk_src,
+ .parent_map = gpucc_parent_map_1,
+ .flags = FORCE_ENABLE_RCGR,
+ .clkr.hw.init = &gpu_clks_init[0],
+};
+
+static const struct freq_tbl ftbl_rbbmtimer_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbbmtimer_clk_src = {
+ .cmd_rcgr = 0x10b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpucc_parent_map_0,
+ .freq_tbl = ftbl_rbbmtimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbbmtimer_clk_src",
+ .parent_names = gpucc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP1(MIN, 19200000),
+ },
+};
+
+static const struct freq_tbl ftbl_rbcpr_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN_DIV, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbcpr_clk_src = {
+ .cmd_rcgr = 0x1030,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpucc_parent_map_0,
+ .freq_tbl = ftbl_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbcpr_clk_src",
+ .parent_names = gpucc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ VDD_DIG_FMAX_MAP2(
+ MIN, 19200000,
+ NOMINAL, 50000000),
+ },
+};
+
+static struct clk_branch gpucc_cxo_clk = {
+ .halt_reg = 0x1020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_cxo_clk",
+ .parent_names = (const char *[]) {
+ "cxo_a",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_gfx3d_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &gpu_clks_init[1],
+ },
+};
+
+static struct clk_branch gpucc_rbbmtimer_clk = {
+ .halt_reg = 0x10d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_rbbmtimer_clk",
+ .parent_names = (const char *[]){
+ "rbbmtimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpucc_rbcpr_clk = {
+ .halt_reg = 0x1054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpucc_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *gpucc_falcon_clocks[] = {
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GPU_PLL0_PLL] = &gpu_pll0_pll_out_main.clkr,
+ [GPU_PLL1_PLL] = &gpu_pll1_pll_out_main.clkr,
+ [GPUCC_CXO_CLK] = &gpucc_cxo_clk.clkr,
+ [GPUCC_GFX3D_CLK] = &gpucc_gfx3d_clk.clkr,
+ [GPUCC_RBBMTIMER_CLK] = &gpucc_rbbmtimer_clk.clkr,
+ [GPUCC_RBCPR_CLK] = &gpucc_rbcpr_clk.clkr,
+ [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
+ [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
+};
+
+static const struct regmap_config gpucc_falcon_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9034,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpucc_falcon_desc = {
+ .config = &gpucc_falcon_regmap_config,
+ .clks = gpucc_falcon_clocks,
+ .num_clks = ARRAY_SIZE(gpucc_falcon_clocks),
+};
+
+static const struct of_device_id gpucc_falcon_match_table[] = {
+ { .compatible = "qcom,gpucc-msmfalcon" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpucc_falcon_match_table);
+
+static int of_get_fmax_vdd_class(struct platform_device *pdev,
+ struct clk_hw *hw, char *prop_name, u32 index)
+{
+ struct device_node *of = pdev->dev.of_node;
+ int prop_len, i, j;
+ struct clk_vdd_class *vdd = hw->init->vdd_class;
+ int num = vdd->num_regulators + 1;
+ u32 *array;
+
+ if (!of_find_property(of, prop_name, &prop_len)) {
+ dev_err(&pdev->dev, "missing %s\n", prop_name);
+ return -EINVAL;
+ }
+
+ prop_len /= sizeof(u32);
+ if (prop_len % num) {
+ dev_err(&pdev->dev, "bad length %d\n", prop_len);
+ return -EINVAL;
+ }
+
+ prop_len /= num;
+ vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+ GFP_KERNEL);
+ if (!vdd->level_votes)
+ return -ENOMEM;
+
+ vdd->vdd_uv = devm_kzalloc(&pdev->dev,
+ prop_len * sizeof(int) * (num - 1), GFP_KERNEL);
+ if (!vdd->vdd_uv)
+ return -ENOMEM;
+
+ gpu_clks_init[index].fmax = devm_kzalloc(&pdev->dev, prop_len *
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!gpu_clks_init[index].fmax)
+ return -ENOMEM;
+
+ array = devm_kzalloc(&pdev->dev, prop_len * sizeof(u32) * num,
+ GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ of_property_read_u32_array(of, prop_name, array, prop_len * num);
+ for (i = 0; i < prop_len; i++) {
+ gpu_clks_init[index].fmax[i] = array[num * i];
+ for (j = 1; j < num; j++) {
+ vdd->vdd_uv[(num - 1) * i + (j - 1)] =
+ array[num * i + j];
+ }
+ }
+
+ devm_kfree(&pdev->dev, array);
+ vdd->num_levels = prop_len;
+ vdd->cur_level = prop_len;
+ gpu_clks_init[index].num_fmax = prop_len;
+
+ return 0;
+}
+
+static int gpucc_falcon_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gpucc_falcon_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* CX Regulator for RBBMTimer and RBCPR clock */
+ vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig_gfx");
+ if (IS_ERR(vdd_dig.regulator[0])) {
+ if (!(PTR_ERR(vdd_dig.regulator[0]) == -EPROBE_DEFER))
+ dev_err(&pdev->dev,
+ "Unable to get vdd_dig regulator\n");
+ return PTR_ERR(vdd_dig.regulator[0]);
+ }
+
+ /* Mx Regulator for GPU-PLLs */
+ vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx_gfx");
+ if (IS_ERR(vdd_mx.regulator[0])) {
+ if (!(PTR_ERR(vdd_mx.regulator[0]) == -EPROBE_DEFER))
+ dev_err(&pdev->dev,
+ "Unable to get vdd_mx regulator\n");
+ return PTR_ERR(vdd_mx.regulator[0]);
+ }
+
+ /* GFX Rail Regulator for GFX3D clock */
+ vdd_gfx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_gfx");
+ if (IS_ERR(vdd_gfx.regulator[0])) {
+ if (!(PTR_ERR(vdd_gfx.regulator[0]) == -EPROBE_DEFER))
+ dev_err(&pdev->dev,
+ "Unable to get vdd_gfx regulator\n");
+ return PTR_ERR(vdd_gfx.regulator[0]);
+ }
+
+ /* GFX rail fmax data linked to branch clock */
+ of_get_fmax_vdd_class(pdev, &gpucc_gfx3d_clk.clkr.hw,
+ "qcom,gfxfreq-corner", 1);
+
+ clk_alpha_pll_configure(&gpu_pll0_pll_out_main, regmap,
+ &gpu_pll0_config);
+ clk_alpha_pll_configure(&gpu_pll1_pll_out_main, regmap,
+ &gpu_pll0_config);
+
+ ret = qcom_cc_really_probe(pdev, &gpucc_falcon_desc, regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register GPUCC clocks\n");
+ return ret;
+ }
+
+ clk_prepare_enable(gpucc_cxo_clk.clkr.hw.clk);
+
+ dev_info(&pdev->dev, "Registered GPUCC clocks\n");
+
+ return ret;
+}
+
+static struct platform_driver gpucc_falcon_driver = {
+ .probe = gpucc_falcon_probe,
+ .driver = {
+ .name = "gpucc-msmfalcon",
+ .of_match_table = gpucc_falcon_match_table,
+ },
+};
+
+static int __init gpucc_falcon_init(void)
+{
+ return platform_driver_register(&gpucc_falcon_driver);
+}
+core_initcall_sync(gpucc_falcon_init);
+
+static void __exit gpucc_falcon_exit(void)
+{
+ platform_driver_unregister(&gpucc_falcon_driver);
+}
+module_exit(gpucc_falcon_exit);
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index ced95aa2b649..37e504381313 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -85,8 +85,6 @@ struct lpm_debug {
struct lpm_cluster *lpm_root_node;
-#define MAXSAMPLES 5
-
static bool lpm_prediction;
module_param_named(lpm_prediction,
lpm_prediction, bool, S_IRUGO | S_IWUSR | S_IWGRP);
@@ -108,6 +106,7 @@ struct lpm_history {
uint32_t hptr;
uint32_t hinvalid;
uint32_t htmr_wkup;
+ int64_t stime;
};
static DEFINE_PER_CPU(struct lpm_history, hist);
@@ -359,9 +358,6 @@ static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
static void histtimer_cancel(void)
{
- if (!lpm_prediction)
- return;
-
hrtimer_try_to_cancel(&histtimer);
}
@@ -383,6 +379,51 @@ static void histtimer_start(uint32_t time_us)
hrtimer_start(&histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
}
+static void cluster_timer_init(struct lpm_cluster *cluster)
+{
+ struct list_head *list;
+
+ if (!cluster)
+ return;
+
+ hrtimer_init(&cluster->histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ list_for_each(list, &cluster->child) {
+ struct lpm_cluster *n;
+
+ n = list_entry(list, typeof(*n), list);
+ cluster_timer_init(n);
+ }
+}
+
+static void clusttimer_cancel(void)
+{
+ int cpu = raw_smp_processor_id();
+ struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+
+ hrtimer_try_to_cancel(&cluster->histtimer);
+ hrtimer_try_to_cancel(&cluster->parent->histtimer);
+}
+
+static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
+{
+ struct lpm_cluster *cluster = container_of(h,
+ struct lpm_cluster, histtimer);
+
+ cluster->history.hinvalid = 1;
+ return HRTIMER_NORESTART;
+}
+
+static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
+{
+ uint64_t time_ns = time_us * NSEC_PER_USEC;
+ ktime_t clust_ktime = ns_to_ktime(time_ns);
+
+ cluster->histtimer.function = clusttimer_fn;
+ hrtimer_start(&cluster->histtimer, clust_ktime,
+ HRTIMER_MODE_REL_PINNED);
+}
+
static void msm_pm_set_timer(uint32_t modified_time_us)
{
u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
@@ -492,14 +533,17 @@ static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
if (history->hinvalid) {
history->hinvalid = 0;
history->htmr_wkup = 1;
+ history->stime = 0;
return 0;
}
/*
* Predict only when all the samples are collected.
*/
- if (history->nsamp < MAXSAMPLES)
+ if (history->nsamp < MAXSAMPLES) {
+ history->stime = 0;
return 0;
+ }
/*
* Check if the samples are not much deviated, if so use the
@@ -540,6 +584,7 @@ again:
*/
if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
|| stddev <= ref_stddev) {
+ history->stime = ktime_to_us(ktime_get()) + avg;
return avg;
} else if (divisor > (MAXSAMPLES - 1)) {
thresh = max - 1;
@@ -567,6 +612,8 @@ again:
*idx_restrict = j;
do_div(total, failed);
*idx_restrict_time = total;
+ history->stime = ktime_to_us(ktime_get())
+ + *idx_restrict_time;
break;
}
}
@@ -584,6 +631,7 @@ static inline void invalidate_predict_history(struct cpuidle_device *dev)
if (history->hinvalid) {
history->hinvalid = 0;
history->htmr_wkup = 1;
+ history->stime = 0;
}
}
@@ -603,6 +651,7 @@ static void clear_predict_history(void)
history->mode[i] = -1;
history->hptr = 0;
history->nsamp = 0;
+ history->stime = 0;
}
}
}
@@ -724,12 +773,14 @@ static int cpu_power_select(struct cpuidle_device *dev,
}
static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
- struct cpumask *mask, bool from_idle)
+ struct cpumask *mask, bool from_idle, uint32_t *pred_time)
{
int cpu;
int next_cpu = raw_smp_processor_id();
ktime_t next_event;
struct cpumask online_cpus_in_cluster;
+ struct lpm_history *history;
+ int64_t prediction = LONG_MAX;
next_event.tv64 = KTIME_MAX;
if (!suspend_wake_time)
@@ -754,11 +805,21 @@ static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
next_event.tv64 = next_event_c->tv64;
next_cpu = cpu;
}
+
+ if (from_idle && lpm_prediction) {
+ history = &per_cpu(hist, cpu);
+ if (history->stime && (history->stime < prediction))
+ prediction = history->stime;
+ }
}
if (mask)
cpumask_copy(mask, cpumask_of(next_cpu));
+ if (from_idle && lpm_prediction) {
+ if (prediction > ktime_to_us(ktime_get()))
+ *pred_time = prediction - ktime_to_us(ktime_get());
+ }
if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
return ktime_to_us(ktime_sub(next_event, ktime_get()));
@@ -766,18 +827,193 @@ static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
return 0;
}
-static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
+static int cluster_predict(struct lpm_cluster *cluster,
+ uint32_t *pred_us)
+{
+ int i, j;
+ int ret = 0;
+ struct cluster_history *history = &cluster->history;
+ int64_t cur_time = ktime_to_us(ktime_get());
+
+ if (!lpm_prediction)
+ return 0;
+
+ if (history->hinvalid) {
+ history->hinvalid = 0;
+ history->htmr_wkup = 1;
+ history->flag = 0;
+ return ret;
+ }
+
+ if (history->nsamp == MAXSAMPLES) {
+ for (i = 0; i < MAXSAMPLES; i++) {
+ if ((cur_time - history->stime[i])
+ > CLUST_SMPL_INVLD_TIME)
+ history->nsamp--;
+ }
+ }
+
+ if (history->nsamp < MAXSAMPLES) {
+ history->flag = 0;
+ return ret;
+ }
+
+ if (history->flag == 2)
+ history->flag = 0;
+
+ if (history->htmr_wkup != 1) {
+ uint64_t total = 0;
+
+ if (history->flag == 1) {
+ for (i = 0; i < MAXSAMPLES; i++)
+ total += history->resi[i];
+ do_div(total, MAXSAMPLES);
+ *pred_us = total;
+ return 2;
+ }
+
+ for (j = 1; j < cluster->nlevels; j++) {
+ uint32_t failed = 0;
+
+ total = 0;
+ for (i = 0; i < MAXSAMPLES; i++) {
+ if ((history->mode[i] == j) && (history->resi[i]
+ < cluster->levels[j].pwr.min_residency)) {
+ failed++;
+ total += history->resi[i];
+ }
+ }
+
+ if (failed > (MAXSAMPLES-2)) {
+ do_div(total, failed);
+ *pred_us = total;
+ history->flag = 1;
+ return 1;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void update_cluster_history_time(struct cluster_history *history,
+ int idx, uint64_t start)
+{
+ history->entry_idx = idx;
+ history->entry_time = start;
+}
+
+static void update_cluster_history(struct cluster_history *history, int idx)
+{
+ uint32_t tmr = 0;
+ uint32_t residency = 0;
+ struct lpm_cluster *cluster =
+ container_of(history, struct lpm_cluster, history);
+
+ if (!lpm_prediction)
+ return;
+
+ if ((history->entry_idx == -1) || (history->entry_idx == idx)) {
+ residency = ktime_to_us(ktime_get()) - history->entry_time;
+ history->stime[history->hptr] = history->entry_time;
+ } else
+ return;
+
+ if (history->htmr_wkup) {
+ if (!history->hptr)
+ history->hptr = MAXSAMPLES-1;
+ else
+ history->hptr--;
+
+ history->resi[history->hptr] += residency;
+
+ history->htmr_wkup = 0;
+ tmr = 1;
+ } else {
+ history->resi[history->hptr] = residency;
+ }
+
+ history->mode[history->hptr] = idx;
+
+ history->entry_idx = INT_MIN;
+ history->entry_time = 0;
+
+ if (history->nsamp < MAXSAMPLES)
+ history->nsamp++;
+
+ trace_cluster_pred_hist(cluster->cluster_name,
+ history->mode[history->hptr], history->resi[history->hptr],
+ history->hptr, tmr);
+
+ (history->hptr)++;
+
+ if (history->hptr >= MAXSAMPLES)
+ history->hptr = 0;
+}
+
+static void clear_cl_history_each(struct cluster_history *history)
+{
+ int i;
+
+ for (i = 0; i < MAXSAMPLES; i++) {
+ history->resi[i] = 0;
+ history->mode[i] = -1;
+ history->stime[i] = 0;
+ }
+ history->hptr = 0;
+ history->nsamp = 0;
+ history->flag = 0;
+ history->hinvalid = 0;
+ history->htmr_wkup = 0;
+}
+
+static void clear_cl_predict_history(void)
+{
+ struct lpm_cluster *cluster = lpm_root_node;
+ struct list_head *list;
+
+ if (!lpm_prediction)
+ return;
+
+ clear_cl_history_each(&cluster->history);
+
+ list_for_each(list, &cluster->child) {
+ struct lpm_cluster *n;
+
+ n = list_entry(list, typeof(*n), list);
+ clear_cl_history_each(&n->history);
+ }
+}
+
+static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
+ int *ispred)
{
int best_level = -1;
int i;
struct cpumask mask;
uint32_t latency_us = ~0U;
uint32_t sleep_us;
+ uint32_t cpupred_us = 0, pred_us = 0;
+ int pred_mode = 0, predicted = 0;
if (!cluster)
return -EINVAL;
- sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, from_idle);
+ sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL,
+ from_idle, &cpupred_us);
+
+ if (from_idle) {
+ pred_mode = cluster_predict(cluster, &pred_us);
+
+ if (cpupred_us && pred_mode && (cpupred_us < pred_us))
+ pred_us = cpupred_us;
+
+ if (pred_us && pred_mode && (pred_us < sleep_us))
+ predicted = 1;
+
+ if (predicted && (pred_us == cpupred_us))
+ predicted = 2;
+ }
if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
@@ -823,10 +1059,19 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
best_level = i;
- if (sleep_us <= pwr_params->max_residency)
+ if (predicted ? (pred_us <= pwr_params->max_residency)
+ : (sleep_us <= pwr_params->max_residency))
break;
}
+ if ((best_level == (cluster->nlevels - 1)) && (pred_mode == 2))
+ cluster->history.flag = 2;
+
+ *ispred = predicted;
+
+ trace_cluster_pred_select(cluster->cluster_name, best_level, sleep_us,
+ latency_us, predicted, pred_us);
+
return best_level;
}
@@ -840,7 +1085,7 @@ static void cluster_notify(struct lpm_cluster *cluster,
}
static int cluster_configure(struct lpm_cluster *cluster, int idx,
- bool from_idle)
+ bool from_idle, int predicted)
{
struct lpm_cluster_level *level = &cluster->levels[idx];
int ret, i;
@@ -858,6 +1103,10 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle);
lpm_stats_cluster_enter(cluster->stats, idx);
+
+ if (from_idle && lpm_prediction)
+ update_cluster_history_time(&cluster->history, idx,
+ ktime_to_us(ktime_get()));
}
for (i = 0; i < cluster->ndevices; i++) {
@@ -869,8 +1118,10 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
if (level->notify_rpm) {
struct cpumask nextcpu, *cpumask;
uint64_t us;
+ uint32_t pred_us;
- us = get_cluster_sleep_time(cluster, &nextcpu, from_idle);
+ us = get_cluster_sleep_time(cluster, &nextcpu,
+ from_idle, &pred_us);
cpumask = level->disable_dynamic_routing ? NULL : &nextcpu;
ret = msm_rpm_enter_sleep(0, cpumask);
@@ -881,6 +1132,8 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
us = us + 1;
clear_predict_history();
+ clear_cl_predict_history();
+
do_div(us, USEC_PER_SEC/SCLK_HZ);
msm_mpm_enter_sleep(us, from_idle, cpumask);
}
@@ -891,6 +1144,15 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
sched_set_cluster_dstate(&cluster->child_cpus, idx, 0, 0);
cluster->last_level = idx;
+
+ if (predicted && (idx < (cluster->nlevels - 1))) {
+ struct power_params *pwr_params = &cluster->levels[idx].pwr;
+
+ tick_broadcast_exit();
+ clusttimer_start(cluster, pwr_params->max_residency + tmr_add);
+ tick_broadcast_enter();
+ }
+
return 0;
failed_set_mode:
@@ -909,6 +1171,7 @@ static void cluster_prepare(struct lpm_cluster *cluster,
int64_t start_time)
{
int i;
+ int predicted = 0;
if (!cluster)
return;
@@ -939,12 +1202,28 @@ static void cluster_prepare(struct lpm_cluster *cluster,
&cluster->child_cpus))
goto failed;
- i = cluster_select(cluster, from_idle);
+ i = cluster_select(cluster, from_idle, &predicted);
+
+ if (((i < 0) || (i == cluster->default_level))
+ && predicted && from_idle) {
+ update_cluster_history_time(&cluster->history,
+ -1, ktime_to_us(ktime_get()));
+
+ if (i < 0) {
+ struct power_params *pwr_params =
+ &cluster->levels[0].pwr;
+
+ tick_broadcast_exit();
+ clusttimer_start(cluster,
+ pwr_params->max_residency + tmr_add);
+ tick_broadcast_enter();
+ }
+ }
if (i < 0)
goto failed;
- if (cluster_configure(cluster, i, from_idle))
+ if (cluster_configure(cluster, i, from_idle, predicted))
goto failed;
cluster->stats->sleep_time = start_time;
@@ -988,6 +1267,10 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
&lvl->num_cpu_votes, cpu);
}
+ if (from_idle && first_cpu &&
+ (cluster->last_level == cluster->default_level))
+ update_cluster_history(&cluster->history, cluster->last_level);
+
if (!first_cpu || cluster->last_level == cluster->default_level)
goto unlock_return;
@@ -1029,6 +1312,10 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
sched_set_cluster_dstate(&cluster->child_cpus, 0, 0, 0);
cluster_notify(cluster, &cluster->levels[last_level], false);
+
+ if (from_idle)
+ update_cluster_history(&cluster->history, last_level);
+
cluster_unprepare(cluster->parent, &cluster->child_cpus,
last_level, from_idle, end_time);
unlock_return:
@@ -1288,7 +1575,10 @@ exit:
update_history(dev, idx);
trace_cpu_idle_exit(idx, success);
local_irq_enable();
- histtimer_cancel();
+ if (lpm_prediction) {
+ histtimer_cancel();
+ clusttimer_cancel();
+ }
return idx;
}
@@ -1561,6 +1851,7 @@ static int lpm_probe(struct platform_device *pdev)
suspend_set_ops(&lpm_suspend_ops);
hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cluster_timer_init(lpm_root_node);
ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
if (ret) {
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index 63fe0a0fbc08..3c9665ea8981 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -14,6 +14,8 @@
#include <soc/qcom/spm.h>
#define NR_LPM_LEVELS 8
+#define MAXSAMPLES 5
+#define CLUST_SMPL_INVLD_TIME 40000
extern bool use_psci;
@@ -85,6 +87,19 @@ struct low_power_ops {
enum msm_pm_l2_scm_flag tz_flag;
};
+struct cluster_history {
+ uint32_t resi[MAXSAMPLES];
+ int mode[MAXSAMPLES];
+ int64_t stime[MAXSAMPLES];
+ uint32_t hptr;
+ uint32_t hinvalid;
+ uint32_t htmr_wkup;
+ uint64_t entry_time;
+ int entry_idx;
+ int nsamp;
+ int flag;
+};
+
struct lpm_cluster {
struct list_head list;
struct list_head child;
@@ -109,6 +124,8 @@ struct lpm_cluster {
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
bool no_saw_devices;
+ struct cluster_history history;
+ struct hrtimer histtimer;
};
int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
diff --git a/drivers/devfreq/governor_bw_vbif.c b/drivers/devfreq/governor_bw_vbif.c
index da1eefb8c94e..33e144b653d0 100644
--- a/drivers/devfreq/governor_bw_vbif.c
+++ b/drivers/devfreq/governor_bw_vbif.c
@@ -78,15 +78,13 @@ static int devfreq_vbif_ev_handler(struct devfreq *devfreq,
case DEVFREQ_GOV_START:
mutex_lock(&df_lock);
df = devfreq;
- if (df->profile->get_dev_status)
- ret = df->profile->get_dev_status(df->dev.parent,
- &stat);
+ if (df->profile->get_dev_status &&
+ !df->profile->get_dev_status(df->dev.parent, &stat) &&
+ stat.private_data)
+ dev_ab = stat.private_data;
else
- ret = 0;
- if (ret || !stat.private_data)
pr_warn("Device doesn't take AB votes!\n");
- else
- dev_ab = stat.private_data;
+
mutex_unlock(&df_lock);
ret = devfreq_vbif_update_bw(0, 0);
diff --git a/drivers/devfreq/governor_memlat.c b/drivers/devfreq/governor_memlat.c
index 010f9defe33e..a3c826e152e1 100644
--- a/drivers/devfreq/governor_memlat.c
+++ b/drivers/devfreq/governor_memlat.c
@@ -81,6 +81,29 @@ show_attr(__attr) \
store_attr(__attr, min, max) \
static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+static ssize_t show_map(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *df = to_devfreq(dev);
+ struct memlat_node *n = df->data;
+ struct core_dev_map *map = n->hw->freq_map;
+ unsigned int cnt = 0;
+
+ cnt += snprintf(buf, PAGE_SIZE, "Core freq (MHz)\tDevice BW\n");
+
+ while (map->core_mhz && cnt < PAGE_SIZE) {
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%15u\t%9u\n",
+ map->core_mhz, map->target_freq);
+ map++;
+ }
+ if (cnt < PAGE_SIZE)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
+
+ return cnt;
+}
+
+static DEVICE_ATTR(freq_map, 0444, show_map, NULL);
+
static unsigned long core_to_dev_freq(struct memlat_node *node,
unsigned long coref)
{
@@ -247,6 +270,7 @@ gov_attr(ratio_ceil, 1U, 10000U);
static struct attribute *dev_attr[] = {
&dev_attr_ratio_ceil.attr,
+ &dev_attr_freq_map.attr,
NULL,
};
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index 90aee3cad5ad..625a2640b4c4 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -3,7 +3,7 @@ ccflags-y := -Idrivers/staging/android
msm_kgsl_core-y = \
kgsl.o \
kgsl_trace.o \
- kgsl_cmdbatch.o \
+ kgsl_drawobj.o \
kgsl_ioctl.o \
kgsl_sharedmem.o \
kgsl_pwrctrl.o \
diff --git a/drivers/gpu/msm/a5xx_reg.h b/drivers/gpu/msm/a5xx_reg.h
index 3b29452ce8bd..f3b4e6622043 100644
--- a/drivers/gpu/msm/a5xx_reg.h
+++ b/drivers/gpu/msm/a5xx_reg.h
@@ -640,6 +640,7 @@
/* UCHE registers */
#define A5XX_UCHE_ADDR_MODE_CNTL 0xE80
+#define A5XX_UCHE_MODE_CNTL 0xE81
#define A5XX_UCHE_WRITE_THRU_BASE_LO 0xE87
#define A5XX_UCHE_WRITE_THRU_BASE_HI 0xE88
#define A5XX_UCHE_TRAP_BASE_LO 0xE89
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 1356835d0e93..9940f7a7c2b7 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -40,6 +40,7 @@
/* Include the master list of GPU cores that are supported */
#include "adreno-gpulist.h"
+#include "adreno_dispatch.h"
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "adreno."
@@ -1015,8 +1016,8 @@ static void _adreno_free_memories(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv))
- kgsl_free_global(device, &adreno_dev->cmdbatch_profile_buffer);
+ if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv))
+ kgsl_free_global(device, &adreno_dev->profile_buffer);
/* Free local copies of firmware and other command streams */
kfree(adreno_dev->pfp_fw);
@@ -1187,22 +1188,22 @@ static int adreno_init(struct kgsl_device *device)
}
/*
- * Allocate a small chunk of memory for precise cmdbatch profiling for
+ * Allocate a small chunk of memory for precise drawobj profiling for
* those targets that have the always on timer
*/
if (!adreno_is_a3xx(adreno_dev)) {
int r = kgsl_allocate_global(device,
- &adreno_dev->cmdbatch_profile_buffer, PAGE_SIZE,
+ &adreno_dev->profile_buffer, PAGE_SIZE,
0, 0, "alwayson");
- adreno_dev->cmdbatch_profile_index = 0;
+ adreno_dev->profile_index = 0;
if (r == 0) {
- set_bit(ADRENO_DEVICE_CMDBATCH_PROFILE,
+ set_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE,
&adreno_dev->priv);
kgsl_sharedmem_set(device,
- &adreno_dev->cmdbatch_profile_buffer, 0, 0,
+ &adreno_dev->profile_buffer, 0, 0,
PAGE_SIZE);
}
@@ -1242,86 +1243,6 @@ static bool regulators_left_on(struct kgsl_device *device)
return false;
}
-static void _setup_throttling_counters(struct adreno_device *adreno_dev)
-{
- int i, ret;
-
- if (!adreno_is_a540(adreno_dev))
- return;
-
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
- return;
-
- for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
- /* reset throttled cycles ivalue */
- adreno_dev->busy_data.throttle_cycles[i] = 0;
-
- if (adreno_dev->gpmu_throttle_counters[i] != 0)
- continue;
- ret = adreno_perfcounter_get(adreno_dev,
- KGSL_PERFCOUNTER_GROUP_GPMU_PWR,
- ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i,
- &adreno_dev->gpmu_throttle_counters[i],
- NULL,
- PERFCOUNTER_FLAG_KERNEL);
- WARN_ONCE(ret, "Unable to get clock throttling counter %x\n",
- ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i);
- }
-}
-
-/* FW driven idle 10% throttle */
-#define IDLE_10PCT 0
-/* number of cycles when clock is throttled by 50% (CRC) */
-#define CRC_50PCT 1
-/* number of cycles when clock is throttled by more than 50% (CRC) */
-#define CRC_MORE50PCT 2
-/* number of cycles when clock is throttle by less than 50% (CRC) */
-#define CRC_LESS50PCT 3
-
-static uint64_t _read_throttling_counters(struct adreno_device *adreno_dev)
-{
- int i, adj;
- uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS];
- struct adreno_busy_data *busy = &adreno_dev->busy_data;
-
- if (!adreno_is_a540(adreno_dev))
- return 0;
-
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
- return 0;
-
- if (!test_bit(ADRENO_THROTTLING_CTRL, &adreno_dev->pwrctrl_flag))
- return 0;
-
- for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
- if (!adreno_dev->gpmu_throttle_counters[i])
- return 0;
-
- th[i] = counter_delta(KGSL_DEVICE(adreno_dev),
- adreno_dev->gpmu_throttle_counters[i],
- &busy->throttle_cycles[i]);
- }
- adj = th[CRC_MORE50PCT] - th[IDLE_10PCT];
- adj = th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (adj < 0 ? 0 : adj) * 3;
-
- trace_kgsl_clock_throttling(
- th[IDLE_10PCT], th[CRC_50PCT],
- th[CRC_MORE50PCT], th[CRC_LESS50PCT],
- adj);
- return adj;
-}
-
-static void _update_threshold_count(struct adreno_device *adreno_dev,
- uint64_t adj)
-{
- if (adreno_is_a530(adreno_dev))
- kgsl_regread(KGSL_DEVICE(adreno_dev),
- adreno_dev->lm_threshold_count,
- &adreno_dev->lm_threshold_cross);
- else if (adreno_is_a540(adreno_dev))
- adreno_dev->lm_threshold_cross = adj;
-}
-
static void _set_secvid(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1418,8 +1339,8 @@ static int _adreno_start(struct adreno_device *adreno_dev)
}
}
- if (device->pwrctrl.bus_control) {
+ if (device->pwrctrl.bus_control) {
/* VBIF waiting for RAM */
if (adreno_dev->starved_ram_lo == 0) {
ret = adreno_perfcounter_get(adreno_dev,
@@ -1455,20 +1376,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
adreno_dev->busy_data.vbif_ram_cycles = 0;
adreno_dev->busy_data.vbif_starved_ram = 0;
- if (adreno_is_a530(adreno_dev) && ADRENO_FEATURE(adreno_dev, ADRENO_LM)
- && adreno_dev->lm_threshold_count == 0) {
-
- ret = adreno_perfcounter_get(adreno_dev,
- KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 27,
- &adreno_dev->lm_threshold_count, NULL,
- PERFCOUNTER_FLAG_KERNEL);
- /* Ignore noncritical ret - used for debugfs */
- if (ret)
- adreno_dev->lm_threshold_count = 0;
- }
-
- _setup_throttling_counters(adreno_dev);
-
/* Restore performance counter registers with saved values */
adreno_perfcounter_restore(adreno_dev);
@@ -1653,14 +1560,9 @@ static inline bool adreno_try_soft_reset(struct kgsl_device *device, int fault)
int adreno_reset(struct kgsl_device *device, int fault)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
int ret = -EINVAL;
int i = 0;
- /* broadcast to HW - reset is coming */
- if (gpudev->pre_reset)
- gpudev->pre_reset(adreno_dev);
-
/* Try soft reset first */
if (adreno_try_soft_reset(device, fault)) {
/* Make sure VBIF is cleared before resetting */
@@ -2059,7 +1961,7 @@ static int adreno_setproperty(struct kgsl_device_private *dev_priv,
KGSL_STATE_ACTIVE);
device->pwrctrl.ctrl_flags = KGSL_PWR_ON;
adreno_fault_detect_stop(adreno_dev);
- kgsl_pwrscale_disable(device);
+ kgsl_pwrscale_disable(device, true);
}
mutex_unlock(&device->mutex);
@@ -2340,12 +2242,12 @@ int adreno_idle(struct kgsl_device *device)
* adreno_drain() - Drain the dispatch queue
* @device: Pointer to the KGSL device structure for the GPU
*
- * Drain the dispatcher of existing command batches. This halts
+ * Drain the dispatcher of existing drawobjs. This halts
* additional commands from being issued until the gate is completed.
*/
static int adreno_drain(struct kgsl_device *device)
{
- reinit_completion(&device->cmdbatch_gate);
+ reinit_completion(&device->halt_gate);
return 0;
}
@@ -2580,27 +2482,6 @@ static inline s64 adreno_ticks_to_us(u32 ticks, u32 freq)
return ticks / freq;
}
-static unsigned int counter_delta(struct kgsl_device *device,
- unsigned int reg, unsigned int *counter)
-{
- unsigned int val;
- unsigned int ret = 0;
-
- /* Read the value */
- kgsl_regread(device, reg, &val);
-
- /* Return 0 for the first read */
- if (*counter != 0) {
- if (val < *counter)
- ret = (0xFFFFFFFF - *counter) + val;
- else
- ret = val - *counter;
- }
-
- *counter = val;
- return ret;
-}
-
/**
* adreno_power_stats() - Reads the counters needed for freq decisions
* @device: Pointer to device whose counters are read
@@ -2612,6 +2493,7 @@ static void adreno_power_stats(struct kgsl_device *device,
struct kgsl_power_stats *stats)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct adreno_busy_data *busy = &adreno_dev->busy_data;
uint64_t adj = 0;
@@ -2625,8 +2507,11 @@ static void adreno_power_stats(struct kgsl_device *device,
gpu_busy = counter_delta(device, adreno_dev->perfctr_pwr_lo,
&busy->gpu_busy);
- adj = _read_throttling_counters(adreno_dev);
- gpu_busy += adj;
+ if (gpudev->read_throttling_counters) {
+ adj = gpudev->read_throttling_counters(adreno_dev);
+ gpu_busy += adj;
+ }
+
stats->busy_time = adreno_ticks_to_us(gpu_busy,
kgsl_pwrctrl_active_freq(pwr));
}
@@ -2647,8 +2532,9 @@ static void adreno_power_stats(struct kgsl_device *device,
stats->ram_time = ram_cycles;
stats->ram_wait = starved_ram;
}
- if (adreno_dev->lm_threshold_count)
- _update_threshold_count(adreno_dev, adj);
+ if (adreno_dev->lm_threshold_count &&
+ gpudev->count_throttles)
+ gpudev->count_throttles(adreno_dev, adj);
}
static unsigned int adreno_gpuid(struct kgsl_device *device,
@@ -2825,7 +2711,7 @@ static const struct kgsl_functable adreno_functable = {
.getproperty_compat = adreno_getproperty_compat,
.waittimestamp = adreno_waittimestamp,
.readtimestamp = adreno_readtimestamp,
- .issueibcmds = adreno_ringbuffer_issueibcmds,
+ .queue_cmds = adreno_dispatcher_queue_cmds,
.ioctl = adreno_ioctl,
.compat_ioctl = adreno_compat_ioctl,
.power_stats = adreno_power_stats,
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index d4858f3f818e..0f3403cb0095 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -76,13 +76,13 @@
KGSL_CONTEXT_PREEMPT_STYLE_SHIFT)
/*
- * return the dispatcher cmdqueue in which the given cmdbatch should
+ * return the dispatcher drawqueue in which the given drawobj should
* be submitted
*/
-#define ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(c) \
+#define ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(c) \
(&((ADRENO_CONTEXT(c->context))->rb->dispatch_q))
-#define ADRENO_CMDBATCH_RB(c) \
+#define ADRENO_DRAWOBJ_RB(c) \
((ADRENO_CONTEXT(c->context))->rb)
/* Adreno core features */
@@ -346,8 +346,8 @@ struct adreno_gpu_core {
* @halt: Atomic variable to check whether the GPU is currently halted
* @ctx_d_debugfs: Context debugfs node
* @pwrctrl_flag: Flag to hold adreno specific power attributes
- * @cmdbatch_profile_buffer: Memdesc holding the cmdbatch profiling buffer
- * @cmdbatch_profile_index: Index to store the start/stop ticks in the profiling
+ * @profile_buffer: Memdesc holding the drawobj profiling buffer
+ * @profile_index: Index to store the start/stop ticks in the profiling
* buffer
* @sp_local_gpuaddr: Base GPU virtual address for SP local memory
* @sp_pvt_gpuaddr: Base GPU virtual address for SP private memory
@@ -404,8 +404,8 @@ struct adreno_device {
struct dentry *ctx_d_debugfs;
unsigned long pwrctrl_flag;
- struct kgsl_memdesc cmdbatch_profile_buffer;
- unsigned int cmdbatch_profile_index;
+ struct kgsl_memdesc profile_buffer;
+ unsigned int profile_index;
uint64_t sp_local_gpuaddr;
uint64_t sp_pvt_gpuaddr;
const struct firmware *lm_fw;
@@ -441,7 +441,7 @@ struct adreno_device {
* @ADRENO_DEVICE_STARTED - Set if the device start sequence is in progress
* @ADRENO_DEVICE_FAULT - Set if the device is currently in fault (and shouldn't
* send any more commands to the ringbuffer)
- * @ADRENO_DEVICE_CMDBATCH_PROFILE - Set if the device supports command batch
+ * @ADRENO_DEVICE_DRAWOBJ_PROFILE - Set if the device supports drawobj
* profiling via the ALWAYSON counter
* @ADRENO_DEVICE_PREEMPTION - Turn on/off preemption
* @ADRENO_DEVICE_SOFT_FAULT_DETECT - Set if soft fault detect is enabled
@@ -459,7 +459,7 @@ enum adreno_device_flags {
ADRENO_DEVICE_HANG_INTR = 4,
ADRENO_DEVICE_STARTED = 5,
ADRENO_DEVICE_FAULT = 6,
- ADRENO_DEVICE_CMDBATCH_PROFILE = 7,
+ ADRENO_DEVICE_DRAWOBJ_PROFILE = 7,
ADRENO_DEVICE_GPU_REGULATOR_ENABLED = 8,
ADRENO_DEVICE_PREEMPTION = 9,
ADRENO_DEVICE_SOFT_FAULT_DETECT = 10,
@@ -469,22 +469,22 @@ enum adreno_device_flags {
};
/**
- * struct adreno_cmdbatch_profile_entry - a single command batch entry in the
+ * struct adreno_drawobj_profile_entry - a single drawobj entry in the
* kernel profiling buffer
- * @started: Number of GPU ticks at start of the command batch
- * @retired: Number of GPU ticks at the end of the command batch
+ * @started: Number of GPU ticks at start of the drawobj
+ * @retired: Number of GPU ticks at the end of the drawobj
*/
-struct adreno_cmdbatch_profile_entry {
+struct adreno_drawobj_profile_entry {
uint64_t started;
uint64_t retired;
};
-#define ADRENO_CMDBATCH_PROFILE_COUNT \
- (PAGE_SIZE / sizeof(struct adreno_cmdbatch_profile_entry))
+#define ADRENO_DRAWOBJ_PROFILE_COUNT \
+ (PAGE_SIZE / sizeof(struct adreno_drawobj_profile_entry))
-#define ADRENO_CMDBATCH_PROFILE_OFFSET(_index, _member) \
- ((_index) * sizeof(struct adreno_cmdbatch_profile_entry) \
- + offsetof(struct adreno_cmdbatch_profile_entry, _member))
+#define ADRENO_DRAWOBJ_PROFILE_OFFSET(_index, _member) \
+ ((_index) * sizeof(struct adreno_drawobj_profile_entry) \
+ + offsetof(struct adreno_drawobj_profile_entry, _member))
/**
@@ -756,6 +756,10 @@ struct adreno_gpudev {
void (*pwrlevel_change_settings)(struct adreno_device *,
unsigned int prelevel, unsigned int postlevel,
bool post);
+ uint64_t (*read_throttling_counters)(struct adreno_device *);
+ void (*count_throttles)(struct adreno_device *, uint64_t adj);
+ int (*enable_pwr_counters)(struct adreno_device *,
+ unsigned int counter);
unsigned int (*preemption_pre_ibsubmit)(struct adreno_device *,
struct adreno_ringbuffer *rb,
unsigned int *, struct kgsl_context *);
@@ -765,7 +769,6 @@ struct adreno_gpudev {
int (*preemption_init)(struct adreno_device *);
void (*preemption_schedule)(struct adreno_device *);
void (*enable_64bit)(struct adreno_device *);
- void (*pre_reset)(struct adreno_device *);
void (*clk_set_options)(struct adreno_device *,
const char *, struct clk *);
};
@@ -776,7 +779,7 @@ struct adreno_gpudev {
* @KGSL_FT_REPLAY: Replay the faulting command
* @KGSL_FT_SKIPIB: Skip the faulting indirect buffer
* @KGSL_FT_SKIPFRAME: Skip the frame containing the faulting IB
- * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command batch
+ * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command obj
* @KGSL_FT_TEMP_DISABLE: Disables FT for all commands
* @KGSL_FT_THROTTLE: Disable the context if it faults too often
* @KGSL_FT_SKIPCMD: Skip the command containing the faulting IB
@@ -793,7 +796,7 @@ enum kgsl_ft_policy_bits {
/* KGSL_FT_MAX_BITS is used to calculate the mask */
KGSL_FT_MAX_BITS,
/* Internal bits - set during GFT */
- /* Skip the PM dump on replayed command batches */
+ * /* Skip the PM dump on replayed command objs */
KGSL_FT_SKIP_PMDUMP = 31,
};
@@ -882,7 +885,7 @@ int adreno_reset(struct kgsl_device *device, int fault);
void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch);
+ struct kgsl_drawobj *drawobj);
int adreno_coresight_init(struct adreno_device *adreno_dev);
@@ -1467,4 +1470,24 @@ static inline void adreno_ringbuffer_set_pagetable(struct adreno_ringbuffer *rb,
spin_unlock_irqrestore(&rb->preempt_lock, flags);
}
+static inline unsigned int counter_delta(struct kgsl_device *device,
+ unsigned int reg, unsigned int *counter)
+{
+ unsigned int val;
+ unsigned int ret = 0;
+
+ /* Read the value */
+ kgsl_regread(device, reg, &val);
+
+ /* Return 0 for the first read */
+ if (*counter != 0) {
+ if (val < *counter)
+ ret = (0xFFFFFFFF - *counter) + val;
+ else
+ ret = val - *counter;
+ }
+
+ *counter = val;
+ return ret;
+}
#endif /*__ADRENO_H */
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 8ac058a7c5b0..2891940b8f5b 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -27,6 +27,7 @@
#include "kgsl_sharedmem.h"
#include "kgsl_log.h"
#include "kgsl.h"
+#include "kgsl_trace.h"
#include "adreno_a5xx_packets.h"
static int zap_ucode_loaded;
@@ -1406,105 +1407,10 @@ static void a530_lm_enable(struct adreno_device *adreno_dev)
adreno_is_a530v2(adreno_dev) ? 0x00060011 : 0x00000011);
}
-static bool llm_is_enabled(struct adreno_device *adreno_dev)
-{
- unsigned int r;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r);
- return r & (GPMU_BCL_ENABLED | GPMU_LLM_ENABLED);
-}
-
-
-static void sleep_llm(struct adreno_device *adreno_dev)
-{
- unsigned int r, retry;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- if (!llm_is_enabled(adreno_dev))
- return;
-
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL, &r);
-
- if ((r & STATE_OF_CHILD) == 0) {
- /* If both children are on, sleep CHILD_O1 first */
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- STATE_OF_CHILD, STATE_OF_CHILD_01 | IDLE_FULL_LM_SLEEP);
- /* Wait for IDLE_FULL_ACK before continuing */
- for (retry = 0; retry < 5; retry++) {
- udelay(1);
- kgsl_regread(device,
- A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- if (r & IDLE_FULL_ACK)
- break;
- }
-
- if (retry == 5)
- KGSL_CORE_ERR("GPMU: LLM failed to idle: 0x%X\n", r);
- }
-
- /* Now turn off both children */
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- 0, STATE_OF_CHILD | IDLE_FULL_LM_SLEEP);
-
- /* wait for WAKEUP_ACK to be zero */
- for (retry = 0; retry < 5; retry++) {
- udelay(1);
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- if ((r & WAKEUP_ACK) == 0)
- break;
- }
-
- if (retry == 5)
- KGSL_CORE_ERR("GPMU: LLM failed to sleep: 0x%X\n", r);
-}
-
-static void wake_llm(struct adreno_device *adreno_dev)
-{
- unsigned int r, retry;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- if (!llm_is_enabled(adreno_dev))
- return;
-
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- STATE_OF_CHILD, STATE_OF_CHILD_01);
-
- if (((device->pwrctrl.num_pwrlevels - 2) -
- device->pwrctrl.active_pwrlevel) <= LM_DCVS_LIMIT)
- return;
-
- udelay(1);
-
- /* Turn on all children */
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- STATE_OF_CHILD | IDLE_FULL_LM_SLEEP, 0);
-
- /* Wait for IDLE_FULL_ACK to be zero and WAKEUP_ACK to be set */
- for (retry = 0; retry < 5; retry++) {
- udelay(1);
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- if ((r & (WAKEUP_ACK | IDLE_FULL_ACK)) == WAKEUP_ACK)
- break;
- }
-
- if (retry == 5)
- KGSL_CORE_ERR("GPMU: LLM failed to wake: 0x%X\n", r);
-}
-
-static bool llm_is_awake(struct adreno_device *adreno_dev)
-{
- unsigned int r;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- return r & WAKEUP_ACK;
-}
-
static void a540_lm_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- uint32_t agc_lm_config =
+ uint32_t agc_lm_config = AGC_BCL_DISABLED |
((ADRENO_CHIPID_PATCH(adreno_dev->chipid) & 0x3)
<< AGC_GPU_VERSION_SHIFT);
unsigned int r;
@@ -1518,11 +1424,6 @@ static void a540_lm_init(struct adreno_device *adreno_dev)
AGC_LM_CONFIG_ISENSE_ENABLE;
kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r);
- if (!(r & GPMU_BCL_ENABLED))
- agc_lm_config |= AGC_BCL_DISABLED;
-
- if (r & GPMU_LLM_ENABLED)
- agc_lm_config |= AGC_LLM_ENABLED;
if ((r & GPMU_ISENSE_STATUS) == GPMU_ISENSE_END_POINT_CAL_ERR) {
KGSL_CORE_ERR(
@@ -1551,9 +1452,6 @@ static void a540_lm_init(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK,
VOLTAGE_INTR_EN);
-
- if (lm_on(adreno_dev))
- wake_llm(adreno_dev);
}
@@ -1646,6 +1544,76 @@ static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
}
}
+static void a5xx_count_throttles(struct adreno_device *adreno_dev,
+ uint64_t adj)
+{
+ if (adreno_is_a530(adreno_dev))
+ kgsl_regread(KGSL_DEVICE(adreno_dev),
+ adreno_dev->lm_threshold_count,
+ &adreno_dev->lm_threshold_cross);
+ else if (adreno_is_a540(adreno_dev))
+ adreno_dev->lm_threshold_cross = adj;
+}
+
+static int a5xx_enable_pwr_counters(struct adreno_device *adreno_dev,
+ unsigned int counter)
+{
+ /*
+ * On 5XX we have to emulate the PWR counters which are physically
+ * missing. Program countable 6 on RBBM_PERFCTR_RBBM_0 as a substitute
+ * for PWR:1. Don't emulate PWR:0 as nobody uses it and we don't want
+ * to take away too many of the generic RBBM counters.
+ */
+
+ if (counter == 0)
+ return -EINVAL;
+
+ kgsl_regwrite(KGSL_DEVICE(adreno_dev), A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
+
+ return 0;
+}
+
+/* FW driven idle 10% throttle */
+#define IDLE_10PCT 0
+/* number of cycles when clock is throttled by 50% (CRC) */
+#define CRC_50PCT 1
+/* number of cycles when clock is throttled by more than 50% (CRC) */
+#define CRC_MORE50PCT 2
+/* number of cycles when clock is throttled by less than 50% (CRC) */
+#define CRC_LESS50PCT 3
+
+static uint64_t a5xx_read_throttling_counters(struct adreno_device *adreno_dev)
+{
+ int i, adj;
+ uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS];
+ struct adreno_busy_data *busy = &adreno_dev->busy_data;
+
+ if (!adreno_is_a540(adreno_dev))
+ return 0;
+
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
+ return 0;
+
+ if (!test_bit(ADRENO_THROTTLING_CTRL, &adreno_dev->pwrctrl_flag))
+ return 0;
+
+ for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
+ if (!adreno_dev->gpmu_throttle_counters[i])
+ return 0;
+
+ th[i] = counter_delta(KGSL_DEVICE(adreno_dev),
+ adreno_dev->gpmu_throttle_counters[i],
+ &busy->throttle_cycles[i]);
+ }
+ adj = th[CRC_MORE50PCT] - th[IDLE_10PCT];
+ adj = th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (adj < 0 ? 0 : adj) * 3;
+
+ trace_kgsl_clock_throttling(
+ th[IDLE_10PCT], th[CRC_50PCT],
+ th[CRC_MORE50PCT], th[CRC_LESS50PCT],
+ adj);
+ return adj;
+}
static void a5xx_enable_64bit(struct adreno_device *adreno_dev)
{
@@ -1665,14 +1633,6 @@ static void a5xx_enable_64bit(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}
-static void a5xx_pre_reset(struct adreno_device *adreno_dev)
-{
- if (adreno_is_a540(adreno_dev) && lm_on(adreno_dev)) {
- if (llm_is_awake(adreno_dev))
- sleep_llm(adreno_dev);
- }
-}
-
/*
* a5xx_gpmu_reset() - Re-enable GPMU based power features and restart GPMU
* @work: Pointer to the work struct for gpmu reset
@@ -1707,17 +1667,47 @@ static void a5xx_gpmu_reset(struct work_struct *work)
if (a5xx_regulator_enable(adreno_dev))
goto out;
- a5xx_pre_reset(adreno_dev);
-
/* Soft reset of the GPMU block */
kgsl_regwrite(device, A5XX_RBBM_BLOCK_SW_RESET_CMD, BIT(16));
+ /* GPU comes up in secured mode, make it unsecured by default */
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
+ kgsl_regwrite(device, A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+
+
a5xx_gpmu_init(adreno_dev);
out:
mutex_unlock(&device->mutex);
}
+static void _setup_throttling_counters(struct adreno_device *adreno_dev)
+{
+ int i, ret;
+
+ if (!adreno_is_a540(adreno_dev))
+ return;
+
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
+ return;
+
+ for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
+ /* reset throttled cycles value */
+ adreno_dev->busy_data.throttle_cycles[i] = 0;
+
+ if (adreno_dev->gpmu_throttle_counters[i] != 0)
+ continue;
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_GPMU_PWR,
+ ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i,
+ &adreno_dev->gpmu_throttle_counters[i],
+ NULL,
+ PERFCOUNTER_FLAG_KERNEL);
+ WARN_ONCE(ret, "Unable to get clock throttling counter %x\n",
+ ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i);
+ }
+}
+
/*
* a5xx_start() - Device start
* @adreno_dev: Pointer to adreno device
@@ -1729,6 +1719,21 @@ static void a5xx_start(struct adreno_device *adreno_dev)
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
unsigned int bit;
+ int ret;
+
+ if (adreno_is_a530(adreno_dev) && ADRENO_FEATURE(adreno_dev, ADRENO_LM)
+ && adreno_dev->lm_threshold_count == 0) {
+
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 27,
+ &adreno_dev->lm_threshold_count, NULL,
+ PERFCOUNTER_FLAG_KERNEL);
+ /* Ignore noncritical ret - used for debugfs */
+ if (ret)
+ adreno_dev->lm_threshold_count = 0;
+ }
+
+ _setup_throttling_counters(adreno_dev);
adreno_vbif_start(adreno_dev, a5xx_vbif_platforms,
ARRAY_SIZE(a5xx_vbif_platforms));
@@ -1875,6 +1880,11 @@ static void a5xx_start(struct adreno_device *adreno_dev)
*/
kgsl_regrmw(device, A5XX_RB_DBG_ECO_CNT, 0, (1 << 9));
}
+ /*
+ * Disable UCHE global filter as SP can invalidate/flush
+ * independently
+ */
+ kgsl_regwrite(device, A5XX_UCHE_MODE_CNTL, BIT(29));
/* Set the USE_RETENTION_FLOPS chicken bit */
kgsl_regwrite(device, A5XX_CP_CHICKEN_DBG, 0x02000000);
@@ -2029,11 +2039,6 @@ static int a5xx_post_start(struct adreno_device *adreno_dev)
static int a5xx_gpmu_init(struct adreno_device *adreno_dev)
{
int ret;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- /* GPU comes up in secured mode, make it unsecured by default */
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
- kgsl_regwrite(device, A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
/* Set up LM before initializing the GPMU */
a5xx_lm_init(adreno_dev);
@@ -2354,20 +2359,10 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev,
if (ret)
return ret;
- /* Set up LM before initializing the GPMU */
- a5xx_lm_init(adreno_dev);
-
- /* Enable SPTP based power collapse before enabling GPMU */
- a5xx_enable_pc(adreno_dev);
-
- /* Program the GPMU */
- ret = a5xx_gpmu_start(adreno_dev);
+ ret = a5xx_gpmu_init(adreno_dev);
if (ret)
return ret;
- /* Enable limits management */
- a5xx_lm_enable(adreno_dev);
-
a5xx_post_start(adreno_dev);
return 0;
@@ -3529,6 +3524,9 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
.regulator_enable = a5xx_regulator_enable,
.regulator_disable = a5xx_regulator_disable,
.pwrlevel_change_settings = a5xx_pwrlevel_change_settings,
+ .read_throttling_counters = a5xx_read_throttling_counters,
+ .count_throttles = a5xx_count_throttles,
+ .enable_pwr_counters = a5xx_enable_pwr_counters,
.preemption_pre_ibsubmit = a5xx_preemption_pre_ibsubmit,
.preemption_yield_enable =
a5xx_preemption_yield_enable,
@@ -3537,6 +3535,5 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
.preemption_init = a5xx_preemption_init,
.preemption_schedule = a5xx_preemption_schedule,
.enable_64bit = a5xx_enable_64bit,
- .pre_reset = a5xx_pre_reset,
.clk_set_options = a5xx_clk_set_options,
};
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
index 4baee4a5c0b1..09c550c9f58c 100644
--- a/drivers/gpu/msm/adreno_a5xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -37,7 +37,7 @@ static void _update_wptr(struct adreno_device *adreno_dev)
rb->wptr);
rb->dispatch_q.expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ msecs_to_jiffies(adreno_drawobj_timeout);
}
spin_unlock_irqrestore(&rb->preempt_lock, flags);
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index aeffeab2f6dc..c09d2f8c1947 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -410,8 +410,6 @@ static const unsigned int a5xx_registers[] = {
0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2,
/* GPMU */
0xA800, 0xA8FF, 0xAC60, 0xAC60,
- /* DPM */
- 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
};
struct a5xx_hlsq_sp_tp_regs {
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 680827e5b848..fffe08038bcd 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -129,7 +129,7 @@ typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
static void sync_event_print(struct seq_file *s,
- struct kgsl_cmdbatch_sync_event *sync_event)
+ struct kgsl_drawobj_sync_event *sync_event)
{
switch (sync_event->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
@@ -153,12 +153,12 @@ struct flag_entry {
const char *str;
};
-static const struct flag_entry cmdbatch_flags[] = {KGSL_CMDBATCH_FLAGS};
+static const struct flag_entry drawobj_flags[] = {KGSL_DRAWOBJ_FLAGS};
-static const struct flag_entry cmdbatch_priv[] = {
- { CMDBATCH_FLAG_SKIP, "skip"},
- { CMDBATCH_FLAG_FORCE_PREAMBLE, "force_preamble"},
- { CMDBATCH_FLAG_WFI, "wait_for_idle" },
+static const struct flag_entry cmdobj_priv[] = {
+ { CMDOBJ_SKIP, "skip"},
+ { CMDOBJ_FORCE_PREAMBLE, "force_preamble"},
+ { CMDOBJ_WFI, "wait_for_idle" },
};
static const struct flag_entry context_flags[] = {KGSL_CONTEXT_FLAGS};
@@ -199,42 +199,54 @@ static void print_flags(struct seq_file *s, const struct flag_entry *table,
seq_puts(s, "None");
}
-static void cmdbatch_print(struct seq_file *s, struct kgsl_cmdbatch *cmdbatch)
+static void syncobj_print(struct seq_file *s,
+ struct kgsl_drawobj_sync *syncobj)
{
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- /* print fences first, since they block this cmdbatch */
+ seq_puts(s, " syncobj ");
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ event = &syncobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(syncobj, i))
continue;
- /*
- * Timestamp is 0 for KGSL_CONTEXT_SYNC, but print it anyways
- * so that it is clear if the fence was a separate submit
- * or part of an IB submit.
- */
- seq_printf(s, "\t%d ", cmdbatch->timestamp);
sync_event_print(s, event);
seq_puts(s, "\n");
}
+}
- /* if this flag is set, there won't be an IB */
- if (cmdbatch->flags & KGSL_CONTEXT_SYNC)
- return;
+static void cmdobj_print(struct seq_file *s,
+ struct kgsl_drawobj_cmd *cmdobj)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- seq_printf(s, "\t%d: ", cmdbatch->timestamp);
+ if (drawobj->type == CMDOBJ_TYPE)
+ seq_puts(s, " cmdobj ");
+ else
+ seq_puts(s, " markerobj ");
- seq_puts(s, " flags: ");
- print_flags(s, cmdbatch_flags, ARRAY_SIZE(cmdbatch_flags),
- cmdbatch->flags);
+ seq_printf(s, "\t %d ", drawobj->timestamp);
seq_puts(s, " priv: ");
- print_flags(s, cmdbatch_priv, ARRAY_SIZE(cmdbatch_priv),
- cmdbatch->priv);
+ print_flags(s, cmdobj_priv, ARRAY_SIZE(cmdobj_priv),
+ cmdobj->priv);
+}
+
+static void drawobj_print(struct seq_file *s,
+ struct kgsl_drawobj *drawobj)
+{
+ if (drawobj->type == SYNCOBJ_TYPE)
+ syncobj_print(s, SYNCOBJ(drawobj));
+ else if ((drawobj->type == CMDOBJ_TYPE) ||
+ (drawobj->type == MARKEROBJ_TYPE))
+ cmdobj_print(s, CMDOBJ(drawobj));
+
+ seq_puts(s, " flags: ");
+ print_flags(s, drawobj_flags, ARRAY_SIZE(drawobj_flags),
+ drawobj->flags);
seq_puts(s, "\n");
}
@@ -285,13 +297,13 @@ static int ctx_print(struct seq_file *s, void *unused)
queued, consumed, retired,
drawctxt->internal_timestamp);
- seq_puts(s, "cmdqueue:\n");
+ seq_puts(s, "drawqueue:\n");
spin_lock(&drawctxt->lock);
- for (i = drawctxt->cmdqueue_head;
- i != drawctxt->cmdqueue_tail;
- i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE))
- cmdbatch_print(s, drawctxt->cmdqueue[i]);
+ for (i = drawctxt->drawqueue_head;
+ i != drawctxt->drawqueue_tail;
+ i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE))
+ drawobj_print(s, drawctxt->drawqueue[i]);
spin_unlock(&drawctxt->lock);
seq_puts(s, "events:\n");
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 522c32743d3d..cb4108b4e1f9 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -25,7 +25,7 @@
#include "adreno_trace.h"
#include "kgsl_sharedmem.h"
-#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
/* Time in ms after which the dispatcher tries to schedule an unscheduled RB */
unsigned int adreno_dispatch_starvation_time = 2000;
@@ -43,13 +43,13 @@ unsigned int adreno_dispatch_time_slice = 25;
unsigned int adreno_disp_preempt_fair_sched;
/* Number of commands that can be queued in a context before it sleeps */
-static unsigned int _context_cmdqueue_size = 50;
+static unsigned int _context_drawqueue_size = 50;
/* Number of milliseconds to wait for the context queue to clear */
static unsigned int _context_queue_wait = 10000;
-/* Number of command batches sent at a time from a single context */
-static unsigned int _context_cmdbatch_burst = 5;
+/* Number of drawobjs sent at a time from a single context */
+static unsigned int _context_drawobj_burst = 5;
/*
* GFT throttle parameters. If GFT recovered more than
@@ -73,24 +73,25 @@ static unsigned int _dispatcher_q_inflight_hi = 15;
static unsigned int _dispatcher_q_inflight_lo = 4;
/* Command batch timeout (in milliseconds) */
-unsigned int adreno_cmdbatch_timeout = 2000;
+unsigned int adreno_drawobj_timeout = 2000;
/* Interval for reading and comparing fault detection registers */
static unsigned int _fault_timer_interval = 200;
-#define CMDQUEUE_RB(_cmdqueue) \
+#define DRAWQUEUE_RB(_drawqueue) \
((struct adreno_ringbuffer *) \
- container_of((_cmdqueue), struct adreno_ringbuffer, dispatch_q))
+ container_of((_drawqueue),\
+ struct adreno_ringbuffer, dispatch_q))
-#define CMDQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
+#define DRAWQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
-static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue);
+static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue);
-static inline bool cmdqueue_is_current(
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static inline bool drawqueue_is_current(
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- struct adreno_ringbuffer *rb = CMDQUEUE_RB(cmdqueue);
+ struct adreno_ringbuffer *rb = DRAWQUEUE_RB(drawqueue);
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
return (adreno_dev->cur_rb == rb);
@@ -114,7 +115,8 @@ static int __count_context(struct adreno_context *drawctxt, void *data)
return time_after(jiffies, expires) ? 0 : 1;
}
-static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data)
+static int __count_drawqueue_context(struct adreno_context *drawctxt,
+ void *data)
{
unsigned long expires = drawctxt->active_time + msecs_to_jiffies(100);
@@ -122,7 +124,7 @@ static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data)
return 0;
return (&drawctxt->rb->dispatch_q ==
- (struct adreno_dispatcher_cmdqueue *) data) ? 1 : 0;
+ (struct adreno_dispatcher_drawqueue *) data) ? 1 : 0;
}
static int _adreno_count_active_contexts(struct adreno_device *adreno_dev,
@@ -142,7 +144,7 @@ static int _adreno_count_active_contexts(struct adreno_device *adreno_dev,
}
static void _track_context(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue,
+ struct adreno_dispatcher_drawqueue *drawqueue,
struct adreno_context *drawctxt)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -154,9 +156,9 @@ static void _track_context(struct adreno_device *adreno_dev,
device->active_context_count =
_adreno_count_active_contexts(adreno_dev,
__count_context, NULL);
- cmdqueue->active_context_count =
+ drawqueue->active_context_count =
_adreno_count_active_contexts(adreno_dev,
- __count_cmdqueue_context, cmdqueue);
+ __count_drawqueue_context, drawqueue);
spin_unlock(&adreno_dev->active_list_lock);
}
@@ -169,9 +171,9 @@ static void _track_context(struct adreno_device *adreno_dev,
*/
static inline int
-_cmdqueue_inflight(struct adreno_dispatcher_cmdqueue *cmdqueue)
+_drawqueue_inflight(struct adreno_dispatcher_drawqueue *drawqueue)
{
- return (cmdqueue->active_context_count > 1)
+ return (drawqueue->active_context_count > 1)
? _dispatcher_q_inflight_lo : _dispatcher_q_inflight_hi;
}
@@ -271,20 +273,20 @@ static void start_fault_timer(struct adreno_device *adreno_dev)
}
/**
- * _retire_marker() - Retire a marker command batch without sending it to the
- * hardware
- * @cmdbatch: Pointer to the cmdbatch to retire
+ * _retire_timestamp() - Retire object without sending it
+ * to the hardware
+ * @drawobj: Pointer to the object to retire
*
- * In some cases marker commands can be retired by the software without going to
- * the GPU. In those cases, update the memstore from the CPU, kick off the
- * event engine to handle expired events and destroy the command batch.
+ * In some cases ibs can be retired by the software
+ * without going to the GPU. In those cases, update the
+ * memstore from the CPU, kick off the event engine to handle
+ * expired events and destroy the ib.
*/
-static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
+static void _retire_timestamp(struct kgsl_drawobj *drawobj)
{
- struct kgsl_context *context = cmdbatch->context;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_context *context = drawobj->context;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
struct kgsl_device *device = context->device;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
/*
* Write the start and end timestamp to the memstore to keep the
@@ -292,11 +294,11 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
*/
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
- cmdbatch->timestamp);
+ drawobj->timestamp);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
- cmdbatch->timestamp);
+ drawobj->timestamp);
/* Retire pending GPU events for the object */
@@ -307,13 +309,13 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
* rptr scratch out address. At this point GPU clocks turned off.
* So avoid reading GPU register directly for A3xx.
*/
- if (adreno_is_a3xx(adreno_dev))
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
- 0);
+ if (adreno_is_a3xx(ADRENO_DEVICE(device)))
+ trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb,
+ 0, 0);
else
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
- adreno_get_rptr(drawctxt->rb));
- kgsl_cmdbatch_destroy(cmdbatch);
+ trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb,
+ adreno_get_rptr(drawctxt->rb), 0);
+ kgsl_drawobj_destroy(drawobj);
}
static int _check_context_queue(struct adreno_context *drawctxt)
@@ -330,7 +332,7 @@ static int _check_context_queue(struct adreno_context *drawctxt)
if (kgsl_context_invalid(&drawctxt->base))
ret = 1;
else
- ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
+ ret = drawctxt->queued < _context_drawqueue_size ? 1 : 0;
spin_unlock(&drawctxt->lock);
@@ -341,176 +343,151 @@ static int _check_context_queue(struct adreno_context *drawctxt)
* return true if this is a marker command and the dependent timestamp has
* retired
*/
-static bool _marker_expired(struct kgsl_cmdbatch *cmdbatch)
-{
- return (cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- kgsl_check_timestamp(cmdbatch->device, cmdbatch->context,
- cmdbatch->marker_timestamp);
-}
-
-static inline void _pop_cmdbatch(struct adreno_context *drawctxt)
+static bool _marker_expired(struct kgsl_drawobj_cmd *markerobj)
{
- drawctxt->cmdqueue_head = CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
- ADRENO_CONTEXT_CMDQUEUE_SIZE);
- drawctxt->queued--;
-}
-/**
- * Removes all expired marker and sync cmdbatches from
- * the context queue when marker command and dependent
- * timestamp are retired. This function is recursive.
- * returns cmdbatch if context has command, NULL otherwise.
- */
-static struct kgsl_cmdbatch *_expire_markers(struct adreno_context *drawctxt)
-{
- struct kgsl_cmdbatch *cmdbatch;
-
- if (drawctxt->cmdqueue_head == drawctxt->cmdqueue_tail)
- return NULL;
-
- cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
-
- if (cmdbatch == NULL)
- return NULL;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj);
- /* Check to see if this is a marker we can skip over */
- if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- _marker_expired(cmdbatch)) {
- _pop_cmdbatch(drawctxt);
- _retire_marker(cmdbatch);
- return _expire_markers(drawctxt);
- }
-
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- if (!kgsl_cmdbatch_events_pending(cmdbatch)) {
- _pop_cmdbatch(drawctxt);
- kgsl_cmdbatch_destroy(cmdbatch);
- return _expire_markers(drawctxt);
- }
- }
-
- return cmdbatch;
+ return (drawobj->flags & KGSL_DRAWOBJ_MARKER) &&
+ kgsl_check_timestamp(drawobj->device, drawobj->context,
+ markerobj->marker_timestamp);
}
-static void expire_markers(struct adreno_context *drawctxt)
+static inline void _pop_drawobj(struct adreno_context *drawctxt)
{
- spin_lock(&drawctxt->lock);
- _expire_markers(drawctxt);
- spin_unlock(&drawctxt->lock);
+ drawctxt->drawqueue_head = DRAWQUEUE_NEXT(drawctxt->drawqueue_head,
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE);
+ drawctxt->queued--;
}
-static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
+static int _retire_markerobj(struct kgsl_drawobj_cmd *cmdobj,
+ struct adreno_context *drawctxt)
{
- struct kgsl_cmdbatch *cmdbatch;
- bool pending = false;
-
- cmdbatch = _expire_markers(drawctxt);
-
- if (cmdbatch == NULL)
- return NULL;
+ if (_marker_expired(cmdobj)) {
+ _pop_drawobj(drawctxt);
+ _retire_timestamp(DRAWOBJ(cmdobj));
+ return 0;
+ }
/*
- * If the marker isn't expired but the SKIP bit is set
- * then there are real commands following this one in
- * the queue. This means that we need to dispatch the
- * command so that we can keep the timestamp accounting
- * correct. If skip isn't set then we block this queue
+ * If the marker isn't expired but the SKIP bit
+ * is set then there are real commands following
+ * this one in the queue. This means that we
+ * need to dispatch the command so that we can
+ * keep the timestamp accounting correct. If
+ * skip isn't set then we block this queue
* until the dependent timestamp expires
*/
- if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)))
- pending = true;
+ return test_bit(CMDOBJ_SKIP, &cmdobj->priv) ? 1 : -EAGAIN;
+}
- if (kgsl_cmdbatch_events_pending(cmdbatch))
- pending = true;
+static int _retire_syncobj(struct kgsl_drawobj_sync *syncobj,
+ struct adreno_context *drawctxt)
+{
+ if (!kgsl_drawobj_events_pending(syncobj)) {
+ _pop_drawobj(drawctxt);
+ kgsl_drawobj_destroy(DRAWOBJ(syncobj));
+ return 0;
+ }
/*
- * If changes are pending and the canary timer hasn't been
- * started yet, start it
+ * If we got here, there are pending events for sync object.
+ * Start the canary timer if it hasn't been started already.
*/
- if (pending) {
- /*
- * If syncpoints are pending start the canary timer if
- * it hasn't already been started
- */
- if (!cmdbatch->timeout_jiffies) {
- cmdbatch->timeout_jiffies =
- jiffies + msecs_to_jiffies(5000);
- mod_timer(&cmdbatch->timer, cmdbatch->timeout_jiffies);
- }
-
- return ERR_PTR(-EAGAIN);
+ if (!syncobj->timeout_jiffies) {
+ syncobj->timeout_jiffies = jiffies + msecs_to_jiffies(5000);
+ mod_timer(&syncobj->timer, syncobj->timeout_jiffies);
}
- _pop_cmdbatch(drawctxt);
- return cmdbatch;
+ return -EAGAIN;
}
-/**
- * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
- * @drawctxt: Pointer to the adreno draw context
- *
- * Dequeue a new command batch from the context list
+/*
+ * Retires all expired marker and sync objs from the context
+ * queue and returns one of the below
+ * a) next drawobj that needs to be sent to ringbuffer
+ * b) -EAGAIN for syncobj with syncpoints pending.
+ * c) -EAGAIN for markerobj whose marker timestamp has not expired yet.
+ * d) NULL for no commands remaining in drawqueue.
*/
-static struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
- struct adreno_context *drawctxt)
+static struct kgsl_drawobj *_process_drawqueue_get_next_drawobj(
+ struct adreno_context *drawctxt)
{
- struct kgsl_cmdbatch *cmdbatch;
+ struct kgsl_drawobj *drawobj;
+ unsigned int i = drawctxt->drawqueue_head;
+ int ret = 0;
- spin_lock(&drawctxt->lock);
- cmdbatch = _get_cmdbatch(drawctxt);
- spin_unlock(&drawctxt->lock);
+ if (drawctxt->drawqueue_head == drawctxt->drawqueue_tail)
+ return NULL;
- /*
- * Delete the timer and wait for timer handler to finish executing
- * on another core before queueing the buffer. We must do this
- * without holding any spin lock that the timer handler might be using
- */
- if (!IS_ERR_OR_NULL(cmdbatch))
- del_timer_sync(&cmdbatch->timer);
+ for (i = drawctxt->drawqueue_head; i != drawctxt->drawqueue_tail;
+ i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE)) {
+
+ drawobj = drawctxt->drawqueue[i];
+
+ if (drawobj == NULL)
+ return NULL;
+
+ if (drawobj->type == CMDOBJ_TYPE)
+ return drawobj;
+ else if (drawobj->type == MARKEROBJ_TYPE) {
+ ret = _retire_markerobj(CMDOBJ(drawobj), drawctxt);
+ /* Special case where marker needs to be sent to GPU */
+ if (ret == 1)
+ return drawobj;
+ } else if (drawobj->type == SYNCOBJ_TYPE)
+ ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt);
+
+ if (ret == -EAGAIN)
+ return ERR_PTR(-EAGAIN);
+
+ continue;
+ }
- return cmdbatch;
+ return NULL;
}
/**
- * adreno_dispatcher_requeue_cmdbatch() - Put a command back on the context
+ * adreno_dispatcher_requeue_cmdobj() - Put a command back on the context
* queue
* @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the KGSL cmdbatch to requeue
+ * @cmdobj: Pointer to the KGSL command object to requeue
*
* Failure to submit a command to the ringbuffer isn't the fault of the command
* being submitted so if a failure happens, push it back on the head of the the
* context queue to be reconsidered again unless the context got detached.
*/
-static inline int adreno_dispatcher_requeue_cmdbatch(
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch)
+static inline int adreno_dispatcher_requeue_cmdobj(
+ struct adreno_context *drawctxt,
+ struct kgsl_drawobj_cmd *cmdobj)
{
unsigned int prev;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
spin_lock(&drawctxt->lock);
if (kgsl_context_detached(&drawctxt->base) ||
kgsl_context_invalid(&drawctxt->base)) {
spin_unlock(&drawctxt->lock);
- /* get rid of this cmdbatch since the context is bad */
- kgsl_cmdbatch_destroy(cmdbatch);
+ /* get rid of this drawobj since the context is bad */
+ kgsl_drawobj_destroy(drawobj);
return -ENOENT;
}
- prev = drawctxt->cmdqueue_head == 0 ?
- (ADRENO_CONTEXT_CMDQUEUE_SIZE - 1) :
- (drawctxt->cmdqueue_head - 1);
+ prev = drawctxt->drawqueue_head == 0 ?
+ (ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1) :
+ (drawctxt->drawqueue_head - 1);
/*
* The maximum queue size always needs to be one less then the size of
- * the ringbuffer queue so there is "room" to put the cmdbatch back in
+ * the ringbuffer queue so there is "room" to put the drawobj back in
*/
- BUG_ON(prev == drawctxt->cmdqueue_tail);
+ WARN_ON(prev == drawctxt->drawqueue_tail);
- drawctxt->cmdqueue[prev] = cmdbatch;
+ drawctxt->drawqueue[prev] = drawobj;
drawctxt->queued++;
/* Reset the command queue head to reflect the newly requeued change */
- drawctxt->cmdqueue_head = prev;
+ drawctxt->drawqueue_head = prev;
spin_unlock(&drawctxt->lock);
return 0;
}
@@ -545,21 +522,22 @@ static void dispatcher_queue_context(struct adreno_device *adreno_dev,
}
/**
- * sendcmd() - Send a command batch to the GPU hardware
+ * sendcmd() - Send a drawobj to the GPU hardware
* @dispatcher: Pointer to the adreno dispatcher struct
- * @cmdbatch: Pointer to the KGSL cmdbatch being sent
+ * @drawobj: Pointer to the KGSL drawobj being sent
*
- * Send a KGSL command batch to the GPU hardware
+ * Send a KGSL drawobj to the GPU hardware
*/
static int sendcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
- struct adreno_dispatcher_cmdqueue *dispatch_q =
- ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
+ struct adreno_dispatcher_drawqueue *dispatch_q =
+ ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj);
struct adreno_submit_time time;
uint64_t secs = 0;
unsigned long nsecs = 0;
@@ -588,15 +566,15 @@ static int sendcmd(struct adreno_device *adreno_dev,
set_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
}
- if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv)) {
- set_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv);
- cmdbatch->profile_index = adreno_dev->cmdbatch_profile_index;
- adreno_dev->cmdbatch_profile_index =
- (adreno_dev->cmdbatch_profile_index + 1) %
- ADRENO_CMDBATCH_PROFILE_COUNT;
+ if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv)) {
+ set_bit(CMDOBJ_PROFILE, &cmdobj->priv);
+ cmdobj->profile_index = adreno_dev->profile_index;
+ adreno_dev->profile_index =
+ (adreno_dev->profile_index + 1) %
+ ADRENO_DRAWOBJ_PROFILE_COUNT;
}
- ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch, &time);
+ ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdobj, &time);
/*
* On the first command, if the submission was successful, then read the
@@ -649,17 +627,17 @@ static int sendcmd(struct adreno_device *adreno_dev,
secs = time.ktime;
nsecs = do_div(secs, 1000000000);
- trace_adreno_cmdbatch_submitted(cmdbatch, (int) dispatcher->inflight,
+ trace_adreno_cmdbatch_submitted(drawobj, (int) dispatcher->inflight,
time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb,
adreno_get_rptr(drawctxt->rb));
mutex_unlock(&device->mutex);
- cmdbatch->submit_ticks = time.ticks;
+ cmdobj->submit_ticks = time.ticks;
- dispatch_q->cmd_q[dispatch_q->tail] = cmdbatch;
+ dispatch_q->cmd_q[dispatch_q->tail] = cmdobj;
dispatch_q->tail = (dispatch_q->tail + 1) %
- ADRENO_DISPATCH_CMDQUEUE_SIZE;
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE;
/*
* For the first submission in any given command queue update the
@@ -670,7 +648,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
if (dispatch_q->inflight == 1)
dispatch_q->expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ msecs_to_jiffies(adreno_drawobj_timeout);
/*
* If we believe ourselves to be current and preemption isn't a thing,
@@ -678,7 +656,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
* thing and the timer will be set up in due time
*/
if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
- if (cmdqueue_is_current(dispatch_q))
+ if (drawqueue_is_current(dispatch_q))
mod_timer(&dispatcher->timer, dispatch_q->expires);
}
@@ -704,75 +682,70 @@ static int sendcmd(struct adreno_device *adreno_dev,
static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt)
{
- struct adreno_dispatcher_cmdqueue *dispatch_q =
+ struct adreno_dispatcher_drawqueue *dispatch_q =
&(drawctxt->rb->dispatch_q);
int count = 0;
int ret = 0;
- int inflight = _cmdqueue_inflight(dispatch_q);
+ int inflight = _drawqueue_inflight(dispatch_q);
unsigned int timestamp;
if (dispatch_q->inflight >= inflight) {
- expire_markers(drawctxt);
+ spin_lock(&drawctxt->lock);
+ _process_drawqueue_get_next_drawobj(drawctxt);
+ spin_unlock(&drawctxt->lock);
return -EBUSY;
}
/*
- * Each context can send a specific number of command batches per cycle
+ * Each context can send a specific number of drawobjs per cycle
*/
- while ((count < _context_cmdbatch_burst) &&
+ while ((count < _context_drawobj_burst) &&
(dispatch_q->inflight < inflight)) {
- struct kgsl_cmdbatch *cmdbatch;
+ struct kgsl_drawobj *drawobj;
+ struct kgsl_drawobj_cmd *cmdobj;
if (adreno_gpu_fault(adreno_dev) != 0)
break;
- cmdbatch = adreno_dispatcher_get_cmdbatch(drawctxt);
+ spin_lock(&drawctxt->lock);
+ drawobj = _process_drawqueue_get_next_drawobj(drawctxt);
/*
- * adreno_context_get_cmdbatch returns -EAGAIN if the current
- * cmdbatch has pending sync points so no more to do here.
+ * adreno_context_get_drawobj returns -EAGAIN if the current
+ * drawobj has pending sync points so no more to do here.
* When the sync points are satisfied then the context will get
* reqeueued
*/
- if (IS_ERR_OR_NULL(cmdbatch)) {
- if (IS_ERR(cmdbatch))
- ret = PTR_ERR(cmdbatch);
+ if (IS_ERR_OR_NULL(drawobj)) {
+ if (IS_ERR(drawobj))
+ ret = PTR_ERR(drawobj);
+ spin_unlock(&drawctxt->lock);
break;
}
+ _pop_drawobj(drawctxt);
+ spin_unlock(&drawctxt->lock);
- /*
- * If this is a synchronization submission then there are no
- * commands to submit. Discard it and get the next item from
- * the queue. Decrement count so this packet doesn't count
- * against the burst for the context
- */
-
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- kgsl_cmdbatch_destroy(cmdbatch);
- continue;
- }
-
- timestamp = cmdbatch->timestamp;
-
- ret = sendcmd(adreno_dev, cmdbatch);
+ timestamp = drawobj->timestamp;
+ cmdobj = CMDOBJ(drawobj);
+ ret = sendcmd(adreno_dev, cmdobj);
/*
- * On error from sendcmd() try to requeue the command batch
+ * On error from sendcmd() try to requeue the cmdobj
* unless we got back -ENOENT which means that the context has
* been detached and there will be no more deliveries from here
*/
if (ret != 0) {
- /* Destroy the cmdbatch on -ENOENT */
+ /* Destroy the cmdobj on -ENOENT */
if (ret == -ENOENT)
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
else {
/*
* If the requeue returns an error, return that
* instead of whatever sendcmd() sent us
*/
- int r = adreno_dispatcher_requeue_cmdbatch(
- drawctxt, cmdbatch);
+ int r = adreno_dispatcher_requeue_cmdobj(
+ drawctxt, cmdobj);
if (r)
ret = r;
}
@@ -934,99 +907,87 @@ static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
/**
* get_timestamp() - Return the next timestamp for the context
* @drawctxt - Pointer to an adreno draw context struct
- * @cmdbatch - Pointer to a command batch
+ * @drawobj - Pointer to a drawobj
* @timestamp - Pointer to a timestamp value possibly passed from the user
+ * @user_ts - user generated timestamp
*
* Assign a timestamp based on the settings of the draw context and the command
* batch.
*/
static int get_timestamp(struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch, unsigned int *timestamp)
+ struct kgsl_drawobj *drawobj, unsigned int *timestamp,
+ unsigned int user_ts)
{
- /* Synchronization commands don't get a timestamp */
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- *timestamp = 0;
- return 0;
- }
if (drawctxt->base.flags & KGSL_CONTEXT_USER_GENERATED_TS) {
/*
* User specified timestamps need to be greater than the last
* issued timestamp in the context
*/
- if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0)
+ if (timestamp_cmp(drawctxt->timestamp, user_ts) >= 0)
return -ERANGE;
- drawctxt->timestamp = *timestamp;
+ drawctxt->timestamp = user_ts;
} else
drawctxt->timestamp++;
*timestamp = drawctxt->timestamp;
+ drawobj->timestamp = *timestamp;
return 0;
}
-/**
- * adreno_dispactcher_queue_cmd() - Queue a new command in the context
- * @adreno_dev: Pointer to the adreno device struct
- * @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the command batch being submitted
- * @timestamp: Pointer to the requested timestamp
- *
- * Queue a command in the context - if there isn't any room in the queue, then
- * block until there is
- */
-int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp)
+static void _set_ft_policy(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ struct kgsl_drawobj_cmd *cmdobj)
{
- struct adreno_dispatcher_cmdqueue *dispatch_q =
- ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch);
- int ret;
-
- spin_lock(&drawctxt->lock);
-
- if (kgsl_context_detached(&drawctxt->base)) {
- spin_unlock(&drawctxt->lock);
- return -ENOENT;
- }
+ /*
+ * Set the fault tolerance policy for the command batch - assuming the
+ * context hasn't disabled FT use the current device policy
+ */
+ if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+ set_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy);
+ else
+ cmdobj->fault_policy = adreno_dev->ft_policy;
+}
+static void _cmdobj_set_flags(struct adreno_context *drawctxt,
+ struct kgsl_drawobj_cmd *cmdobj)
+{
/*
* Force the preamble for this submission only - this is usually
* requested by the dispatcher as part of fault recovery
*/
-
if (test_and_clear_bit(ADRENO_CONTEXT_FORCE_PREAMBLE,
&drawctxt->base.priv))
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
/*
- * Force the premable if set from userspace in the context or cmdbatch
- * flags
+ * Force the preamble if set from userspace in the context or
+ * command obj flags
*/
-
if ((drawctxt->base.flags & KGSL_CONTEXT_CTX_SWITCH) ||
- (cmdbatch->flags & KGSL_CMDBATCH_CTX_SWITCH))
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ (cmdobj->base.flags & KGSL_DRAWOBJ_CTX_SWITCH))
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
- /* Skip this cmdbatch commands if IFH_NOP is enabled */
+ /* Skip this ib if IFH_NOP is enabled */
if (drawctxt->base.flags & KGSL_CONTEXT_IFH_NOP)
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+ set_bit(CMDOBJ_SKIP, &cmdobj->priv);
/*
* If we are waiting for the end of frame and it hasn't appeared yet,
- * then mark the command batch as skipped. It will still progress
+ * then mark the command obj as skipped. It will still progress
* through the pipeline but it won't actually send any commands
*/
if (test_bit(ADRENO_CONTEXT_SKIP_EOF, &drawctxt->base.priv)) {
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+ set_bit(CMDOBJ_SKIP, &cmdobj->priv);
/*
- * If this command batch represents the EOF then clear the way
+ * If this command obj represents the EOF then clear the way
* for the dispatcher to continue submitting
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_END_OF_FRAME) {
+ if (cmdobj->base.flags & KGSL_DRAWOBJ_END_OF_FRAME) {
clear_bit(ADRENO_CONTEXT_SKIP_EOF,
&drawctxt->base.priv);
@@ -1038,10 +999,84 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
&drawctxt->base.priv);
}
}
+}
- /* Wait for room in the context queue */
+static inline int _check_context_state(struct kgsl_context *context)
+{
+ if (kgsl_context_invalid(context))
+ return -EDEADLK;
+
+ if (kgsl_context_detached(context))
+ return -ENOENT;
+
+ return 0;
+}
+
+static inline bool _verify_ib(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_memobj_node *ib)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+
+ /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */
+ if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) {
+ pr_context(device, context, "ctxt %d invalid ib size %lld\n",
+ context->id, ib->size);
+ return false;
+ }
+
+ /* Make sure that the address is mapped */
+ if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) {
+ pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n",
+ context->id, ib->gpuaddr);
+ return false;
+ }
+
+ return true;
+}
+
+static inline int _verify_cmdobj(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_memobj_node *ib;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ /* Verify the IBs before they get queued */
+ if (drawobj[i]->type == CMDOBJ_TYPE) {
+ struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj[i]);
+
+ list_for_each_entry(ib, &cmdobj->cmdlist, node)
+ if (_verify_ib(dev_priv,
+ &ADRENO_CONTEXT(context)->base, ib)
+ == false)
+ return -EINVAL;
+ /*
+ * Clear the wake on touch bit to indicate an IB has
+ * been submitted since the last time we set it.
+ * But only clear it when we have rendering commands.
+ */
+ device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH;
+ }
+
+ /* A3XX does not have support for drawobj profiling */
+ if (adreno_is_a3xx(ADRENO_DEVICE(device)) &&
+ (drawobj[i]->flags & KGSL_DRAWOBJ_PROFILING))
+ return -EOPNOTSUPP;
+ }
- while (drawctxt->queued >= _context_cmdqueue_size) {
+ return 0;
+}
+
+static inline int _wait_for_room_in_context_queue(
+ struct adreno_context *drawctxt)
+{
+ int ret = 0;
+
+ /* Wait for room in the context queue */
+ while (drawctxt->queued >= _context_drawqueue_size) {
trace_adreno_drawctxt_sleep(drawctxt);
spin_unlock(&drawctxt->lock);
@@ -1052,98 +1087,210 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
spin_lock(&drawctxt->lock);
trace_adreno_drawctxt_wake(drawctxt);
- if (ret <= 0) {
- spin_unlock(&drawctxt->lock);
+ if (ret <= 0)
return (ret == 0) ? -ETIMEDOUT : (int) ret;
- }
}
+
+ return 0;
+}
+
+static unsigned int _check_context_state_to_queue_cmds(
+ struct adreno_context *drawctxt)
+{
+ int ret = _check_context_state(&drawctxt->base);
+
+ if (ret)
+ return ret;
+
+ ret = _wait_for_room_in_context_queue(drawctxt);
+ if (ret)
+ return ret;
+
/*
* Account for the possiblity that the context got invalidated
* while we were sleeping
*/
+ return _check_context_state(&drawctxt->base);
+}
- if (kgsl_context_invalid(&drawctxt->base)) {
- spin_unlock(&drawctxt->lock);
- return -EDEADLK;
- }
- if (kgsl_context_detached(&drawctxt->base)) {
- spin_unlock(&drawctxt->lock);
- return -ENOENT;
- }
+static void _queue_drawobj(struct adreno_context *drawctxt,
+ struct kgsl_drawobj *drawobj)
+{
+ /* Put the command into the queue */
+ drawctxt->drawqueue[drawctxt->drawqueue_tail] = drawobj;
+ drawctxt->drawqueue_tail = (drawctxt->drawqueue_tail + 1) %
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE;
+ drawctxt->queued++;
+ trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
+}
- ret = get_timestamp(drawctxt, cmdbatch, timestamp);
- if (ret) {
- spin_unlock(&drawctxt->lock);
+static int _queue_markerobj(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *markerobj,
+ uint32_t *timestamp, unsigned int user_ts)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj);
+ int ret;
+
+ ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts);
+ if (ret)
return ret;
+
+ /*
+ * See if we can fastpath this thing - if nothing is queued
+ * and nothing is inflight retire without bothering the GPU
+ */
+ if (!drawctxt->queued && kgsl_check_timestamp(drawobj->device,
+ drawobj->context, drawctxt->queued_timestamp)) {
+ trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
+ _retire_timestamp(drawobj);
+ return 1;
}
- cmdbatch->timestamp = *timestamp;
+ /*
+ * Remember the last queued timestamp - the marker will block
+ * until that timestamp is expired (unless another command
+ * comes along and forces the marker to execute)
+ */
- if (cmdbatch->flags & KGSL_CMDBATCH_MARKER) {
+ markerobj->marker_timestamp = drawctxt->queued_timestamp;
+ drawctxt->queued_timestamp = *timestamp;
+ _set_ft_policy(adreno_dev, drawctxt, markerobj);
+ _cmdobj_set_flags(drawctxt, markerobj);
- /*
- * See if we can fastpath this thing - if nothing is queued
- * and nothing is inflight retire without bothering the GPU
- */
+ _queue_drawobj(drawctxt, drawobj);
- if (!drawctxt->queued && kgsl_check_timestamp(cmdbatch->device,
- cmdbatch->context, drawctxt->queued_timestamp)) {
- trace_adreno_cmdbatch_queued(cmdbatch,
- drawctxt->queued);
+ return 0;
+}
- _retire_marker(cmdbatch);
- spin_unlock(&drawctxt->lock);
- return 0;
- }
+static int _queue_cmdobj(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *cmdobj,
+ uint32_t *timestamp, unsigned int user_ts)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ unsigned int j;
+ int ret;
- /*
- * Remember the last queued timestamp - the marker will block
- * until that timestamp is expired (unless another command
- * comes along and forces the marker to execute)
- */
+ ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts);
+ if (ret)
+ return ret;
+
+ /*
+ * If this is a real command then we need to force any markers
+ * queued before it to dispatch to keep time linear - set the
+ * skip bit so the commands get NOPed.
+ */
+ j = drawctxt->drawqueue_head;
+
+ while (j != drawctxt->drawqueue_tail) {
+ if (drawctxt->drawqueue[j]->type == MARKEROBJ_TYPE) {
+ struct kgsl_drawobj_cmd *markerobj =
+ CMDOBJ(drawctxt->drawqueue[j]);
+ set_bit(CMDOBJ_SKIP, &markerobj->priv);
+ }
- cmdbatch->marker_timestamp = drawctxt->queued_timestamp;
+ j = DRAWQUEUE_NEXT(j, ADRENO_CONTEXT_DRAWQUEUE_SIZE);
}
- /* SYNC commands have timestamp 0 and will get optimized out anyway */
- if (!(cmdbatch->flags & KGSL_CONTEXT_SYNC))
- drawctxt->queued_timestamp = *timestamp;
+ drawctxt->queued_timestamp = *timestamp;
+ _set_ft_policy(adreno_dev, drawctxt, cmdobj);
+ _cmdobj_set_flags(drawctxt, cmdobj);
- /*
- * Set the fault tolerance policy for the command batch - assuming the
- * context hasn't disabled FT use the current device policy
- */
+ _queue_drawobj(drawctxt, drawobj);
- if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
- set_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy);
- else
- cmdbatch->fault_policy = adreno_dev->ft_policy;
+ return 0;
+}
- /* Put the command into the queue */
- drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch;
- drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
+static void _queue_syncobj(struct adreno_context *drawctxt,
+ struct kgsl_drawobj_sync *syncobj, uint32_t *timestamp)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
- /*
- * If this is a real command then we need to force any markers queued
- * before it to dispatch to keep time linear - set the skip bit so
- * the commands get NOPed.
- */
+ *timestamp = 0;
+ drawobj->timestamp = 0;
- if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)) {
- unsigned int i = drawctxt->cmdqueue_head;
+ _queue_drawobj(drawctxt, drawobj);
+}
- while (i != drawctxt->cmdqueue_tail) {
- if (drawctxt->cmdqueue[i]->flags & KGSL_CMDBATCH_MARKER)
- set_bit(CMDBATCH_FLAG_SKIP,
- &drawctxt->cmdqueue[i]->priv);
+/**
+ * adreno_dispatcher_queue_cmds() - Queue a new draw object in the context
+ * @dev_priv: Pointer to the device private struct
+ * @context: Pointer to the kgsl draw context
+ * @drawobj: Pointer to the array of drawobj's being submitted
+ * @count: Number of drawobj's being submitted
+ * @timestamp: Pointer to the requested timestamp
+ *
+ * Queue a command in the context - if there isn't any room in the queue, then
+ * block until there is
+ */
+int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count, uint32_t *timestamp)
- i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE);
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ struct adreno_dispatcher_drawqueue *dispatch_q;
+ int ret;
+ unsigned int i, user_ts;
+
+ ret = _check_context_state(&drawctxt->base);
+ if (ret)
+ return ret;
+
+ ret = _verify_cmdobj(dev_priv, context, drawobj, count);
+ if (ret)
+ return ret;
+
+ /* wait for the suspend gate */
+ wait_for_completion(&device->halt_gate);
+
+ spin_lock(&drawctxt->lock);
+
+ ret = _check_context_state_to_queue_cmds(drawctxt);
+ if (ret) {
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+
+ user_ts = *timestamp;
+
+ for (i = 0; i < count; i++) {
+
+ switch (drawobj[i]->type) {
+ case MARKEROBJ_TYPE:
+ ret = _queue_markerobj(adreno_dev, drawctxt,
+ CMDOBJ(drawobj[i]),
+ timestamp, user_ts);
+ if (ret == 1) {
+ spin_unlock(&drawctxt->lock);
+ goto done;
+ } else if (ret) {
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+ break;
+ case CMDOBJ_TYPE:
+ ret = _queue_cmdobj(adreno_dev, drawctxt,
+ CMDOBJ(drawobj[i]),
+ timestamp, user_ts);
+ if (ret) {
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+ break;
+ case SYNCOBJ_TYPE:
+ _queue_syncobj(drawctxt, SYNCOBJ(drawobj[i]),
+ timestamp);
+ break;
+ default:
+ spin_unlock(&drawctxt->lock);
+ return -EINVAL;
}
+
}
- drawctxt->queued++;
- trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);
+ dispatch_q = ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj[0]);
_track_context(adreno_dev, dispatch_q, drawctxt);
@@ -1163,8 +1310,11 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
* queue will try to schedule new commands anyway.
*/
- if (dispatch_q->inflight < _context_cmdbatch_burst)
+ if (dispatch_q->inflight < _context_drawobj_burst)
adreno_dispatcher_issuecmds(adreno_dev);
+done:
+ if (test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv))
+ return -EPROTO;
return 0;
}
@@ -1208,15 +1358,15 @@ static void mark_guilty_context(struct kgsl_device *device, unsigned int id)
}
/*
- * If an IB inside of the command batch has a gpuaddr that matches the base
+ * If an IB inside of the drawobj has a gpuaddr that matches the base
* passed in then zero the size which effectively skips it when it is submitted
* in the ringbuffer.
*/
-static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base)
+static void _skip_ib(struct kgsl_drawobj_cmd *cmdobj, uint64_t base)
{
struct kgsl_memobj_node *ib;
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &cmdobj->cmdlist, node) {
if (ib->gpuaddr == base) {
ib->priv |= MEMOBJ_SKIP;
if (base)
@@ -1225,10 +1375,11 @@ static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base)
}
}
-static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmdbatch **replay, int count)
+static void _skip_cmd(struct kgsl_drawobj_cmd *cmdobj,
+ struct kgsl_drawobj_cmd **replay, int count)
{
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
int i;
/*
@@ -1243,9 +1394,9 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
* b) force preamble for next commandbatch
*/
for (i = 1; i < count; i++) {
- if (replay[i]->context->id == cmdbatch->context->id) {
+ if (DRAWOBJ(replay[i])->context->id == drawobj->context->id) {
replay[i]->fault_policy = replay[0]->fault_policy;
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
set_bit(KGSL_FT_SKIPCMD, &replay[i]->fault_recovery);
break;
}
@@ -1262,41 +1413,44 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
drawctxt->fault_policy = replay[0]->fault_policy;
}
- /* set the flags to skip this cmdbatch */
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
- cmdbatch->fault_recovery = 0;
+ /* set the flags to skip this cmdobj */
+ set_bit(CMDOBJ_SKIP, &cmdobj->priv);
+ cmdobj->fault_recovery = 0;
}
-static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmdbatch **replay, int count)
+static void _skip_frame(struct kgsl_drawobj_cmd *cmdobj,
+ struct kgsl_drawobj_cmd **replay, int count)
{
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
int skip = 1;
int i;
for (i = 0; i < count; i++) {
+ struct kgsl_drawobj *replay_obj = DRAWOBJ(replay[i]);
+
/*
- * Only operate on command batches that belong to the
+ * Only operate on drawobjs that belong to the
* faulting context
*/
- if (replay[i]->context->id != cmdbatch->context->id)
+ if (replay_obj->context->id != drawobj->context->id)
continue;
/*
- * Skip all the command batches in this context until
+ * Skip all the drawobjs in this context until
* the EOF flag is seen. If the EOF flag is seen then
* force the preamble for the next command.
*/
if (skip) {
- set_bit(CMDBATCH_FLAG_SKIP, &replay[i]->priv);
+ set_bit(CMDOBJ_SKIP, &replay[i]->priv);
- if (replay[i]->flags & KGSL_CMDBATCH_END_OF_FRAME)
+ if (replay_obj->flags & KGSL_DRAWOBJ_END_OF_FRAME)
skip = 0;
} else {
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
return;
}
}
@@ -1318,26 +1472,28 @@ static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
set_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv);
}
-static void remove_invalidated_cmdbatches(struct kgsl_device *device,
- struct kgsl_cmdbatch **replay, int count)
+static void remove_invalidated_cmdobjs(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd **replay, int count)
{
int i;
for (i = 0; i < count; i++) {
- struct kgsl_cmdbatch *cmd = replay[i];
- if (cmd == NULL)
+ struct kgsl_drawobj_cmd *cmdobj = replay[i];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+
+ if (cmdobj == NULL)
continue;
- if (kgsl_context_detached(cmd->context) ||
- kgsl_context_invalid(cmd->context)) {
+ if (kgsl_context_detached(drawobj->context) ||
+ kgsl_context_invalid(drawobj->context)) {
replay[i] = NULL;
mutex_lock(&device->mutex);
kgsl_cancel_events_timestamp(device,
- &cmd->context->events, cmd->timestamp);
+ &drawobj->context->events, drawobj->timestamp);
mutex_unlock(&device->mutex);
- kgsl_cmdbatch_destroy(cmd);
+ kgsl_drawobj_destroy(drawobj);
}
}
}
@@ -1361,9 +1517,10 @@ static inline const char *_kgsl_context_comm(struct kgsl_context *context)
static void adreno_fault_header(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch)
+ struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
unsigned int status, rptr, wptr, ib1sz, ib2sz;
uint64_t ib1base, ib2base;
@@ -1377,22 +1534,22 @@ static void adreno_fault_header(struct kgsl_device *device,
ADRENO_REG_CP_IB2_BASE_HI, &ib2base);
adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &ib2sz);
- if (cmdbatch != NULL) {
+ if (drawobj != NULL) {
struct adreno_context *drawctxt =
- ADRENO_CONTEXT(cmdbatch->context);
+ ADRENO_CONTEXT(drawobj->context);
- trace_adreno_gpu_fault(cmdbatch->context->id,
- cmdbatch->timestamp,
+ trace_adreno_gpu_fault(drawobj->context->id,
+ drawobj->timestamp,
status, rptr, wptr, ib1base, ib1sz,
ib2base, ib2sz, drawctxt->rb->id);
- pr_fault(device, cmdbatch,
+ pr_fault(device, drawobj,
"gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- cmdbatch->context->id, cmdbatch->timestamp, status,
+ drawobj->context->id, drawobj->timestamp, status,
rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
if (rb != NULL)
- pr_fault(device, cmdbatch,
+ pr_fault(device, drawobj,
"gpu fault rb %d rb sw r/w %4.4x/%4.4x\n",
rb->id, rptr, rb->wptr);
} else {
@@ -1411,33 +1568,34 @@ static void adreno_fault_header(struct kgsl_device *device,
void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) &&
kgsl_context_detached(&drawctxt->base)) {
- pr_context(KGSL_DEVICE(adreno_dev), cmdbatch->context,
- "gpu detached context %d\n", cmdbatch->context->id);
+ pr_context(KGSL_DEVICE(adreno_dev), drawobj->context,
+ "gpu detached context %d\n", drawobj->context->id);
clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv);
}
}
/**
- * process_cmdbatch_fault() - Process a cmdbatch for fault policies
- * @device: Device on which the cmdbatch caused a fault
- * @replay: List of cmdbatches that are to be replayed on the device. The
- * faulting cmdbatch is the first command in the replay list and the remaining
- * cmdbatches in the list are commands that were submitted to the same queue
+ * process_cmdobj_fault() - Process a cmdobj for fault policies
+ * @device: Device on which the cmdobj caused a fault
+ * @replay: List of cmdobjs that are to be replayed on the device. The
+ * first command in the replay list is the faulting command and the remaining
+ * cmdobjs in the list are commands that were submitted to the same queue
* as the faulting one.
- * @count: Number of cmdbatches in replay
+ * @count: Number of cmdobjs in replay
* @base: The IB1 base at the time of fault
* @fault: The fault type
*/
-static void process_cmdbatch_fault(struct kgsl_device *device,
- struct kgsl_cmdbatch **replay, int count,
+static void process_cmdobj_fault(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd **replay, int count,
unsigned int base,
int fault)
{
- struct kgsl_cmdbatch *cmdbatch = replay[0];
+ struct kgsl_drawobj_cmd *cmdobj = replay[0];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
int i;
char *state = "failed";
@@ -1451,18 +1609,18 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
* where 1st and 4th gpu hang are more than 3 seconds apart we
* won't disable GFT and invalidate the context.
*/
- if (test_bit(KGSL_FT_THROTTLE, &cmdbatch->fault_policy)) {
- if (time_after(jiffies, (cmdbatch->context->fault_time
+ if (test_bit(KGSL_FT_THROTTLE, &cmdobj->fault_policy)) {
+ if (time_after(jiffies, (drawobj->context->fault_time
+ msecs_to_jiffies(_fault_throttle_time)))) {
- cmdbatch->context->fault_time = jiffies;
- cmdbatch->context->fault_count = 1;
+ drawobj->context->fault_time = jiffies;
+ drawobj->context->fault_count = 1;
} else {
- cmdbatch->context->fault_count++;
- if (cmdbatch->context->fault_count >
+ drawobj->context->fault_count++;
+ if (drawobj->context->fault_count >
_fault_throttle_burst) {
set_bit(KGSL_FT_DISABLE,
- &cmdbatch->fault_policy);
- pr_context(device, cmdbatch->context,
+ &cmdobj->fault_policy);
+ pr_context(device, drawobj->context,
"gpu fault threshold exceeded %d faults in %d msecs\n",
_fault_throttle_burst,
_fault_throttle_time);
@@ -1471,45 +1629,45 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
}
/*
- * If FT is disabled for this cmdbatch invalidate immediately
+ * If FT is disabled for this cmdobj invalidate immediately
*/
- if (test_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy) ||
- test_bit(KGSL_FT_TEMP_DISABLE, &cmdbatch->fault_policy)) {
+ if (test_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy) ||
+ test_bit(KGSL_FT_TEMP_DISABLE, &cmdobj->fault_policy)) {
state = "skipped";
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG);
}
/* If the context is detached do not run FT on context */
- if (kgsl_context_detached(cmdbatch->context)) {
+ if (kgsl_context_detached(drawobj->context)) {
state = "detached";
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG);
}
/*
- * Set a flag so we don't print another PM dump if the cmdbatch fails
+ * Set a flag so we don't print another PM dump if the cmdobj fails
* again on replay
*/
- set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
+ set_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy);
/*
* A hardware fault generally means something was deterministically
- * wrong with the command batch - no point in trying to replay it
+ * wrong with the cmdobj - no point in trying to replay it
* Clear the replay bit and move on to the next policy level
*/
if (fault & ADRENO_HARD_FAULT)
- clear_bit(KGSL_FT_REPLAY, &(cmdbatch->fault_policy));
+ clear_bit(KGSL_FT_REPLAY, &(cmdobj->fault_policy));
/*
* A timeout fault means the IB timed out - clear the policy and
* invalidate - this will clear the FT_SKIP_PMDUMP bit but that is okay
- * because we won't see this cmdbatch again
+ * because we won't see this cmdobj again
*/
if (fault & ADRENO_TIMEOUT_FAULT)
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG);
/*
* If the context had a GPU page fault then it is likely it would fault
@@ -1517,83 +1675,84 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
*/
if (test_bit(KGSL_CONTEXT_PRIV_PAGEFAULT,
- &cmdbatch->context->priv)) {
+ &drawobj->context->priv)) {
/* we'll need to resume the mmu later... */
- clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy);
+ clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy);
clear_bit(KGSL_CONTEXT_PRIV_PAGEFAULT,
- &cmdbatch->context->priv);
+ &drawobj->context->priv);
}
/*
- * Execute the fault tolerance policy. Each command batch stores the
+ * Execute the fault tolerance policy. Each cmdobj stores the
* current fault policy that was set when it was queued.
* As the options are tried in descending priority
* (REPLAY -> SKIPIBS -> SKIPFRAME -> NOTHING) the bits are cleared
- * from the cmdbatch policy so the next thing can be tried if the
+ * from the cmdobj policy so the next thing can be tried if the
* change comes around again
*/
- /* Replay the hanging command batch again */
- if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_REPLAY));
- set_bit(KGSL_FT_REPLAY, &cmdbatch->fault_recovery);
+ /* Replay the hanging cmdobj again */
+ if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_REPLAY));
+ set_bit(KGSL_FT_REPLAY, &cmdobj->fault_recovery);
return;
}
/*
* Skip the last IB1 that was played but replay everything else.
- * Note that the last IB1 might not be in the "hung" command batch
+ * Note that the last IB1 might not be in the "hung" cmdobj
* because the CP may have caused a page-fault while it was prefetching
* the next IB1/IB2. walk all outstanding commands and zap the
* supposedly bad IB1 where ever it lurks.
*/
- if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPIB));
- set_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_recovery);
+ if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPIB));
+ set_bit(KGSL_FT_SKIPIB, &cmdobj->fault_recovery);
for (i = 0; i < count; i++) {
if (replay[i] != NULL &&
- replay[i]->context->id == cmdbatch->context->id)
- cmdbatch_skip_ib(replay[i], base);
+ DRAWOBJ(replay[i])->context->id ==
+ drawobj->context->id)
+ _skip_ib(replay[i], base);
}
return;
}
- /* Skip the faulted command batch submission */
- if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPCMD));
+ /* Skip the faulted cmdobj submission */
+ if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPCMD));
- /* Skip faulting command batch */
- cmdbatch_skip_cmd(cmdbatch, replay, count);
+ /* Skip faulting cmdobj */
+ _skip_cmd(cmdobj, replay, count);
return;
}
- if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch,
+ if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj,
BIT(KGSL_FT_SKIPFRAME));
- set_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_recovery);
+ set_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_recovery);
/*
- * Skip all the pending command batches for this context until
+ * Skip all the pending cmdobjs for this context until
* the EOF frame is seen
*/
- cmdbatch_skip_frame(cmdbatch, replay, count);
+ _skip_frame(cmdobj, replay, count);
return;
}
/* If we get here then all the policies failed */
- pr_context(device, cmdbatch->context, "gpu %s ctx %d ts %d\n",
- state, cmdbatch->context->id, cmdbatch->timestamp);
+ pr_context(device, drawobj->context, "gpu %s ctx %d ts %d\n",
+ state, drawobj->context->id, drawobj->timestamp);
/* Mark the context as failed */
- mark_guilty_context(device, cmdbatch->context->id);
+ mark_guilty_context(device, drawobj->context->id);
/* Invalidate the context */
- adreno_drawctxt_invalidate(device, cmdbatch->context);
+ adreno_drawctxt_invalidate(device, drawobj->context);
}
/**
@@ -1605,12 +1764,12 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
* @base: The IB1 base during the fault
*/
static void recover_dispatch_q(struct kgsl_device *device,
- struct adreno_dispatcher_cmdqueue *dispatch_q,
+ struct adreno_dispatcher_drawqueue *dispatch_q,
int fault,
unsigned int base)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_cmdbatch **replay = NULL;
+ struct kgsl_drawobj_cmd **replay;
unsigned int ptr;
int first = 0;
int count = 0;
@@ -1624,14 +1783,16 @@ static void recover_dispatch_q(struct kgsl_device *device,
/* Recovery failed - mark everybody on this q guilty */
while (ptr != dispatch_q->tail) {
- struct kgsl_context *context =
- dispatch_q->cmd_q[ptr]->context;
+ struct kgsl_drawobj_cmd *cmdobj =
+ dispatch_q->cmd_q[ptr];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- mark_guilty_context(device, context->id);
- adreno_drawctxt_invalidate(device, context);
- kgsl_cmdbatch_destroy(dispatch_q->cmd_q[ptr]);
+ mark_guilty_context(device, drawobj->context->id);
+ adreno_drawctxt_invalidate(device, drawobj->context);
+ kgsl_drawobj_destroy(drawobj);
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ ptr = DRAWQUEUE_NEXT(ptr,
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE);
}
/*
@@ -1643,22 +1804,22 @@ static void recover_dispatch_q(struct kgsl_device *device,
goto replay;
}
- /* Copy the inflight command batches into the temporary storage */
+ /* Copy the inflight cmdobjs into the temporary storage */
ptr = dispatch_q->head;
while (ptr != dispatch_q->tail) {
replay[count++] = dispatch_q->cmd_q[ptr];
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ ptr = DRAWQUEUE_NEXT(ptr, ADRENO_DISPATCH_DRAWQUEUE_SIZE);
}
if (fault && count)
- process_cmdbatch_fault(device, replay,
+ process_cmdobj_fault(device, replay,
count, base, fault);
replay:
dispatch_q->inflight = 0;
dispatch_q->head = dispatch_q->tail = 0;
- /* Remove any pending command batches that have been invalidated */
- remove_invalidated_cmdbatches(device, replay, count);
+ /* Remove any pending cmdobjs that have been invalidated */
+ remove_invalidated_cmdobjs(device, replay, count);
/* Replay the pending command buffers */
for (i = 0; i < count; i++) {
@@ -1674,16 +1835,16 @@ replay:
*/
if (first == 0) {
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
first = 1;
}
/*
- * Force each command batch to wait for idle - this avoids weird
+ * Force each cmdobj to wait for idle - this avoids weird
* CP parse issues
*/
- set_bit(CMDBATCH_FLAG_WFI, &replay[i]->priv);
+ set_bit(CMDOBJ_WFI, &replay[i]->priv);
ret = sendcmd(adreno_dev, replay[i]);
@@ -1693,15 +1854,18 @@ replay:
*/
if (ret) {
- pr_context(device, replay[i]->context,
+ pr_context(device, replay[i]->base.context,
"gpu reset failed ctx %d ts %d\n",
- replay[i]->context->id, replay[i]->timestamp);
+ replay[i]->base.context->id,
+ replay[i]->base.timestamp);
/* Mark this context as guilty (failed recovery) */
- mark_guilty_context(device, replay[i]->context->id);
+ mark_guilty_context(device,
+ replay[i]->base.context->id);
- adreno_drawctxt_invalidate(device, replay[i]->context);
- remove_invalidated_cmdbatches(device, &replay[i],
+ adreno_drawctxt_invalidate(device,
+ replay[i]->base.context);
+ remove_invalidated_cmdobjs(device, &replay[i],
count - i);
}
}
@@ -1713,36 +1877,38 @@ replay:
}
static void do_header_and_snapshot(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch)
+ struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj)
{
- /* Always dump the snapshot on a non-cmdbatch failure */
- if (cmdbatch == NULL) {
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+
+ /* Always dump the snapshot on a non-drawobj failure */
+ if (cmdobj == NULL) {
adreno_fault_header(device, rb, NULL);
kgsl_device_snapshot(device, NULL);
return;
}
/* Skip everything if the PMDUMP flag is set */
- if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy))
+ if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy))
return;
/* Print the fault header */
- adreno_fault_header(device, rb, cmdbatch);
+ adreno_fault_header(device, rb, cmdobj);
- if (!(cmdbatch->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
- kgsl_device_snapshot(device, cmdbatch->context);
+ if (!(drawobj->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
+ kgsl_device_snapshot(device, drawobj->context);
}
static int dispatcher_do_fault(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_dispatcher_cmdqueue *dispatch_q = NULL, *dispatch_q_temp;
+ struct adreno_dispatcher_drawqueue *dispatch_q = NULL, *dispatch_q_temp;
struct adreno_ringbuffer *rb;
struct adreno_ringbuffer *hung_rb = NULL;
unsigned int reg;
uint64_t base;
- struct kgsl_cmdbatch *cmdbatch = NULL;
+ struct kgsl_drawobj_cmd *cmdobj = NULL;
int ret, i;
int fault;
int halt;
@@ -1792,10 +1958,10 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
}
/*
- * retire cmdbatches from all the dispatch_q's before starting recovery
+ * retire cmdobjs from all the dispatch_q's before starting recovery
*/
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- adreno_dispatch_retire_cmdqueue(adreno_dev,
+ adreno_dispatch_retire_drawqueue(adreno_dev,
&(rb->dispatch_q));
/* Select the active dispatch_q */
if (base == rb->buffer_desc.gpuaddr) {
@@ -1814,15 +1980,15 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
}
}
- if (dispatch_q && !adreno_cmdqueue_is_empty(dispatch_q)) {
- cmdbatch = dispatch_q->cmd_q[dispatch_q->head];
- trace_adreno_cmdbatch_fault(cmdbatch, fault);
+ if (dispatch_q && !adreno_drawqueue_is_empty(dispatch_q)) {
+ cmdobj = dispatch_q->cmd_q[dispatch_q->head];
+ trace_adreno_cmdbatch_fault(cmdobj, fault);
}
adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
ADRENO_REG_CP_IB1_BASE_HI, &base);
- do_header_and_snapshot(device, hung_rb, cmdbatch);
+ do_header_and_snapshot(device, hung_rb, cmdobj);
/* Terminate the stalled transaction and resume the IOMMU */
if (fault & ADRENO_IOMMU_PAGE_FAULT)
@@ -1876,23 +2042,24 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
return 1;
}
-static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch,
+static inline int drawobj_consumed(struct kgsl_drawobj *drawobj,
unsigned int consumed, unsigned int retired)
{
- return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) &&
- (timestamp_cmp(retired, cmdbatch->timestamp) < 0));
+ return ((timestamp_cmp(drawobj->timestamp, consumed) >= 0) &&
+ (timestamp_cmp(retired, drawobj->timestamp) < 0));
}
static void _print_recovery(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
static struct {
unsigned int mask;
const char *str;
} flags[] = { ADRENO_FT_TYPES };
- int i, nr = find_first_bit(&cmdbatch->fault_recovery, BITS_PER_LONG);
+ int i, nr = find_first_bit(&cmdobj->fault_recovery, BITS_PER_LONG);
char *result = "unknown";
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
for (i = 0; i < ARRAY_SIZE(flags); i++) {
if (flags[i].mask == BIT(nr)) {
@@ -1901,40 +2068,41 @@ static void _print_recovery(struct kgsl_device *device,
}
}
- pr_context(device, cmdbatch->context,
+ pr_context(device, drawobj->context,
"gpu %s ctx %d ts %d policy %lX\n",
- result, cmdbatch->context->id, cmdbatch->timestamp,
- cmdbatch->fault_recovery);
+ result, drawobj->context->id, drawobj->timestamp,
+ cmdobj->fault_recovery);
}
-static void cmdbatch_profile_ticks(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch, uint64_t *start, uint64_t *retire)
+static void cmdobj_profile_ticks(struct adreno_device *adreno_dev,
+ struct kgsl_drawobj_cmd *cmdobj, uint64_t *start, uint64_t *retire)
{
- void *ptr = adreno_dev->cmdbatch_profile_buffer.hostptr;
- struct adreno_cmdbatch_profile_entry *entry;
+ void *ptr = adreno_dev->profile_buffer.hostptr;
+ struct adreno_drawobj_profile_entry *entry;
- entry = (struct adreno_cmdbatch_profile_entry *)
- (ptr + (cmdbatch->profile_index * sizeof(*entry)));
+ entry = (struct adreno_drawobj_profile_entry *)
+ (ptr + (cmdobj->profile_index * sizeof(*entry)));
rmb();
*start = entry->started;
*retire = entry->retired;
}
-static void retire_cmdbatch(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch)
+static void retire_cmdobj(struct adreno_device *adreno_dev,
+ struct kgsl_drawobj_cmd *cmdobj)
{
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
uint64_t start = 0, end = 0;
- if (cmdbatch->fault_recovery != 0) {
- set_bit(ADRENO_CONTEXT_FAULT, &cmdbatch->context->priv);
- _print_recovery(KGSL_DEVICE(adreno_dev), cmdbatch);
+ if (cmdobj->fault_recovery != 0) {
+ set_bit(ADRENO_CONTEXT_FAULT, &drawobj->context->priv);
+ _print_recovery(KGSL_DEVICE(adreno_dev), cmdobj);
}
- if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv))
- cmdbatch_profile_ticks(adreno_dev, cmdbatch, &start, &end);
+ if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv))
+ cmdobj_profile_ticks(adreno_dev, cmdobj, &start, &end);
/*
* For A3xx we still get the rptr from the CP_RB_RPTR instead of
@@ -1942,48 +2110,49 @@ static void retire_cmdbatch(struct adreno_device *adreno_dev,
* So avoid reading GPU register directly for A3xx.
*/
if (adreno_is_a3xx(adreno_dev))
- trace_adreno_cmdbatch_retired(cmdbatch,
- (int) dispatcher->inflight, start, end,
- ADRENO_CMDBATCH_RB(cmdbatch), 0);
+ trace_adreno_cmdbatch_retired(drawobj,
+ (int) dispatcher->inflight, start, end,
+ ADRENO_DRAWOBJ_RB(drawobj), 0, cmdobj->fault_recovery);
else
- trace_adreno_cmdbatch_retired(cmdbatch,
- (int) dispatcher->inflight, start, end,
- ADRENO_CMDBATCH_RB(cmdbatch),
- adreno_get_rptr(drawctxt->rb));
+ trace_adreno_cmdbatch_retired(drawobj,
+ (int) dispatcher->inflight, start, end,
+ ADRENO_DRAWOBJ_RB(drawobj),
+ adreno_get_rptr(drawctxt->rb), cmdobj->fault_recovery);
drawctxt->submit_retire_ticks[drawctxt->ticks_index] =
- end - cmdbatch->submit_ticks;
+ end - cmdobj->submit_ticks;
drawctxt->ticks_index = (drawctxt->ticks_index + 1) %
SUBMIT_RETIRE_TICKS_SIZE;
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
}
-static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
int count = 0;
- while (!adreno_cmdqueue_is_empty(cmdqueue)) {
- struct kgsl_cmdbatch *cmdbatch =
- cmdqueue->cmd_q[cmdqueue->head];
+ while (!adreno_drawqueue_is_empty(drawqueue)) {
+ struct kgsl_drawobj_cmd *cmdobj =
+ drawqueue->cmd_q[drawqueue->head];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- if (!kgsl_check_timestamp(device, cmdbatch->context,
- cmdbatch->timestamp))
+ if (!kgsl_check_timestamp(device, drawobj->context,
+ drawobj->timestamp))
break;
- retire_cmdbatch(adreno_dev, cmdbatch);
+ retire_cmdobj(adreno_dev, cmdobj);
dispatcher->inflight--;
- cmdqueue->inflight--;
+ drawqueue->inflight--;
- cmdqueue->cmd_q[cmdqueue->head] = NULL;
+ drawqueue->cmd_q[drawqueue->head] = NULL;
- cmdqueue->head = CMDQUEUE_NEXT(cmdqueue->head,
- ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ drawqueue->head = DRAWQUEUE_NEXT(drawqueue->head,
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE);
count++;
}
@@ -1992,13 +2161,14 @@ static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
}
static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_cmdbatch *cmdbatch = cmdqueue->cmd_q[cmdqueue->head];
+ struct kgsl_drawobj *drawobj =
+ DRAWOBJ(drawqueue->cmd_q[drawqueue->head]);
/* Don't timeout if the timer hasn't expired yet (duh) */
- if (time_is_after_jiffies(cmdqueue->expires))
+ if (time_is_after_jiffies(drawqueue->expires))
return;
/* Don't timeout if the IB timeout is disabled globally */
@@ -2006,30 +2176,30 @@ static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
return;
/* Don't time out if the context has disabled it */
- if (cmdbatch->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+ if (drawobj->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
return;
- pr_context(device, cmdbatch->context, "gpu timeout ctx %d ts %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
+ pr_context(device, drawobj->context, "gpu timeout ctx %d ts %d\n",
+ drawobj->context->id, drawobj->timestamp);
adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
}
-static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static int adreno_dispatch_process_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- int count = adreno_dispatch_retire_cmdqueue(adreno_dev, cmdqueue);
+ int count = adreno_dispatch_retire_drawqueue(adreno_dev, drawqueue);
/* Nothing to do if there are no pending commands */
- if (adreno_cmdqueue_is_empty(cmdqueue))
+ if (adreno_drawqueue_is_empty(drawqueue))
return count;
- /* Don't update the cmdqueue timeout if we are about to preempt out */
+ /* Don't update the drawqueue timeout if we are about to preempt out */
if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE))
return count;
- /* Don't update the cmdqueue timeout if it isn't active */
- if (!cmdqueue_is_current(cmdqueue))
+ /* Don't update the drawqueue timeout if it isn't active */
+ if (!drawqueue_is_current(drawqueue))
return count;
/*
@@ -2038,17 +2208,17 @@ static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
*/
if (count) {
- cmdqueue->expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ drawqueue->expires = jiffies +
+ msecs_to_jiffies(adreno_drawobj_timeout);
return count;
}
/*
* If we get here then 1) the ringbuffer is current and 2) we haven't
* retired anything. Check to see if the timeout if valid for the
- * current cmdbatch and fault if it has expired
+ * current drawobj and fault if it has expired
*/
- _adreno_dispatch_check_timeout(adreno_dev, cmdqueue);
+ _adreno_dispatch_check_timeout(adreno_dev, drawqueue);
return 0;
}
@@ -2067,11 +2237,11 @@ static void _dispatcher_update_timers(struct adreno_device *adreno_dev)
/* Check to see if we need to update the command timer */
if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
- struct adreno_dispatcher_cmdqueue *cmdqueue =
- CMDQUEUE(adreno_dev->cur_rb);
+ struct adreno_dispatcher_drawqueue *drawqueue =
+ DRAWQUEUE(adreno_dev->cur_rb);
- if (!adreno_cmdqueue_is_empty(cmdqueue))
- mod_timer(&dispatcher->timer, cmdqueue->expires);
+ if (!adreno_drawqueue_is_empty(drawqueue))
+ mod_timer(&dispatcher->timer, drawqueue->expires);
}
}
@@ -2111,14 +2281,14 @@ static void adreno_dispatcher_work(struct work_struct *work)
/*
* As long as there are inflight commands, process retired comamnds from
- * all cmdqueues
+ * all drawqueues
*/
for (i = 0; i < adreno_dev->num_ringbuffers; i++) {
- struct adreno_dispatcher_cmdqueue *cmdqueue =
- CMDQUEUE(&adreno_dev->ringbuffers[i]);
+ struct adreno_dispatcher_drawqueue *drawqueue =
+ DRAWQUEUE(&adreno_dev->ringbuffers[i]);
- count += adreno_dispatch_process_cmdqueue(adreno_dev,
- cmdqueue);
+ count += adreno_dispatch_process_drawqueue(adreno_dev,
+ drawqueue);
if (dispatcher->inflight == 0)
break;
}
@@ -2178,7 +2348,7 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device,
}
/*
- * This is called on a regular basis while command batches are inflight. Fault
+ * This is called on a regular basis while cmdobj's are inflight. Fault
* detection registers are read and compared to the existing values - if they
* changed then the GPU is still running. If they are the same between
* subsequent calls then the GPU may have faulted
@@ -2230,7 +2400,7 @@ static void adreno_dispatcher_timer(unsigned long data)
*/
void adreno_dispatcher_start(struct kgsl_device *device)
{
- complete_all(&device->cmdbatch_gate);
+ complete_all(&device->halt_gate);
/* Schedule the work loop to get things going */
adreno_dispatcher_schedule(device);
@@ -2267,13 +2437,13 @@ void adreno_dispatcher_close(struct adreno_device *adreno_dev)
del_timer_sync(&dispatcher->fault_timer);
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- struct adreno_dispatcher_cmdqueue *dispatch_q =
+ struct adreno_dispatcher_drawqueue *dispatch_q =
&(rb->dispatch_q);
- while (!adreno_cmdqueue_is_empty(dispatch_q)) {
- kgsl_cmdbatch_destroy(
- dispatch_q->cmd_q[dispatch_q->head]);
+ while (!adreno_drawqueue_is_empty(dispatch_q)) {
+ kgsl_drawobj_destroy(
+ DRAWOBJ(dispatch_q->cmd_q[dispatch_q->head]));
dispatch_q->head = (dispatch_q->head + 1)
- % ADRENO_DISPATCH_CMDQUEUE_SIZE;
+ % ADRENO_DISPATCH_DRAWQUEUE_SIZE;
}
}
@@ -2332,23 +2502,23 @@ static ssize_t _show_uint(struct adreno_dispatcher *dispatcher,
*((unsigned int *) attr->value));
}
-static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE,
+static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_DRAWQUEUE_SIZE,
_dispatcher_q_inflight_hi);
static DISPATCHER_UINT_ATTR(inflight_low_latency, 0644,
- ADRENO_DISPATCH_CMDQUEUE_SIZE, _dispatcher_q_inflight_lo);
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE, _dispatcher_q_inflight_lo);
/*
* Our code that "puts back" a command from the context is much cleaner
* if we are sure that there will always be enough room in the
* ringbuffer so restrict the maximum size of the context queue to
- * ADRENO_CONTEXT_CMDQUEUE_SIZE - 1
+ * ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1
*/
-static DISPATCHER_UINT_ATTR(context_cmdqueue_size, 0644,
- ADRENO_CONTEXT_CMDQUEUE_SIZE - 1, _context_cmdqueue_size);
+static DISPATCHER_UINT_ATTR(context_drawqueue_size, 0644,
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1, _context_drawqueue_size);
static DISPATCHER_UINT_ATTR(context_burst_count, 0644, 0,
- _context_cmdbatch_burst);
-static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0,
- adreno_cmdbatch_timeout);
+ _context_drawobj_burst);
+static DISPATCHER_UINT_ATTR(drawobj_timeout, 0644, 0,
+ adreno_drawobj_timeout);
static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait);
static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0,
_fault_timer_interval);
@@ -2366,9 +2536,9 @@ static DISPATCHER_UINT_ATTR(dispatch_starvation_time, 0644, 0,
static struct attribute *dispatcher_attrs[] = {
&dispatcher_attr_inflight.attr,
&dispatcher_attr_inflight_low_latency.attr,
- &dispatcher_attr_context_cmdqueue_size.attr,
+ &dispatcher_attr_context_drawqueue_size.attr,
&dispatcher_attr_context_burst_count.attr,
- &dispatcher_attr_cmdbatch_timeout.attr,
+ &dispatcher_attr_drawobj_timeout.attr,
&dispatcher_attr_context_queue_wait.attr,
&dispatcher_attr_fault_detect_interval.attr,
&dispatcher_attr_fault_throttle_time.attr,
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index 699c3e4adb27..cb9106fedc82 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -15,7 +15,7 @@
#define ____ADRENO_DISPATCHER_H
extern unsigned int adreno_disp_preempt_fair_sched;
-extern unsigned int adreno_cmdbatch_timeout;
+extern unsigned int adreno_drawobj_timeout;
extern unsigned int adreno_dispatch_starvation_time;
extern unsigned int adreno_dispatch_time_slice;
@@ -44,21 +44,21 @@ enum adreno_dispatcher_starve_timer_states {
* sizes that can be chosen at runtime
*/
-#define ADRENO_DISPATCH_CMDQUEUE_SIZE 128
+#define ADRENO_DISPATCH_DRAWQUEUE_SIZE 128
-#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
/**
- * struct adreno_dispatcher_cmdqueue - List of commands for a RB level
- * @cmd_q: List of command batches submitted to dispatcher
+ * struct adreno_dispatcher_drawqueue - List of commands for a RB level
+ * @cmd_q: List of command objs submitted to dispatcher
* @inflight: Number of commands inflight in this q
* @head: Head pointer to the q
* @tail: Queues tail pointer
- * @active_context_count: Number of active contexts seen in this rb cmdqueue
- * @expires: The jiffies value at which this cmdqueue has run too long
+ * @active_context_count: Number of active contexts seen in this rb drawqueue
+ * @expires: The jiffies value at which this drawqueue has run too long
*/
-struct adreno_dispatcher_cmdqueue {
- struct kgsl_cmdbatch *cmd_q[ADRENO_DISPATCH_CMDQUEUE_SIZE];
+struct adreno_dispatcher_drawqueue {
+ struct kgsl_drawobj_cmd *cmd_q[ADRENO_DISPATCH_DRAWQUEUE_SIZE];
unsigned int inflight;
unsigned int head;
unsigned int tail;
@@ -70,10 +70,10 @@ struct adreno_dispatcher_cmdqueue {
* struct adreno_dispatcher - container for the adreno GPU dispatcher
* @mutex: Mutex to protect the structure
* @state: Current state of the dispatcher (active or paused)
- * @timer: Timer to monitor the progress of the command batches
- * @inflight: Number of command batch operations pending in the ringbuffer
+ * @timer: Timer to monitor the progress of the drawobjs
+ * @inflight: Number of drawobj operations pending in the ringbuffer
* @fault: Non-zero if a fault was detected.
- * @pending: Priority list of contexts waiting to submit command batches
+ * @pending: Priority list of contexts waiting to submit drawobjs
* @plist_lock: Spin lock to protect the pending queue
* @work: work_struct to put the dispatcher in a work queue
* @kobj: kobject for the dispatcher directory in the device sysfs node
@@ -109,9 +109,9 @@ int adreno_dispatcher_idle(struct adreno_device *adreno_dev);
void adreno_dispatcher_irq_fault(struct adreno_device *adreno_dev);
void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
-int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp);
+int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count, uint32_t *timestamp);
void adreno_dispatcher_schedule(struct kgsl_device *device);
void adreno_dispatcher_pause(struct adreno_device *adreno_dev);
@@ -120,11 +120,11 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device,
void adreno_dispatcher_preempt_callback(struct adreno_device *adreno_dev,
int bit);
void adreno_preempt_process_dispatch_queue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *dispatch_q);
+ struct adreno_dispatcher_drawqueue *dispatch_q);
-static inline bool adreno_cmdqueue_is_empty(
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static inline bool adreno_drawqueue_is_empty(
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- return (cmdqueue != NULL && cmdqueue->head == cmdqueue->tail);
+ return (drawqueue != NULL && drawqueue->head == drawqueue->tail);
}
#endif /* __ADRENO_DISPATCHER_H */
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index fb95f6108fb8..3a110ed221a8 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -59,14 +59,14 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &retire);
/*
- * We may have cmdbatch timer running, which also uses same
+ * We may have kgsl sync obj timer running, which also uses same
* lock, take a lock with software interrupt disabled (bh)
* to avoid spin lock recursion.
*
* Use Spin trylock because dispatcher can acquire drawctxt->lock
* if context is pending and the fence it is waiting on just got
* signalled. Dispatcher acquires drawctxt->lock and tries to
- * delete the cmdbatch timer using del_timer_sync().
+ * delete the sync obj timer using del_timer_sync().
* del_timer_sync() waits till timer and its pending handlers
* are deleted. But if the timer expires at the same time,
* timer handler could be waiting on drawctxt->lock leading to a
@@ -83,23 +83,27 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
context->id, queue, drawctxt->submitted_timestamp,
start, retire);
- if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- struct kgsl_cmdbatch *cmdbatch =
- drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+ if (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
+ struct kgsl_drawobj *drawobj =
+ drawctxt->drawqueue[drawctxt->drawqueue_head];
- if (test_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv)) {
+ if (test_bit(ADRENO_CONTEXT_FENCE_LOG, &context->priv)) {
dev_err(device->dev,
" possible deadlock. Context %d might be blocked for itself\n",
context->id);
goto stats;
}
- if (kgsl_cmdbatch_events_pending(cmdbatch)) {
- dev_err(device->dev,
- " context[%d] (ts=%d) Active sync points:\n",
- context->id, cmdbatch->timestamp);
+ if (drawobj->type == SYNCOBJ_TYPE) {
+ struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
+
+ if (kgsl_drawobj_events_pending(syncobj)) {
+ dev_err(device->dev,
+ " context[%d] (ts=%d) Active sync points:\n",
+ context->id, drawobj->timestamp);
- kgsl_dump_syncpoints(device, cmdbatch);
+ kgsl_dump_syncpoints(device, syncobj);
+ }
}
}
@@ -229,19 +233,19 @@ done:
return ret;
}
-static int drawctxt_detach_cmdbatches(struct adreno_context *drawctxt,
- struct kgsl_cmdbatch **list)
+static int drawctxt_detach_drawobjs(struct adreno_context *drawctxt,
+ struct kgsl_drawobj **list)
{
int count = 0;
- while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- struct kgsl_cmdbatch *cmdbatch =
- drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+ while (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
+ struct kgsl_drawobj *drawobj =
+ drawctxt->drawqueue[drawctxt->drawqueue_head];
- drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
+ drawctxt->drawqueue_head = (drawctxt->drawqueue_head + 1) %
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE;
- list[count++] = cmdbatch;
+ list[count++] = drawobj;
}
return count;
@@ -259,7 +263,7 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device,
struct kgsl_context *context)
{
struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
- struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+ struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
int i, count;
trace_adreno_drawctxt_invalidate(drawctxt);
@@ -280,13 +284,13 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device,
drawctxt->timestamp);
/* Get rid of commands still waiting in the queue */
- count = drawctxt_detach_cmdbatches(drawctxt, list);
+ count = drawctxt_detach_drawobjs(drawctxt, list);
spin_unlock(&drawctxt->lock);
for (i = 0; i < count; i++) {
kgsl_cancel_events_timestamp(device, &context->events,
list[i]->timestamp);
- kgsl_cmdbatch_destroy(list[i]);
+ kgsl_drawobj_destroy(list[i]);
}
/* Make sure all pending events are processed or cancelled */
@@ -453,7 +457,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
struct adreno_context *drawctxt;
struct adreno_ringbuffer *rb;
int ret, count, i;
- struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+ struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
if (context == NULL)
return;
@@ -468,7 +472,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
spin_unlock(&adreno_dev->active_list_lock);
spin_lock(&drawctxt->lock);
- count = drawctxt_detach_cmdbatches(drawctxt, list);
+ count = drawctxt_detach_drawobjs(drawctxt, list);
spin_unlock(&drawctxt->lock);
for (i = 0; i < count; i++) {
@@ -478,7 +482,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
* detached status here.
*/
adreno_fault_skipcmd_detached(adreno_dev, drawctxt, list[i]);
- kgsl_cmdbatch_destroy(list[i]);
+ kgsl_drawobj_destroy(list[i]);
}
/*
@@ -499,13 +503,20 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
/*
* If the wait for global fails due to timeout then nothing after this
- * point is likely to work very well - BUG_ON() so we can take advantage
- * of the debug tools to figure out what the h - e - double hockey
- * sticks happened. If EAGAIN error is returned then recovery will kick
- * in and there will be no more commands in the RB pipe from this
- * context which is waht we are waiting for, so ignore -EAGAIN error
+ * point is likely to work very well - Get GPU snapshot and BUG_ON()
+ * so we can take advantage of the debug tools to figure out what the
+ * h - e - double hockey sticks happened. If EAGAIN error is returned
+ * then recovery will kick in and there will be no more commands in the
+ * RB pipe from this context which is what we are waiting for, so ignore
+ * -EAGAIN error
*/
- BUG_ON(ret && ret != -EAGAIN);
+ if (ret && ret != -EAGAIN) {
+ KGSL_DRV_ERR(device, "Wait for global ts=%d type=%d error=%d\n",
+ drawctxt->internal_timestamp,
+ drawctxt->type, ret);
+ device->force_panic = 1;
+ kgsl_device_snapshot(device, context);
+ }
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 5ea911954991..0578f16ae9e1 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -18,7 +18,7 @@ struct adreno_context_type {
const char *str;
};
-#define ADRENO_CONTEXT_CMDQUEUE_SIZE 128
+#define ADRENO_CONTEXT_DRAWQUEUE_SIZE 128
#define SUBMIT_RETIRE_TICKS_SIZE 7
struct kgsl_device;
@@ -32,20 +32,21 @@ struct kgsl_context;
* @internal_timestamp: Global timestamp of the last issued command
* NOTE: guarded by device->mutex, not drawctxt->mutex!
* @type: Context type (GL, CL, RS)
- * @mutex: Mutex to protect the cmdqueue
- * @cmdqueue: Queue of command batches waiting to be dispatched for this context
- * @cmdqueue_head: Head of the cmdqueue queue
- * @cmdqueue_tail: Tail of the cmdqueue queue
+ * @mutex: Mutex to protect the drawqueue
+ * @drawqueue: Queue of drawobjs waiting to be dispatched for this
+ * context
+ * @drawqueue_head: Head of the drawqueue queue
+ * @drawqueue_tail: Tail of the drawqueue queue
* @pending: Priority list node for the dispatcher list of pending contexts
* @wq: Workqueue structure for contexts to sleep pending room in the queue
* @waiting: Workqueue structure for contexts waiting for a timestamp or event
- * @queued: Number of commands queued in the cmdqueue
- * @fault_policy: GFT fault policy set in cmdbatch_skip_cmd();
+ * @queued: Number of commands queued in the drawqueue
+ * @fault_policy: GFT fault policy set in _skip_cmd();
* @debug_root: debugfs entry for this context.
* @queued_timestamp: The last timestamp that was queued on this context
* @rb: The ringbuffer in which this context submits commands.
* @submitted_timestamp: The last timestamp that was submitted for this context
- * @submit_retire_ticks: Array to hold cmdbatch execution times from submit
+ * @submit_retire_ticks: Array to hold command obj execution times from submit
* to retire
* @ticks_index: The index into submit_retire_ticks[] where the new delta will
* be written.
@@ -60,9 +61,9 @@ struct adreno_context {
spinlock_t lock;
/* Dispatcher */
- struct kgsl_cmdbatch *cmdqueue[ADRENO_CONTEXT_CMDQUEUE_SIZE];
- unsigned int cmdqueue_head;
- unsigned int cmdqueue_tail;
+ struct kgsl_drawobj *drawqueue[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
+ unsigned int drawqueue_head;
+ unsigned int drawqueue_tail;
struct plist_node pending;
wait_queue_head_t wq;
@@ -92,8 +93,9 @@ struct adreno_context {
* @ADRENO_CONTEXT_SKIP_EOF - Context skip IBs until the next end of frame
* marker.
* @ADRENO_CONTEXT_FORCE_PREAMBLE - Force the preamble for the next submission.
- * @ADRENO_CONTEXT_SKIP_CMD - Context's command batch is skipped during
+ * @ADRENO_CONTEXT_SKIP_CMD - Context's drawobj's skipped during
fault tolerance.
+ * @ADRENO_CONTEXT_FENCE_LOG - Dump fences on this context.
*/
enum adreno_context_priv {
ADRENO_CONTEXT_FAULT = KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC,
@@ -102,6 +104,7 @@ enum adreno_context_priv {
ADRENO_CONTEXT_SKIP_EOF,
ADRENO_CONTEXT_FORCE_PREAMBLE,
ADRENO_CONTEXT_SKIP_CMD,
+ ADRENO_CONTEXT_FENCE_LOG,
};
/* Flags for adreno_drawctxt_switch() */
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 8e354d71a291..42f8119ad8b4 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -598,28 +598,6 @@ int adreno_perfcounter_put(struct adreno_device *adreno_dev,
return -EINVAL;
}
-static int _perfcounter_enable_pwr(struct adreno_device *adreno_dev,
- unsigned int counter)
-{
- /* PWR counters enabled by default on A3XX/A4XX so nothing to do */
- if (adreno_is_a3xx(adreno_dev) || adreno_is_a4xx(adreno_dev))
- return 0;
-
- /*
- * On 5XX we have to emulate the PWR counters which are physically
- * missing. Program countable 6 on RBBM_PERFCTR_RBBM_0 as a substitute
- * for PWR:1. Don't emulate PWR:0 as nobody uses it and we don't want
- * to take away too many of the generic RBBM counters.
- */
-
- if (counter == 0)
- return -EINVAL;
-
- kgsl_regwrite(KGSL_DEVICE(adreno_dev), A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
-
- return 0;
-}
-
static void _perfcounter_enable_vbif(struct adreno_device *adreno_dev,
struct adreno_perfcounters *counters, unsigned int counter,
unsigned int countable)
@@ -771,6 +749,7 @@ static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
unsigned int group, unsigned int counter, unsigned int countable)
{
struct adreno_perfcounters *counters = ADRENO_PERFCOUNTERS(adreno_dev);
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
if (counters == NULL)
return -EINVAL;
@@ -786,7 +765,9 @@ static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
/* alwayson counter is global, so init value is 0 */
break;
case KGSL_PERFCOUNTER_GROUP_PWR:
- return _perfcounter_enable_pwr(adreno_dev, counter);
+ if (gpudev->enable_pwr_counters)
+ return gpudev->enable_pwr_counters(adreno_dev, counter);
+ return 0;
case KGSL_PERFCOUNTER_GROUP_VBIF:
if (countable > VBIF2_PERF_CNT_SEL_MASK)
return -EINVAL;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 07ef09034d7c..fc0602a60ac1 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -671,96 +671,17 @@ adreno_ringbuffer_issuecmds(struct adreno_ringbuffer *rb,
sizedwords, 0, NULL);
}
-/**
- * _ringbuffer_verify_ib() - Check if an IB's size is within a permitted limit
- * @device: The kgsl device pointer
- * @ibdesc: Pointer to the IB descriptor
- */
-static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv,
- struct kgsl_context *context, struct kgsl_memobj_node *ib)
-{
- struct kgsl_device *device = dev_priv->device;
- struct kgsl_process_private *private = dev_priv->process_priv;
-
- /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */
- if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) {
- pr_context(device, context, "ctxt %d invalid ib size %lld\n",
- context->id, ib->size);
- return false;
- }
-
- /* Make sure that the address is mapped */
- if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) {
- pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n",
- context->id, ib->gpuaddr);
- return false;
- }
-
- return true;
-}
-
-int
-adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
- struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp)
-{
- struct kgsl_device *device = dev_priv->device;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
- struct kgsl_memobj_node *ib;
- int ret;
-
- if (kgsl_context_invalid(context))
- return -EDEADLK;
-
- /* Verify the IBs before they get queued */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node)
- if (_ringbuffer_verify_ib(dev_priv, context, ib) == false)
- return -EINVAL;
-
- /* wait for the suspend gate */
- wait_for_completion(&device->cmdbatch_gate);
-
- /*
- * Clear the wake on touch bit to indicate an IB has been
- * submitted since the last time we set it. But only clear
- * it when we have rendering commands.
- */
- if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)
- && !(cmdbatch->flags & KGSL_CMDBATCH_SYNC))
- device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH;
-
- /* A3XX does not have support for command batch profiling */
- if (adreno_is_a3xx(adreno_dev) &&
- (cmdbatch->flags & KGSL_CMDBATCH_PROFILING))
- return -EOPNOTSUPP;
-
- /* Queue the command in the ringbuffer */
- ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch,
- timestamp);
-
- /*
- * Return -EPROTO if the device has faulted since the last time we
- * checked - userspace uses this to perform post-fault activities
- */
- if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv))
- ret = -EPROTO;
-
- return ret;
-}
-
static void adreno_ringbuffer_set_constraint(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
- struct kgsl_context *context = cmdbatch->context;
+ struct kgsl_context *context = drawobj->context;
/*
* Check if the context has a constraint and constraint flags are
* set.
*/
if (context->pwr_constraint.type &&
((context->flags & KGSL_CONTEXT_PWR_CONSTRAINT) ||
- (cmdbatch->flags & KGSL_CONTEXT_PWR_CONSTRAINT)))
+ (drawobj->flags & KGSL_CONTEXT_PWR_CONSTRAINT)))
kgsl_pwrctrl_set_constraint(device, &context->pwr_constraint,
context->id);
}
@@ -792,10 +713,12 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev,
/* adreno_rindbuffer_submitcmd - submit userspace IBs to the GPU */
int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch, struct adreno_submit_time *time)
+ struct kgsl_drawobj_cmd *cmdobj,
+ struct adreno_submit_time *time)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
struct kgsl_memobj_node *ib;
unsigned int numibs = 0;
unsigned int *link;
@@ -803,25 +726,25 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
struct kgsl_context *context;
struct adreno_context *drawctxt;
bool use_preamble = true;
- bool cmdbatch_user_profiling = false;
- bool cmdbatch_kernel_profiling = false;
+ bool user_profiling = false;
+ bool kernel_profiling = false;
int flags = KGSL_CMD_FLAGS_NONE;
int ret;
struct adreno_ringbuffer *rb;
- struct kgsl_cmdbatch_profiling_buffer *profile_buffer = NULL;
+ struct kgsl_drawobj_profiling_buffer *profile_buffer = NULL;
unsigned int dwords = 0;
struct adreno_submit_time local;
- struct kgsl_mem_entry *entry = cmdbatch->profiling_buf_entry;
+ struct kgsl_mem_entry *entry = cmdobj->profiling_buf_entry;
if (entry)
profile_buffer = kgsl_gpuaddr_to_vaddr(&entry->memdesc,
- cmdbatch->profiling_buffer_gpuaddr);
+ cmdobj->profiling_buffer_gpuaddr);
- context = cmdbatch->context;
+ context = drawobj->context;
drawctxt = ADRENO_CONTEXT(context);
/* Get the total IBs in the list */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node)
+ list_for_each_entry(ib, &cmdobj->cmdlist, node)
numibs++;
rb = drawctxt->rb;
@@ -838,14 +761,14 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
* c) force preamble for commandbatch
*/
if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) &&
- (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv))) {
+ (!test_bit(CMDOBJ_SKIP, &cmdobj->priv))) {
- set_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_recovery);
- cmdbatch->fault_policy = drawctxt->fault_policy;
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ set_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_recovery);
+ cmdobj->fault_policy = drawctxt->fault_policy;
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
/* if context is detached print fault recovery */
- adreno_fault_skipcmd_detached(adreno_dev, drawctxt, cmdbatch);
+ adreno_fault_skipcmd_detached(adreno_dev, drawctxt, drawobj);
/* clear the drawctxt flags */
clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv);
@@ -857,7 +780,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
if a context switch hasn't occured */
if ((drawctxt->base.flags & KGSL_CONTEXT_PREAMBLE) &&
- !test_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv) &&
+ !test_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv) &&
(rb->drawctxt_active == drawctxt))
use_preamble = false;
@@ -867,7 +790,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
* the accounting sane. Set start_index and numibs to 0 to just
* generate the start and end markers and skip everything else
*/
- if (test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)) {
+ if (test_bit(CMDOBJ_SKIP, &cmdobj->priv)) {
use_preamble = false;
numibs = 0;
}
@@ -884,9 +807,9 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
/* Each IB takes up 30 dwords in worst case */
dwords += (numibs * 30);
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING &&
+ if (drawobj->flags & KGSL_DRAWOBJ_PROFILING &&
!adreno_is_a3xx(adreno_dev) && profile_buffer) {
- cmdbatch_user_profiling = true;
+ user_profiling = true;
dwords += 6;
/*
@@ -907,8 +830,8 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
time = &local;
}
- if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv)) {
- cmdbatch_kernel_profiling = true;
+ if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) {
+ kernel_profiling = true;
dwords += 6;
if (adreno_is_a5xx(adreno_dev))
dwords += 2;
@@ -929,26 +852,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
*cmds++ = cp_packet(adreno_dev, CP_NOP, 1);
*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
- if (cmdbatch_kernel_profiling) {
+ if (kernel_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- adreno_dev->cmdbatch_profile_buffer.gpuaddr +
- ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index,
+ adreno_dev->profile_buffer.gpuaddr +
+ ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index,
started));
}
/*
- * Add cmds to read the GPU ticks at the start of the cmdbatch and
- * write it into the appropriate cmdbatch profiling buffer offset
+ * Add cmds to read the GPU ticks at the start of command obj and
+ * write it into the appropriate command obj profiling buffer offset
*/
- if (cmdbatch_user_profiling) {
+ if (user_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- cmdbatch->profiling_buffer_gpuaddr +
- offsetof(struct kgsl_cmdbatch_profiling_buffer,
+ cmdobj->profiling_buffer_gpuaddr +
+ offsetof(struct kgsl_drawobj_profiling_buffer,
gpu_ticks_submitted));
}
if (numibs) {
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &cmdobj->cmdlist, node) {
/*
* Skip 0 sized IBs - these are presumed to have been
* removed from consideration by the FT policy
@@ -972,21 +895,21 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
adreno_is_preemption_enabled(adreno_dev))
cmds += gpudev->preemption_yield_enable(cmds);
- if (cmdbatch_kernel_profiling) {
+ if (kernel_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- adreno_dev->cmdbatch_profile_buffer.gpuaddr +
- ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index,
+ adreno_dev->profile_buffer.gpuaddr +
+ ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index,
retired));
}
/*
- * Add cmds to read the GPU ticks at the end of the cmdbatch and
- * write it into the appropriate cmdbatch profiling buffer offset
+ * Add cmds to read the GPU ticks at the end of command obj and
+ * write it into the appropriate command obj profiling buffer offset
*/
- if (cmdbatch_user_profiling) {
+ if (user_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- cmdbatch->profiling_buffer_gpuaddr +
- offsetof(struct kgsl_cmdbatch_profiling_buffer,
+ cmdobj->profiling_buffer_gpuaddr +
+ offsetof(struct kgsl_drawobj_profiling_buffer,
gpu_ticks_retired));
}
@@ -1012,7 +935,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
goto done;
}
- if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv))
+ if (test_bit(CMDOBJ_WFI, &cmdobj->priv))
flags = KGSL_CMD_FLAGS_WFI;
/*
@@ -1025,26 +948,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
flags |= KGSL_CMD_FLAGS_PWRON_FIXUP;
/* Set the constraints before adding to ringbuffer */
- adreno_ringbuffer_set_constraint(device, cmdbatch);
+ adreno_ringbuffer_set_constraint(device, drawobj);
/* CFF stuff executed only if CFF is enabled */
- kgsl_cffdump_capture_ib_desc(device, context, cmdbatch);
+ kgsl_cffdump_capture_ib_desc(device, context, cmdobj);
ret = adreno_ringbuffer_addcmds(rb, flags,
&link[0], (cmds - link),
- cmdbatch->timestamp, time);
+ drawobj->timestamp, time);
if (!ret) {
- cmdbatch->global_ts = drawctxt->internal_timestamp;
+ cmdobj->global_ts = drawctxt->internal_timestamp;
/* Put the timevalues in the profiling buffer */
- if (cmdbatch_user_profiling) {
+ if (user_profiling) {
/*
* Return kernel clock time to the the client
* if requested
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING_KTIME) {
+ if (drawobj->flags & KGSL_DRAWOBJ_PROFILING_KTIME) {
uint64_t secs = time->ktime;
profile_buffer->wall_clock_ns =
@@ -1069,9 +992,8 @@ done:
kgsl_memdesc_unmap(&entry->memdesc);
- trace_kgsl_issueibcmds(device, context->id, cmdbatch,
- numibs, cmdbatch->timestamp,
- cmdbatch->flags, ret, drawctxt->type);
+ trace_kgsl_issueibcmds(device, context->id, numibs, drawobj->timestamp,
+ drawobj->flags, ret, drawctxt->type);
kfree(link);
return ret;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index b126f710b5e6..63374af1e3f7 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -119,7 +119,7 @@ struct adreno_ringbuffer {
struct adreno_context *drawctxt_active;
struct kgsl_memdesc preemption_desc;
struct kgsl_memdesc pagetable_desc;
- struct adreno_dispatcher_cmdqueue dispatch_q;
+ struct adreno_dispatcher_drawqueue dispatch_q;
wait_queue_head_t ts_expire_waitq;
unsigned int wptr_preempt_end;
unsigned int gpr11;
@@ -136,11 +136,11 @@ int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds, int set);
int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj *drawobj,
uint32_t *timestamp);
int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj_cmd *cmdobj,
struct adreno_submit_time *time);
int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt);
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
index f52ddfa894d5..16ca0980cfbe 100644
--- a/drivers/gpu/msm/adreno_trace.h
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -27,8 +27,8 @@
#include "adreno_a5xx.h"
TRACE_EVENT(adreno_cmdbatch_queued,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int queued),
- TP_ARGS(cmdbatch, queued),
+ TP_PROTO(struct kgsl_drawobj *drawobj, unsigned int queued),
+ TP_ARGS(drawobj, queued),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -37,26 +37,26 @@ TRACE_EVENT(adreno_cmdbatch_queued,
__field(unsigned int, prio)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->queued = queued;
- __entry->flags = cmdbatch->flags;
- __entry->prio = cmdbatch->context->priority;
+ __entry->flags = drawobj->flags;
+ __entry->prio = drawobj->context->priority;
),
TP_printk(
"ctx=%u ctx_prio=%u ts=%u queued=%u flags=%s",
__entry->id, __entry->prio,
__entry->timestamp, __entry->queued,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none"
+ KGSL_DRAWOBJ_FLAGS) : "none"
)
);
TRACE_EVENT(adreno_cmdbatch_submitted,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight, uint64_t ticks,
+ TP_PROTO(struct kgsl_drawobj *drawobj, int inflight, uint64_t ticks,
unsigned long secs, unsigned long usecs,
struct adreno_ringbuffer *rb, unsigned int rptr),
- TP_ARGS(cmdbatch, inflight, ticks, secs, usecs, rb, rptr),
+ TP_ARGS(drawobj, inflight, ticks, secs, usecs, rb, rptr),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -72,14 +72,14 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
__field(int, q_inflight)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->inflight = inflight;
- __entry->flags = cmdbatch->flags;
+ __entry->flags = drawobj->flags;
__entry->ticks = ticks;
__entry->secs = secs;
__entry->usecs = usecs;
- __entry->prio = cmdbatch->context->priority;
+ __entry->prio = drawobj->context->priority;
__entry->rb_id = rb->id;
__entry->rptr = rptr;
__entry->wptr = rb->wptr;
@@ -90,7 +90,7 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
__entry->id, __entry->prio, __entry->timestamp,
__entry->inflight,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none",
+ KGSL_DRAWOBJ_FLAGS) : "none",
__entry->ticks, __entry->secs, __entry->usecs,
__entry->rb_id, __entry->rptr, __entry->wptr,
__entry->q_inflight
@@ -98,10 +98,11 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
);
TRACE_EVENT(adreno_cmdbatch_retired,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight,
+ TP_PROTO(struct kgsl_drawobj *drawobj, int inflight,
uint64_t start, uint64_t retire,
- struct adreno_ringbuffer *rb, unsigned int rptr),
- TP_ARGS(cmdbatch, inflight, start, retire, rb, rptr),
+ struct adreno_ringbuffer *rb, unsigned int rptr,
+ unsigned long fault_recovery),
+ TP_ARGS(drawobj, inflight, start, retire, rb, rptr, fault_recovery),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -115,16 +116,17 @@ TRACE_EVENT(adreno_cmdbatch_retired,
__field(unsigned int, rptr)
__field(unsigned int, wptr)
__field(int, q_inflight)
+ __field(unsigned long, fault_recovery)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->inflight = inflight;
- __entry->recovery = cmdbatch->fault_recovery;
- __entry->flags = cmdbatch->flags;
+ __entry->recovery = fault_recovery;
+ __entry->flags = drawobj->flags;
__entry->start = start;
__entry->retire = retire;
- __entry->prio = cmdbatch->context->priority;
+ __entry->prio = drawobj->context->priority;
__entry->rb_id = rb->id;
__entry->rptr = rptr;
__entry->wptr = rb->wptr;
@@ -138,7 +140,7 @@ TRACE_EVENT(adreno_cmdbatch_retired,
__print_flags(__entry->recovery, "|",
ADRENO_FT_TYPES) : "none",
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none",
+ KGSL_DRAWOBJ_FLAGS) : "none",
__entry->start,
__entry->retire,
__entry->rb_id, __entry->rptr, __entry->wptr,
@@ -147,16 +149,16 @@ TRACE_EVENT(adreno_cmdbatch_retired,
);
TRACE_EVENT(adreno_cmdbatch_fault,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int fault),
- TP_ARGS(cmdbatch, fault),
+ TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int fault),
+ TP_ARGS(cmdobj, fault),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
__field(unsigned int, fault)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = cmdobj->base.context->id;
+ __entry->timestamp = cmdobj->base.timestamp;
__entry->fault = fault;
),
TP_printk(
@@ -171,16 +173,16 @@ TRACE_EVENT(adreno_cmdbatch_fault,
);
TRACE_EVENT(adreno_cmdbatch_recovery,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int action),
- TP_ARGS(cmdbatch, action),
+ TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int action),
+ TP_ARGS(cmdobj, action),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
__field(unsigned int, action)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = cmdobj->base.context->id;
+ __entry->timestamp = cmdobj->base.timestamp;
__entry->action = action;
),
TP_printk(
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 88581b079246..add4590bbb90 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -36,7 +36,7 @@
#include "kgsl_cffdump.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_sync.h"
@@ -1497,11 +1497,17 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_ringbuffer_issueibcmds *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
+ struct kgsl_drawobj *drawobj;
+ struct kgsl_drawobj_cmd *cmdobj;
long result = -EINVAL;
/* The legacy functions don't support synchronization commands */
- if ((param->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER)))
+ if ((param->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER)))
+ return -EINVAL;
+
+ /* Sanity check the number of IBs */
+ if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST &&
+ (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS))
return -EINVAL;
/* Get the context */
@@ -1509,23 +1515,20 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
if (context == NULL)
return -EINVAL;
- /* Create a command batch */
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags,
+ CMDOBJ_TYPE);
+ if (IS_ERR(cmdobj)) {
+ kgsl_context_put(context);
+ return PTR_ERR(cmdobj);
}
- if (param->flags & KGSL_CMDBATCH_SUBMIT_IB_LIST) {
- /* Sanity check the number of IBs */
- if (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS) {
- result = -EINVAL;
- goto done;
- }
- result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch,
+ drawobj = DRAWOBJ(cmdobj);
+
+ if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST)
+ result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
(void __user *) param->ibdesc_addr,
param->numibs);
- } else {
+ else {
struct kgsl_ibdesc ibdesc;
/* Ultra legacy path */
@@ -1533,83 +1536,119 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
ibdesc.sizedwords = param->numibs;
ibdesc.ctrl = 0;
- result = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ result = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
}
- if (result)
- goto done;
-
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ if (result == 0)
+ result = dev_priv->device->ftbl->queue_cmds(dev_priv, context,
+ &drawobj, 1, &param->timestamp);
-done:
/*
* -EPROTO is a "success" error - it just tells the user that the
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
kgsl_context_put(context);
return result;
}
+/* Returns 0 on failure. Returns command type(s) on success */
+static unsigned int _process_command_input(struct kgsl_device *device,
+ unsigned int flags, unsigned int numcmds,
+ unsigned int numobjs, unsigned int numsyncs)
+{
+ if (numcmds > KGSL_MAX_NUMIBS ||
+ numobjs > KGSL_MAX_NUMIBS ||
+ numsyncs > KGSL_MAX_SYNCPOINTS)
+ return 0;
+
+ /*
+ * The SYNC bit is supposed to identify a dummy sync object
+ * so warn the user if they specified any IBs with it.
+ * A MARKER command can either have IBs or not but if the
+ * command has 0 IBs it is automatically assumed to be a marker.
+ */
+
+ /* If they specify the flag, go with what they say */
+ if (flags & KGSL_DRAWOBJ_MARKER)
+ return MARKEROBJ_TYPE;
+ else if (flags & KGSL_DRAWOBJ_SYNC)
+ return SYNCOBJ_TYPE;
+
+ /* If not, deduce what they meant */
+ if (numsyncs && numcmds)
+ return SYNCOBJ_TYPE | CMDOBJ_TYPE;
+ else if (numsyncs)
+ return SYNCOBJ_TYPE;
+ else if (numcmds)
+ return CMDOBJ_TYPE;
+ else if (numcmds == 0)
+ return MARKEROBJ_TYPE;
+
+ return 0;
+}
+
long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_submit_commands *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
- long result = -EINVAL;
-
- /*
- * The SYNC bit is supposed to identify a dummy sync object so warn the
- * user if they specified any IBs with it. A MARKER command can either
- * have IBs or not but if the command has 0 IBs it is automatically
- * assumed to be a marker. If none of the above make sure that the user
- * specified a sane number of IBs
- */
-
- if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds)
- KGSL_DEV_ERR_ONCE(device,
- "Commands specified with the SYNC flag. They will be ignored\n");
- else if (param->numcmds > KGSL_MAX_NUMIBS)
- return -EINVAL;
- else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0)
- param->flags |= KGSL_CMDBATCH_MARKER;
+ struct kgsl_drawobj *drawobj[2];
+ unsigned int type;
+ long result;
+ unsigned int i = 0;
- /* Make sure that we don't have too many syncpoints */
- if (param->numsyncs > KGSL_MAX_SYNCPOINTS)
+ type = _process_command_input(device, param->flags, param->numcmds, 0,
+ param->numsyncs);
+ if (!type)
return -EINVAL;
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context == NULL)
return -EINVAL;
- /* Create a command batch */
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ if (type & SYNCOBJ_TYPE) {
+ struct kgsl_drawobj_sync *syncobj =
+ kgsl_drawobj_sync_create(device, context);
+ if (IS_ERR(syncobj)) {
+ result = PTR_ERR(syncobj);
+ goto done;
+ }
+
+ drawobj[i++] = DRAWOBJ(syncobj);
+
+ result = kgsl_drawobj_sync_add_syncpoints(device, syncobj,
+ param->synclist, param->numsyncs);
+ if (result)
+ goto done;
}
- result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch,
- param->cmdlist, param->numcmds);
- if (result)
- goto done;
+ if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
+ struct kgsl_drawobj_cmd *cmdobj =
+ kgsl_drawobj_cmd_create(device,
+ context, param->flags, type);
+ if (IS_ERR(cmdobj)) {
+ result = PTR_ERR(cmdobj);
+ goto done;
+ }
- result = kgsl_cmdbatch_add_syncpoints(device, cmdbatch,
- param->synclist, param->numsyncs);
- if (result)
- goto done;
+ drawobj[i++] = DRAWOBJ(cmdobj);
- /* If no profiling buffer was specified, clear the flag */
- if (cmdbatch->profiling_buf_entry == NULL)
- cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING;
+ result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
+ param->cmdlist, param->numcmds);
+ if (result)
+ goto done;
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ /* If no profiling buffer was specified, clear the flag */
+ if (cmdobj->profiling_buf_entry == NULL)
+ DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING;
+ }
+
+ result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
+ i, &param->timestamp);
done:
/*
@@ -1617,7 +1656,9 @@ done:
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ while (i--)
+ kgsl_drawobj_destroy(drawobj[i]);
+
kgsl_context_put(context);
return result;
@@ -1629,63 +1670,69 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
struct kgsl_gpu_command *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
-
- long result = -EINVAL;
+ struct kgsl_drawobj *drawobj[2];
+ unsigned int type;
+ long result;
+ unsigned int i = 0;
- /*
- * The SYNC bit is supposed to identify a dummy sync object so warn the
- * user if they specified any IBs with it. A MARKER command can either
- * have IBs or not but if the command has 0 IBs it is automatically
- * assumed to be a marker. If none of the above make sure that the user
- * specified a sane number of IBs
- */
- if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds)
- KGSL_DEV_ERR_ONCE(device,
- "Commands specified with the SYNC flag. They will be ignored\n");
- else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0)
- param->flags |= KGSL_CMDBATCH_MARKER;
-
- /* Make sure that the memobj and syncpoint count isn't too big */
- if (param->numcmds > KGSL_MAX_NUMIBS ||
- param->numobjs > KGSL_MAX_NUMIBS ||
- param->numsyncs > KGSL_MAX_SYNCPOINTS)
+ type = _process_command_input(device, param->flags, param->numcmds,
+ param->numobjs, param->numsyncs);
+ if (!type)
return -EINVAL;
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context == NULL)
return -EINVAL;
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ if (type & SYNCOBJ_TYPE) {
+ struct kgsl_drawobj_sync *syncobj =
+ kgsl_drawobj_sync_create(device, context);
+
+ if (IS_ERR(syncobj)) {
+ result = PTR_ERR(syncobj);
+ goto done;
+ }
+
+ drawobj[i++] = DRAWOBJ(syncobj);
+
+ result = kgsl_drawobj_sync_add_synclist(device, syncobj,
+ to_user_ptr(param->synclist),
+ param->syncsize, param->numsyncs);
+ if (result)
+ goto done;
}
- result = kgsl_cmdbatch_add_cmdlist(device, cmdbatch,
- to_user_ptr(param->cmdlist),
- param->cmdsize, param->numcmds);
- if (result)
- goto done;
+ if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
+ struct kgsl_drawobj_cmd *cmdobj =
+ kgsl_drawobj_cmd_create(device,
+ context, param->flags, type);
- result = kgsl_cmdbatch_add_memlist(device, cmdbatch,
- to_user_ptr(param->objlist),
- param->objsize, param->numobjs);
- if (result)
- goto done;
+ if (IS_ERR(cmdobj)) {
+ result = PTR_ERR(cmdobj);
+ goto done;
+ }
- result = kgsl_cmdbatch_add_synclist(device, cmdbatch,
- to_user_ptr(param->synclist),
- param->syncsize, param->numsyncs);
- if (result)
- goto done;
+ drawobj[i++] = DRAWOBJ(cmdobj);
+
+ result = kgsl_drawobj_cmd_add_cmdlist(device, cmdobj,
+ to_user_ptr(param->cmdlist),
+ param->cmdsize, param->numcmds);
+ if (result)
+ goto done;
- /* If no profiling buffer was specified, clear the flag */
- if (cmdbatch->profiling_buf_entry == NULL)
- cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING;
+ result = kgsl_drawobj_cmd_add_memlist(device, cmdobj,
+ to_user_ptr(param->objlist),
+ param->objsize, param->numobjs);
+ if (result)
+ goto done;
+
+ /* If no profiling buffer was specified, clear the flag */
+ if (cmdobj->profiling_buf_entry == NULL)
+ DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING;
+ }
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
+ i, &param->timestamp);
done:
/*
@@ -1693,7 +1740,8 @@ done:
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ while (i--)
+ kgsl_drawobj_destroy(drawobj[i]);
kgsl_context_put(context);
return result;
@@ -4600,7 +4648,7 @@ static void kgsl_core_exit(void)
kgsl_driver.class = NULL;
}
- kgsl_cmdbatch_exit();
+ kgsl_drawobj_exit();
kgsl_memfree_exit();
unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
@@ -4676,7 +4724,7 @@ static int __init kgsl_core_init(void)
kgsl_events_init();
- result = kgsl_cmdbatch_init();
+ result = kgsl_drawobj_init();
if (result)
goto err;
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 7ac84b777051..826c4edb3582 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -28,6 +28,25 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
+/*
+ * --- kgsl drawobj flags ---
+ * These flags are same as --- drawobj flags ---
+ * but renamed to reflect that cmdbatch is renamed to drawobj.
+ */
+#define KGSL_DRAWOBJ_MEMLIST KGSL_CMDBATCH_MEMLIST
+#define KGSL_DRAWOBJ_MARKER KGSL_CMDBATCH_MARKER
+#define KGSL_DRAWOBJ_SUBMIT_IB_LIST KGSL_CMDBATCH_SUBMIT_IB_LIST
+#define KGSL_DRAWOBJ_CTX_SWITCH KGSL_CMDBATCH_CTX_SWITCH
+#define KGSL_DRAWOBJ_PROFILING KGSL_CMDBATCH_PROFILING
+#define KGSL_DRAWOBJ_PROFILING_KTIME KGSL_CMDBATCH_PROFILING_KTIME
+#define KGSL_DRAWOBJ_END_OF_FRAME KGSL_CMDBATCH_END_OF_FRAME
+#define KGSL_DRAWOBJ_SYNC KGSL_CMDBATCH_SYNC
+#define KGSL_DRAWOBJ_PWR_CONSTRAINT KGSL_CMDBATCH_PWR_CONSTRAINT
+#define KGSL_DRAWOBJ_SPARSE KGSL_CMDBATCH_SPARSE
+
+#define kgsl_drawobj_profiling_buffer kgsl_cmdbatch_profiling_buffer
+
+
/* The number of memstore arrays limits the number of contexts allowed.
* If more contexts are needed, update multiple for MEMSTORE_SIZE
*/
diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c
index 8e783f8ce017..3337570477f9 100644
--- a/drivers/gpu/msm/kgsl_cffdump.c
+++ b/drivers/gpu/msm/kgsl_cffdump.c
@@ -705,7 +705,7 @@ static int kgsl_cffdump_capture_adreno_ib_cff(struct kgsl_device *device,
*/
int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
int ret = 0;
struct kgsl_memobj_node *ib;
@@ -713,7 +713,7 @@ int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
if (!device->cff_dump_enable)
return 0;
/* Dump CFF for IB and all objects in it */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &cmdobj->cmdlist, node) {
ret = kgsl_cffdump_capture_adreno_ib_cff(
device, context->proc_priv, ib->gpuaddr,
ib->size >> 2);
diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h
index 315a097ba817..14bc397cb570 100644
--- a/drivers/gpu/msm/kgsl_cffdump.h
+++ b/drivers/gpu/msm/kgsl_cffdump.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011,2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2011,2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -58,7 +58,7 @@ int kgsl_cff_dump_enable_set(void *data, u64 val);
int kgsl_cff_dump_enable_get(void *data, u64 *val);
int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch);
+ struct kgsl_drawobj_cmd *cmdobj);
void kgsl_cffdump_printline(int id, uint opcode, uint op1, uint op2,
uint op3, uint op4, uint op5);
@@ -164,7 +164,7 @@ static inline void kgsl_cffdump_user_event(struct kgsl_device *device,
static inline int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.h b/drivers/gpu/msm/kgsl_cmdbatch.h
deleted file mode 100644
index d5cbf375b5d3..000000000000
--- a/drivers/gpu/msm/kgsl_cmdbatch.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __KGSL_CMDBATCH_H
-#define __KGSL_CMDBATCH_H
-
-#define KGSL_CMDBATCH_FLAGS \
- { KGSL_CMDBATCH_MARKER, "MARKER" }, \
- { KGSL_CMDBATCH_CTX_SWITCH, "CTX_SWITCH" }, \
- { KGSL_CMDBATCH_SYNC, "SYNC" }, \
- { KGSL_CMDBATCH_END_OF_FRAME, "EOF" }, \
- { KGSL_CMDBATCH_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \
- { KGSL_CMDBATCH_SUBMIT_IB_LIST, "IB_LIST" }
-
-/**
- * struct kgsl_cmdbatch - KGSl command descriptor
- * @device: KGSL GPU device that the command was created for
- * @context: KGSL context that created the command
- * @timestamp: Timestamp assigned to the command
- * @flags: flags
- * @priv: Internal flags
- * @fault_policy: Internal policy describing how to handle this command in case
- * of a fault
- * @fault_recovery: recovery actions actually tried for this batch
- * @refcount: kref structure to maintain the reference count
- * @cmdlist: List of IBs to issue
- * @memlist: List of all memory used in this command batch
- * @synclist: Array of context/timestamp tuples to wait for before issuing
- * @numsyncs: Number of sync entries in the array
- * @pending: Bitmask of sync events that are active
- * @timer: a timer used to track possible sync timeouts for this cmdbatch
- * @marker_timestamp: For markers, the timestamp of the last "real" command that
- * was queued
- * @profiling_buf_entry: Mem entry containing the profiling buffer
- * @profiling_buffer_gpuaddr: GPU virt address of the profile buffer added here
- * for easy access
- * @profile_index: Index to store the start/stop ticks in the kernel profiling
- * buffer
- * @submit_ticks: Variable to hold ticks at the time of cmdbatch submit.
- * @global_ts: The ringbuffer timestamp corresponding to this cmdbatch
- * @timeout_jiffies: For a syncpoint cmdbatch the jiffies at which the
- * timer will expire
- * This structure defines an atomic batch of command buffers issued from
- * userspace.
- */
-struct kgsl_cmdbatch {
- struct kgsl_device *device;
- struct kgsl_context *context;
- uint32_t timestamp;
- uint32_t flags;
- unsigned long priv;
- unsigned long fault_policy;
- unsigned long fault_recovery;
- struct kref refcount;
- struct list_head cmdlist;
- struct list_head memlist;
- struct kgsl_cmdbatch_sync_event *synclist;
- unsigned int numsyncs;
- unsigned long pending;
- struct timer_list timer;
- unsigned int marker_timestamp;
- struct kgsl_mem_entry *profiling_buf_entry;
- uint64_t profiling_buffer_gpuaddr;
- unsigned int profile_index;
- uint64_t submit_ticks;
- unsigned int global_ts;
- unsigned long timeout_jiffies;
-};
-
-/**
- * struct kgsl_cmdbatch_sync_event
- * @id: identifer (positiion within the pending bitmap)
- * @type: Syncpoint type
- * @cmdbatch: Pointer to the cmdbatch that owns the sync event
- * @context: Pointer to the KGSL context that owns the cmdbatch
- * @timestamp: Pending timestamp for the event
- * @handle: Pointer to a sync fence handle
- * @device: Pointer to the KGSL device
- */
-struct kgsl_cmdbatch_sync_event {
- unsigned int id;
- int type;
- struct kgsl_cmdbatch *cmdbatch;
- struct kgsl_context *context;
- unsigned int timestamp;
- struct kgsl_sync_fence_waiter *handle;
- struct kgsl_device *device;
-};
-
-/**
- * enum kgsl_cmdbatch_priv - Internal cmdbatch flags
- * @CMDBATCH_FLAG_SKIP - skip the entire command batch
- * @CMDBATCH_FLAG_FORCE_PREAMBLE - Force the preamble on for the cmdbatch
- * @CMDBATCH_FLAG_WFI - Force wait-for-idle for the submission
- * @CMDBATCH_FLAG_PROFILE - store the start / retire ticks for the command batch
- * in the profiling buffer
- * @CMDBATCH_FLAG_FENCE_LOG - Set if the cmdbatch is dumping fence logs via the
- * cmdbatch timer - this is used to avoid recursion
- */
-
-enum kgsl_cmdbatch_priv {
- CMDBATCH_FLAG_SKIP = 0,
- CMDBATCH_FLAG_FORCE_PREAMBLE,
- CMDBATCH_FLAG_WFI,
- CMDBATCH_FLAG_PROFILE,
- CMDBATCH_FLAG_FENCE_LOG,
-};
-
-
-int kgsl_cmdbatch_add_memobj(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_ibdesc *ibdesc);
-
-int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmd_syncpoint *sync);
-
-struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int flags);
-int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc);
-int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count);
-int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count);
-int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-
-int kgsl_cmdbatch_init(void);
-void kgsl_cmdbatch_exit(void);
-
-void kgsl_dump_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch);
-
-void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch);
-
-void kgsl_cmdbatch_destroy_object(struct kref *kref);
-
-static inline bool kgsl_cmdbatch_events_pending(struct kgsl_cmdbatch *cmdbatch)
-{
- return !bitmap_empty(&cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
-}
-
-static inline bool kgsl_cmdbatch_event_pending(struct kgsl_cmdbatch *cmdbatch,
- unsigned int bit)
-{
- if (bit >= KGSL_MAX_SYNCPOINTS)
- return false;
-
- return test_bit(bit, &cmdbatch->pending);
-}
-
-#endif /* __KGSL_CMDBATCH_H */
diff --git a/drivers/gpu/msm/kgsl_compat.h b/drivers/gpu/msm/kgsl_compat.h
index ca1685e5fcf5..7681d74fb108 100644
--- a/drivers/gpu/msm/kgsl_compat.h
+++ b/drivers/gpu/msm/kgsl_compat.h
@@ -236,8 +236,8 @@ static inline compat_size_t sizet_to_compat(size_t size)
return (compat_size_t)size;
}
-int kgsl_cmdbatch_create_compat(struct kgsl_device *device, unsigned int flags,
- struct kgsl_cmdbatch *cmdbatch, void __user *cmdlist,
+int kgsl_drawobj_create_compat(struct kgsl_device *device, unsigned int flags,
+ struct kgsl_drawobj *drawobj, void __user *cmdlist,
unsigned int numcmds, void __user *synclist,
unsigned int numsyncs);
@@ -245,8 +245,8 @@ long kgsl_compat_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg);
#else
-static inline int kgsl_cmdbatch_create_compat(struct kgsl_device *device,
- unsigned int flags, struct kgsl_cmdbatch *cmdbatch,
+static inline int kgsl_drawobj_create_compat(struct kgsl_device *device,
+ unsigned int flags, struct kgsl_drawobj *drawobj,
void __user *cmdlist, unsigned int numcmds,
void __user *synclist, unsigned int numsyncs)
{
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 24511a4de6f1..04935e8d0019 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -25,7 +25,7 @@
#include "kgsl_pwrscale.h"
#include "kgsl_snapshot.h"
#include "kgsl_sharedmem.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#define KGSL_IOCTL_FUNC(_cmd, _func) \
[_IOC_NR((_cmd))] = \
@@ -127,9 +127,9 @@ struct kgsl_functable {
unsigned int msecs);
int (*readtimestamp) (struct kgsl_device *device, void *priv,
enum kgsl_timestamp_type type, unsigned int *timestamp);
- int (*issueibcmds) (struct kgsl_device_private *dev_priv,
- struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamps);
+ int (*queue_cmds)(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count, uint32_t *timestamp);
void (*power_stats)(struct kgsl_device *device,
struct kgsl_power_stats *stats);
unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid);
@@ -186,7 +186,7 @@ long kgsl_ioctl_helper(struct file *filep, unsigned int cmd, unsigned long arg,
/**
* struct kgsl_memobj_node - Memory object descriptor
- * @node: Local list node for the cmdbatch
+ * @node: Local list node for the object
* @id: GPU memory ID for the object
* offset: Offset within the object
* @gpuaddr: GPU address for the object
@@ -235,7 +235,7 @@ struct kgsl_device {
struct kgsl_mmu mmu;
struct completion hwaccess_gate;
- struct completion cmdbatch_gate;
+ struct completion halt_gate;
const struct kgsl_functable *ftbl;
struct work_struct idle_check_ws;
struct timer_list idle_timer;
@@ -292,7 +292,7 @@ struct kgsl_device {
#define KGSL_DEVICE_COMMON_INIT(_dev) \
.hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
- .cmdbatch_gate = COMPLETION_INITIALIZER((_dev).cmdbatch_gate),\
+ .halt_gate = COMPLETION_INITIALIZER((_dev).halt_gate),\
.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
kgsl_idle_check),\
.context_idr = IDR_INIT((_dev).context_idr),\
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.c b/drivers/gpu/msm/kgsl_drawobj.c
index 6272410ce544..7840daa6a3e2 100644
--- a/drivers/gpu/msm/kgsl_cmdbatch.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,17 +11,17 @@
*/
/*
- * KGSL command batch management
- * A command batch is a single submission from userland. The cmdbatch
+ * KGSL drawobj management
+ * A drawobj is a single submission from userland. The drawobj
* encapsulates everything about the submission : command buffers, flags and
* sync points.
*
* Sync points are events that need to expire before the
- * cmdbatch can be queued to the hardware. All synpoints are contained in an
- * array of kgsl_cmdbatch_sync_event structs in the command batch. There can be
+ * drawobj can be queued to the hardware. All synpoints are contained in an
+ * array of kgsl_drawobj_sync_event structs in the drawobj. There can be
* multiple types of events both internal ones (GPU events) and external
* triggers. As the events expire bits are cleared in a pending bitmap stored
- * in the command batch. The GPU will submit the command as soon as the bitmap
+ * in the drawobj. The GPU will submit the command as soon as the bitmap
* goes to zero indicating no more pending events.
*/
@@ -31,7 +31,7 @@
#include "kgsl.h"
#include "kgsl_device.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#include "kgsl_sync.h"
#include "kgsl_trace.h"
#include "kgsl_compat.h"
@@ -42,26 +42,43 @@
*/
static struct kmem_cache *memobjs_cache;
-/**
- * kgsl_cmdbatch_put() - Decrement the refcount for a command batch object
- * @cmdbatch: Pointer to the command batch object
- */
-static inline void kgsl_cmdbatch_put(struct kgsl_cmdbatch *cmdbatch)
+static void drawobj_destroy_object(struct kref *kref)
{
- if (cmdbatch)
- kref_put(&cmdbatch->refcount, kgsl_cmdbatch_destroy_object);
+ struct kgsl_drawobj *drawobj = container_of(kref,
+ struct kgsl_drawobj, refcount);
+ struct kgsl_drawobj_sync *syncobj;
+
+ kgsl_context_put(drawobj->context);
+
+ switch (drawobj->type) {
+ case SYNCOBJ_TYPE:
+ syncobj = SYNCOBJ(drawobj);
+ kfree(syncobj->synclist);
+ kfree(syncobj);
+ break;
+ case CMDOBJ_TYPE:
+ case MARKEROBJ_TYPE:
+ kfree(CMDOBJ(drawobj));
+ break;
+ }
+}
+
+static inline void drawobj_put(struct kgsl_drawobj *drawobj)
+{
+ if (drawobj)
+ kref_put(&drawobj->refcount, drawobj_destroy_object);
}
void kgsl_dump_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_sync *syncobj)
{
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ event = &syncobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(syncobj, i))
continue;
switch (event->type) {
@@ -90,32 +107,33 @@ void kgsl_dump_syncpoints(struct kgsl_device *device,
}
}
-static void _kgsl_cmdbatch_timer(unsigned long data)
+static void syncobj_timer(unsigned long data)
{
struct kgsl_device *device;
- struct kgsl_cmdbatch *cmdbatch = (struct kgsl_cmdbatch *) data;
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync *syncobj = (struct kgsl_drawobj_sync *) data;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- if (cmdbatch == NULL || cmdbatch->context == NULL)
+ if (syncobj == NULL || drawobj->context == NULL)
return;
- device = cmdbatch->context->device;
+ device = drawobj->context->device;
dev_err(device->dev,
"kgsl: possible gpu syncpoint deadlock for context %d timestamp %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
+ drawobj->context->id, drawobj->timestamp);
- set_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv);
- kgsl_context_dump(cmdbatch->context);
- clear_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv);
+ set_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
+ kgsl_context_dump(drawobj->context);
+ clear_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
dev_err(device->dev, " pending events:\n");
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ event = &syncobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(syncobj, i))
continue;
switch (event->type) {
@@ -137,48 +155,31 @@ static void _kgsl_cmdbatch_timer(unsigned long data)
dev_err(device->dev, "--gpu syncpoint deadlock print end--\n");
}
-/**
- * kgsl_cmdbatch_destroy_object() - Destroy a cmdbatch object
- * @kref: Pointer to the kref structure for this object
- *
- * Actually destroy a command batch object. Called from kgsl_cmdbatch_put
- */
-void kgsl_cmdbatch_destroy_object(struct kref *kref)
-{
- struct kgsl_cmdbatch *cmdbatch = container_of(kref,
- struct kgsl_cmdbatch, refcount);
-
- kgsl_context_put(cmdbatch->context);
-
- kfree(cmdbatch->synclist);
- kfree(cmdbatch);
-}
-EXPORT_SYMBOL(kgsl_cmdbatch_destroy_object);
-
/*
* a generic function to retire a pending sync event and (possibly)
* kick the dispatcher
*/
-static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
- struct kgsl_cmdbatch_sync_event *event)
+static void drawobj_sync_expire(struct kgsl_device *device,
+ struct kgsl_drawobj_sync_event *event)
{
+ struct kgsl_drawobj_sync *syncobj = event->syncobj;
/*
* Clear the event from the pending mask - if it is already clear, then
* leave without doing anything useful
*/
- if (!test_and_clear_bit(event->id, &event->cmdbatch->pending))
+ if (!test_and_clear_bit(event->id, &syncobj->pending))
return;
/*
* If no more pending events, delete the timer and schedule the command
* for dispatch
*/
- if (!kgsl_cmdbatch_events_pending(event->cmdbatch)) {
- del_timer_sync(&event->cmdbatch->timer);
+ if (!kgsl_drawobj_events_pending(event->syncobj)) {
+ del_timer_sync(&syncobj->timer);
if (device->ftbl->drawctxt_sched)
device->ftbl->drawctxt_sched(device,
- event->cmdbatch->context);
+ event->syncobj->base.context);
}
}
@@ -186,20 +187,20 @@ static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
* This function is called by the GPU event when the sync event timestamp
* expires
*/
-static void kgsl_cmdbatch_sync_func(struct kgsl_device *device,
+static void drawobj_sync_func(struct kgsl_device *device,
struct kgsl_event_group *group, void *priv, int result)
{
- struct kgsl_cmdbatch_sync_event *event = priv;
+ struct kgsl_drawobj_sync_event *event = priv;
- trace_syncpoint_timestamp_expire(event->cmdbatch,
+ trace_syncpoint_timestamp_expire(event->syncobj,
event->context, event->timestamp);
- kgsl_cmdbatch_sync_expire(device, event);
+ drawobj_sync_expire(device, event);
kgsl_context_put(event->context);
- kgsl_cmdbatch_put(event->cmdbatch);
+ drawobj_put(&event->syncobj->base);
}
-static inline void _free_memobj_list(struct list_head *list)
+static inline void memobj_list_free(struct list_head *list)
{
struct kgsl_memobj_node *mem, *tmpmem;
@@ -210,39 +211,28 @@ static inline void _free_memobj_list(struct list_head *list)
}
}
-/**
- * kgsl_cmdbatch_destroy() - Destroy a cmdbatch structure
- * @cmdbatch: Pointer to the command batch object to destroy
- *
- * Start the process of destroying a command batch. Cancel any pending events
- * and decrement the refcount. Asynchronous events can still signal after
- * kgsl_cmdbatch_destroy has returned.
- */
-void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
+static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
{
- unsigned int i;
+ struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
unsigned long pending;
-
- if (IS_ERR_OR_NULL(cmdbatch))
- return;
+ unsigned int i;
/* Zap the canary timer */
- del_timer_sync(&cmdbatch->timer);
+ del_timer_sync(&syncobj->timer);
/*
* Copy off the pending list and clear all pending events - this will
* render any subsequent asynchronous callback harmless
*/
- bitmap_copy(&pending, &cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
- bitmap_zero(&cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
+ bitmap_copy(&pending, &syncobj->pending, KGSL_MAX_SYNCPOINTS);
+ bitmap_zero(&syncobj->pending, KGSL_MAX_SYNCPOINTS);
/*
* Clear all pending events - this will render any subsequent async
* callbacks harmless
*/
-
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- struct kgsl_cmdbatch_sync_event *event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];
/* Don't do anything if the event has already expired */
if (!test_bit(i, &pending))
@@ -250,127 +240,152 @@ void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
switch (event->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
- kgsl_cancel_event(cmdbatch->device,
+ kgsl_cancel_event(drawobj->device,
&event->context->events, event->timestamp,
- kgsl_cmdbatch_sync_func, event);
+ drawobj_sync_func, event);
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
if (kgsl_sync_fence_async_cancel(event->handle))
- kgsl_cmdbatch_put(cmdbatch);
+ drawobj_put(drawobj);
break;
}
}
/*
- * Release the the refcount on the mem entry associated with the
- * cmdbatch profiling buffer
+ * If we cancelled an event, there's a good chance that the context is
+ * on a dispatcher queue, so schedule to get it removed.
+ */
+ if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) &&
+ drawobj->device->ftbl->drawctxt_sched)
+ drawobj->device->ftbl->drawctxt_sched(drawobj->device,
+ drawobj->context);
+
+}
+
+static void drawobj_destroy_cmd(struct kgsl_drawobj *drawobj)
+{
+ struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj);
+
+ /*
+ * Release the refcount on the mem entry associated with the
+ * ib profiling buffer
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING)
- kgsl_mem_entry_put(cmdbatch->profiling_buf_entry);
+ if (cmdobj->base.flags & KGSL_DRAWOBJ_PROFILING)
+ kgsl_mem_entry_put(cmdobj->profiling_buf_entry);
/* Destroy the cmdlist we created */
- _free_memobj_list(&cmdbatch->cmdlist);
+ memobj_list_free(&cmdobj->cmdlist);
/* Destroy the memlist we created */
- _free_memobj_list(&cmdbatch->memlist);
+ memobj_list_free(&cmdobj->memlist);
+}
- /*
- * If we cancelled an event, there's a good chance that the context is
- * on a dispatcher queue, so schedule to get it removed.
+/**
+ * kgsl_drawobj_destroy() - Destroy a kgsl object structure
+ * @obj: Pointer to the kgsl object to destroy
+ *
+ * Start the process of destroying a command batch. Cancel any pending events
+ * and decrement the refcount. Asynchronous events can still signal after
+ * kgsl_drawobj_destroy has returned.
*/
- if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) &&
- cmdbatch->device->ftbl->drawctxt_sched)
- cmdbatch->device->ftbl->drawctxt_sched(cmdbatch->device,
- cmdbatch->context);
+void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj)
+{
+ if (!drawobj)
+ return;
+
+ if (drawobj->type & SYNCOBJ_TYPE)
+ drawobj_destroy_sync(drawobj);
+ else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))
+ drawobj_destroy_cmd(drawobj);
+ else
+ return;
- kgsl_cmdbatch_put(cmdbatch);
+ drawobj_put(drawobj);
}
-EXPORT_SYMBOL(kgsl_cmdbatch_destroy);
+EXPORT_SYMBOL(kgsl_drawobj_destroy);
-/*
- * A callback that gets registered with kgsl_sync_fence_async_wait and is fired
- * when a fence is expired
- */
-static void kgsl_cmdbatch_sync_fence_func(void *priv)
+static void drawobj_sync_fence_func(void *priv)
{
- struct kgsl_cmdbatch_sync_event *event = priv;
+ struct kgsl_drawobj_sync_event *event = priv;
- trace_syncpoint_fence_expire(event->cmdbatch,
+ trace_syncpoint_fence_expire(event->syncobj,
event->handle ? event->handle->name : "unknown");
- kgsl_cmdbatch_sync_expire(event->device, event);
+ drawobj_sync_expire(event->device, event);
- kgsl_cmdbatch_put(event->cmdbatch);
+ drawobj_put(&event->syncobj->base);
}
-/* kgsl_cmdbatch_add_sync_fence() - Add a new sync fence syncpoint
+/* drawobj_add_sync_fence() - Add a new sync fence syncpoint
* @device: KGSL device
- * @cmdbatch: KGSL cmdbatch to add the sync point to
- * @priv: Private sructure passed by the user
+ * @syncobj: KGSL sync obj to add the sync point to
+ * @priv: Private structure passed by the user
*
- * Add a new fence sync syncpoint to the cmdbatch.
+ * Add a new fence sync syncpoint to the sync obj.
*/
-static int kgsl_cmdbatch_add_sync_fence(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void *priv)
+static int drawobj_add_sync_fence(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void *priv)
{
struct kgsl_cmd_syncpoint_fence *sync = priv;
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_drawobj_sync_event *event;
unsigned int id;
- kref_get(&cmdbatch->refcount);
+ kref_get(&drawobj->refcount);
- id = cmdbatch->numsyncs++;
+ id = syncobj->numsyncs++;
- event = &cmdbatch->synclist[id];
+ event = &syncobj->synclist[id];
event->id = id;
event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
- event->cmdbatch = cmdbatch;
+ event->syncobj = syncobj;
event->device = device;
event->context = NULL;
- set_bit(event->id, &cmdbatch->pending);
+ set_bit(event->id, &syncobj->pending);
event->handle = kgsl_sync_fence_async_wait(sync->fd,
- kgsl_cmdbatch_sync_fence_func, event);
+ drawobj_sync_fence_func, event);
if (IS_ERR_OR_NULL(event->handle)) {
int ret = PTR_ERR(event->handle);
- clear_bit(event->id, &cmdbatch->pending);
+ clear_bit(event->id, &syncobj->pending);
event->handle = NULL;
- kgsl_cmdbatch_put(cmdbatch);
+ drawobj_put(drawobj);
/*
* If ret == 0 the fence was already signaled - print a trace
* message so we can track that
*/
if (ret == 0)
- trace_syncpoint_fence_expire(cmdbatch, "signaled");
+ trace_syncpoint_fence_expire(syncobj, "signaled");
return ret;
}
- trace_syncpoint_fence(cmdbatch, event->handle->name);
+ trace_syncpoint_fence(syncobj, event->handle->name);
return 0;
}
-/* kgsl_cmdbatch_add_sync_timestamp() - Add a new sync point for a cmdbatch
+/* drawobj_add_sync_timestamp() - Add a new sync point for a sync obj
* @device: KGSL device
- * @cmdbatch: KGSL cmdbatch to add the sync point to
- * @priv: Private sructure passed by the user
+ * @syncobj: KGSL sync obj to add the sync point to
+ * @priv: Private structure passed by the user
*
- * Add a new sync point timestamp event to the cmdbatch.
+ * Add a new sync point timestamp event to the sync obj.
*/
-static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void *priv)
+static int drawobj_add_sync_timestamp(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void *priv)
{
struct kgsl_cmd_syncpoint_timestamp *sync = priv;
- struct kgsl_context *context = kgsl_context_get(cmdbatch->device,
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_context *context = kgsl_context_get(device,
sync->context_id);
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
int ret = -EINVAL;
unsigned int id;
@@ -384,8 +399,9 @@ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
* create a sync point on a future timestamp.
*/
- if (context == cmdbatch->context) {
+ if (context == drawobj->context) {
unsigned int queued;
+
kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED,
&queued);
@@ -397,29 +413,29 @@ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
}
}
- kref_get(&cmdbatch->refcount);
+ kref_get(&drawobj->refcount);
- id = cmdbatch->numsyncs++;
+ id = syncobj->numsyncs++;
- event = &cmdbatch->synclist[id];
+ event = &syncobj->synclist[id];
event->id = id;
event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP;
- event->cmdbatch = cmdbatch;
+ event->syncobj = syncobj;
event->context = context;
event->timestamp = sync->timestamp;
event->device = device;
- set_bit(event->id, &cmdbatch->pending);
+ set_bit(event->id, &syncobj->pending);
ret = kgsl_add_event(device, &context->events, sync->timestamp,
- kgsl_cmdbatch_sync_func, event);
+ drawobj_sync_func, event);
if (ret) {
- clear_bit(event->id, &cmdbatch->pending);
- kgsl_cmdbatch_put(cmdbatch);
+ clear_bit(event->id, &syncobj->pending);
+ drawobj_put(drawobj);
} else {
- trace_syncpoint_timestamp(cmdbatch, context, sync->timestamp);
+ trace_syncpoint_timestamp(syncobj, context, sync->timestamp);
}
done:
@@ -430,43 +446,46 @@ done:
}
/**
- * kgsl_cmdbatch_add_sync() - Add a sync point to a command batch
+ * kgsl_drawobj_sync_add_sync() - Add a sync point to a command
+ * batch
* @device: Pointer to the KGSL device struct for the GPU
- * @cmdbatch: Pointer to the cmdbatch
+ * @syncobj: Pointer to the sync obj
* @sync: Pointer to the user-specified struct defining the syncpoint
*
- * Create a new sync point in the cmdbatch based on the user specified
- * parameters
+ * Create a new sync point in the sync obj based on the
+ * user specified parameters
*/
-int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch,
+int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj,
struct kgsl_cmd_syncpoint *sync)
{
void *priv;
int ret, psize;
- int (*func)(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ int (*func)(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj,
void *priv);
switch (sync->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
psize = sizeof(struct kgsl_cmd_syncpoint_timestamp);
- func = kgsl_cmdbatch_add_sync_timestamp;
+ func = drawobj_add_sync_timestamp;
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
psize = sizeof(struct kgsl_cmd_syncpoint_fence);
- func = kgsl_cmdbatch_add_sync_fence;
+ func = drawobj_add_sync_fence;
break;
default:
KGSL_DRV_ERR(device,
"bad syncpoint type ctxt %d type 0x%x size %zu\n",
- cmdbatch->context->id, sync->type, sync->size);
+ drawobj->context->id, sync->type, sync->size);
return -EINVAL;
}
if (sync->size != psize) {
KGSL_DRV_ERR(device,
"bad syncpoint size ctxt %d type 0x%x size %zu\n",
- cmdbatch->context->id, sync->type, sync->size);
+ drawobj->context->id, sync->type, sync->size);
return -EINVAL;
}
@@ -479,30 +498,32 @@ int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
return -EFAULT;
}
- ret = func(device, cmdbatch, priv);
+ ret = func(device, syncobj, priv);
kfree(priv);
return ret;
}
static void add_profiling_buffer(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, uint64_t gpuaddr, uint64_t size,
+ struct kgsl_drawobj_cmd *cmdobj,
+ uint64_t gpuaddr, uint64_t size,
unsigned int id, uint64_t offset)
{
struct kgsl_mem_entry *entry;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- if (!(cmdbatch->flags & KGSL_CMDBATCH_PROFILING))
+ if (!(drawobj->flags & KGSL_DRAWOBJ_PROFILING))
return;
/* Only the first buffer entry counts - ignore the rest */
- if (cmdbatch->profiling_buf_entry != NULL)
+ if (cmdobj->profiling_buf_entry != NULL)
return;
if (id != 0)
- entry = kgsl_sharedmem_find_id(cmdbatch->context->proc_priv,
+ entry = kgsl_sharedmem_find_id(drawobj->context->proc_priv,
id);
else
- entry = kgsl_sharedmem_find(cmdbatch->context->proc_priv,
+ entry = kgsl_sharedmem_find(drawobj->context->proc_priv,
gpuaddr);
if (entry != NULL) {
@@ -515,47 +536,50 @@ static void add_profiling_buffer(struct kgsl_device *device,
if (entry == NULL) {
KGSL_DRV_ERR(device,
"ignore bad profile buffer ctxt %d id %d offset %lld gpuaddr %llx size %lld\n",
- cmdbatch->context->id, id, offset, gpuaddr, size);
+ drawobj->context->id, id, offset, gpuaddr, size);
return;
}
- cmdbatch->profiling_buf_entry = entry;
+ cmdobj->profiling_buf_entry = entry;
if (id != 0)
- cmdbatch->profiling_buffer_gpuaddr =
+ cmdobj->profiling_buffer_gpuaddr =
entry->memdesc.gpuaddr + offset;
else
- cmdbatch->profiling_buffer_gpuaddr = gpuaddr;
+ cmdobj->profiling_buffer_gpuaddr = gpuaddr;
}
/**
- * kgsl_cmdbatch_add_ibdesc() - Add a legacy ibdesc to a command batch
- * @cmdbatch: Pointer to the cmdbatch
+ * kgsl_drawobj_cmd_add_ibdesc() - Add a legacy ibdesc to a command
+ * batch
+ * @cmdobj: Pointer to the ib
* @ibdesc: Pointer to the user-specified struct defining the memory or IB
*
- * Create a new memory entry in the cmdbatch based on the user specified
- * parameters
+ * Create a new memory entry in the ib based on the
+ * user specified parameters
*/
-int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc)
+int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc)
{
uint64_t gpuaddr = (uint64_t) ibdesc->gpuaddr;
uint64_t size = (uint64_t) ibdesc->sizedwords << 2;
struct kgsl_memobj_node *mem;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
/* sanitize the ibdesc ctrl flags */
ibdesc->ctrl &= KGSL_IBDESC_MEMLIST | KGSL_IBDESC_PROFILING_BUFFER;
- if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST &&
+ if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
ibdesc->ctrl & KGSL_IBDESC_MEMLIST) {
if (ibdesc->ctrl & KGSL_IBDESC_PROFILING_BUFFER) {
- add_profiling_buffer(device, cmdbatch,
+ add_profiling_buffer(device, cmdobj,
gpuaddr, size, 0, 0);
return 0;
}
}
- if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))
+ /* Ignore if SYNC or MARKER is specified */
+ if (drawobj->type & (SYNCOBJ_TYPE | MARKEROBJ_TYPE))
return 0;
mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL);
@@ -569,74 +593,121 @@ int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
mem->offset = 0;
mem->flags = 0;
- if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST &&
- ibdesc->ctrl & KGSL_IBDESC_MEMLIST) {
+ if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
+ ibdesc->ctrl & KGSL_IBDESC_MEMLIST)
/* add to the memlist */
- list_add_tail(&mem->node, &cmdbatch->memlist);
- } else {
+ list_add_tail(&mem->node, &cmdobj->memlist);
+ else {
/* set the preamble flag if directed to */
- if (cmdbatch->context->flags & KGSL_CONTEXT_PREAMBLE &&
- list_empty(&cmdbatch->cmdlist))
+ if (drawobj->context->flags & KGSL_CONTEXT_PREAMBLE &&
+ list_empty(&cmdobj->cmdlist))
mem->flags = KGSL_CMDLIST_CTXTSWITCH_PREAMBLE;
/* add to the cmd list */
- list_add_tail(&mem->node, &cmdbatch->cmdlist);
+ list_add_tail(&mem->node, &cmdobj->cmdlist);
}
return 0;
}
+static inline int drawobj_init(struct kgsl_device *device,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj,
+ unsigned int type)
+{
+ /*
+ * Increase the reference count on the context so it doesn't disappear
+ * during the lifetime of this object
+ */
+ if (!_kgsl_context_get(context))
+ return -ENOENT;
+
+ kref_init(&drawobj->refcount);
+
+ drawobj->device = device;
+ drawobj->context = context;
+ drawobj->type = type;
+
+ return 0;
+}
+
/**
- * kgsl_cmdbatch_create() - Create a new cmdbatch structure
+ * kgsl_drawobj_sync_create() - Create a new sync obj
+ * structure
* @device: Pointer to a KGSL device struct
* @context: Pointer to a KGSL context struct
- * @flags: Flags for the cmdbatch
*
- * Allocate an new cmdbatch structure
+ * Allocate an new kgsl_drawobj_sync structure
*/
-struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int flags)
+struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
+ struct kgsl_context *context)
{
- struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL);
- if (cmdbatch == NULL)
+ struct kgsl_drawobj_sync *syncobj = kzalloc(sizeof(*syncobj),
+ GFP_KERNEL);
+ if (syncobj == NULL)
return ERR_PTR(-ENOMEM);
- /*
- * Increase the reference count on the context so it doesn't disappear
- * during the lifetime of this command batch
- */
+ if (drawobj_init(device, context, DRAWOBJ(syncobj), SYNCOBJ_TYPE)) {
+ kfree(syncobj);
+ return ERR_PTR(-ENOENT);
+ }
+
+ /* Add a timer to help debug sync deadlocks */
+ setup_timer(&syncobj->timer, syncobj_timer, (unsigned long) syncobj);
+
+ return syncobj;
+}
+
+/**
+ * kgsl_drawobj_cmd_create() - Create a new command obj
+ * structure
+ * @device: Pointer to a KGSL device struct
+ * @context: Pointer to a KGSL context struct
+ * @flags: Flags for the command obj
+ * @type: type of cmdobj MARKER/CMD
+ *
+ * Allocate a new kgsl_drawobj_cmd structure
+ */
+struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int flags,
+ unsigned int type)
+{
+ struct kgsl_drawobj_cmd *cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
+ struct kgsl_drawobj *drawobj;
+
+ if (cmdobj == NULL)
+ return ERR_PTR(-ENOMEM);
- if (!_kgsl_context_get(context)) {
- kfree(cmdbatch);
+ type &= CMDOBJ_TYPE | MARKEROBJ_TYPE;
+ if (type == 0) {
+ kfree(cmdobj);
+ return ERR_PTR(-EINVAL);
+ }
+
+ drawobj = DRAWOBJ(cmdobj);
+
+ if (drawobj_init(device, context, drawobj, type)) {
+ kfree(cmdobj);
return ERR_PTR(-ENOENT);
}
- kref_init(&cmdbatch->refcount);
- INIT_LIST_HEAD(&cmdbatch->cmdlist);
- INIT_LIST_HEAD(&cmdbatch->memlist);
-
- cmdbatch->device = device;
- cmdbatch->context = context;
- /* sanitize our flags for cmdbatches */
- cmdbatch->flags = flags & (KGSL_CMDBATCH_CTX_SWITCH
- | KGSL_CMDBATCH_MARKER
- | KGSL_CMDBATCH_END_OF_FRAME
- | KGSL_CMDBATCH_SYNC
- | KGSL_CMDBATCH_PWR_CONSTRAINT
- | KGSL_CMDBATCH_MEMLIST
- | KGSL_CMDBATCH_PROFILING
- | KGSL_CMDBATCH_PROFILING_KTIME);
+ /* sanitize our flags for drawobj's */
+ drawobj->flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
+ | KGSL_DRAWOBJ_MARKER
+ | KGSL_DRAWOBJ_END_OF_FRAME
+ | KGSL_DRAWOBJ_PWR_CONSTRAINT
+ | KGSL_DRAWOBJ_MEMLIST
+ | KGSL_DRAWOBJ_PROFILING
+ | KGSL_DRAWOBJ_PROFILING_KTIME);
- /* Add a timer to help debug sync deadlocks */
- setup_timer(&cmdbatch->timer, _kgsl_cmdbatch_timer,
- (unsigned long) cmdbatch);
+ INIT_LIST_HEAD(&cmdobj->cmdlist);
+ INIT_LIST_HEAD(&cmdobj->memlist);
- return cmdbatch;
+ return cmdobj;
}
#ifdef CONFIG_COMPAT
static int add_ibdesc_list_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
int i, ret = 0;
struct kgsl_ibdesc_compat ibdesc32;
@@ -654,7 +725,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device,
ibdesc.sizedwords = (size_t) ibdesc32.sizedwords;
ibdesc.ctrl = (unsigned int) ibdesc32.ctrl;
- ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
if (ret)
break;
@@ -665,7 +736,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device,
}
static int add_syncpoints_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
struct kgsl_cmd_syncpoint_compat sync32;
struct kgsl_cmd_syncpoint sync;
@@ -683,7 +754,7 @@ static int add_syncpoints_compat(struct kgsl_device *device,
sync.priv = compat_ptr(sync32.priv);
sync.size = (size_t) sync32.size;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
if (ret)
break;
@@ -694,26 +765,54 @@ static int add_syncpoints_compat(struct kgsl_device *device,
}
#else
static int add_ibdesc_list_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
return -EINVAL;
}
static int add_syncpoints_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
return -EINVAL;
}
#endif
-int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+/* Returns:
+ * -EINVAL: Bad data
+ * 0: All data fields are empty (nothing to do)
+ * 1: All list information is valid
+ */
+static int _verify_input_list(unsigned int count, void __user *ptr,
+ unsigned int size)
+{
+ /* Return early if nothing going on */
+ if (count == 0 && ptr == NULL && size == 0)
+ return 0;
+
+ /* Sanity check inputs */
+ if (count == 0 || ptr == NULL || size == 0)
+ return -EINVAL;
+
+ return 1;
+}
+
+int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
struct kgsl_ibdesc ibdesc;
+ struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
int i, ret;
+ /* Ignore everything if this is a MARKER */
+ if (baseobj->type & MARKEROBJ_TYPE)
+ return 0;
+
+ ret = _verify_input_list(count, ptr, sizeof(ibdesc));
+ if (ret <= 0)
+ return -EINVAL;
+
if (is_compat_task())
- return add_ibdesc_list_compat(device, cmdbatch, ptr, count);
+ return add_ibdesc_list_compat(device, cmdobj, ptr, count);
for (i = 0; i < count; i++) {
memset(&ibdesc, 0, sizeof(ibdesc));
@@ -721,7 +820,7 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
if (copy_from_user(&ibdesc, ptr, sizeof(ibdesc)))
return -EFAULT;
- ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
if (ret)
return ret;
@@ -731,8 +830,8 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
struct kgsl_cmd_syncpoint sync;
int i, ret;
@@ -740,17 +839,14 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
if (count == 0)
return 0;
- if (count > KGSL_MAX_SYNCPOINTS)
- return -EINVAL;
-
- cmdbatch->synclist = kcalloc(count,
- sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL);
+ syncobj->synclist = kcalloc(count,
+ sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);
- if (cmdbatch->synclist == NULL)
+ if (syncobj->synclist == NULL)
return -ENOMEM;
if (is_compat_task())
- return add_syncpoints_compat(device, cmdbatch, ptr, count);
+ return add_syncpoints_compat(device, syncobj, ptr, count);
for (i = 0; i < count; i++) {
memset(&sync, 0, sizeof(sync));
@@ -758,7 +854,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
if (copy_from_user(&sync, ptr, sizeof(sync)))
return -EFAULT;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
if (ret)
return ret;
@@ -768,7 +864,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
return 0;
}
-static int kgsl_cmdbatch_add_object(struct list_head *head,
+static int drawobj_add_object(struct list_head *head,
struct kgsl_command_object *obj)
{
struct kgsl_memobj_node *mem;
@@ -793,24 +889,22 @@ static int kgsl_cmdbatch_add_object(struct list_head *head,
KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \
KGSL_CMDLIST_IB_PREAMBLE)
-int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+/* This can only accept MARKEROBJ_TYPE and CMDOBJ_TYPE */
+int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_object obj;
- int i, ret = 0;
+ struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
+ int i, ret;
- /* Return early if nothing going on */
- if (count == 0 && ptr == NULL && size == 0)
+ /* Ignore everything if this is a MARKER */
+ if (baseobj->type & MARKEROBJ_TYPE)
return 0;
- /* Sanity check inputs */
- if (count == 0 || ptr == NULL || size == 0)
- return -EINVAL;
-
- /* Ignore all if SYNC or MARKER is specified */
- if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))
- return 0;
+ ret = _verify_input_list(count, ptr, size);
+ if (ret <= 0)
+ return ret;
for (i = 0; i < count; i++) {
memset(&obj, 0, sizeof(obj));
@@ -823,12 +917,12 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
if (!(obj.flags & CMDLIST_FLAGS)) {
KGSL_DRV_ERR(device,
"invalid cmdobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
- cmdbatch->context->id, obj.flags, obj.id,
+ baseobj->context->id, obj.flags, obj.id,
obj.offset, obj.gpuaddr, obj.size);
return -EINVAL;
}
- ret = kgsl_cmdbatch_add_object(&cmdbatch->cmdlist, &obj);
+ ret = drawobj_add_object(&cmdobj->cmdlist, &obj);
if (ret)
return ret;
@@ -838,20 +932,21 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_object obj;
- int i, ret = 0;
+ struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
+ int i, ret;
- /* Return early if nothing going on */
- if (count == 0 && ptr == NULL && size == 0)
+ /* Ignore everything if this is a MARKER */
+ if (baseobj->type & MARKEROBJ_TYPE)
return 0;
- /* Sanity check inputs */
- if (count == 0 || ptr == NULL || size == 0)
- return -EINVAL;
+ ret = _verify_input_list(count, ptr, size);
+ if (ret <= 0)
+ return ret;
for (i = 0; i < count; i++) {
memset(&obj, 0, sizeof(obj));
@@ -863,17 +958,16 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
if (!(obj.flags & KGSL_OBJLIST_MEMOBJ)) {
KGSL_DRV_ERR(device,
"invalid memobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
- cmdbatch->context->id, obj.flags, obj.id,
- obj.offset, obj.gpuaddr, obj.size);
+ DRAWOBJ(cmdobj)->context->id, obj.flags,
+ obj.id, obj.offset, obj.gpuaddr, obj.size);
return -EINVAL;
}
if (obj.flags & KGSL_OBJLIST_PROFILE)
- add_profiling_buffer(device, cmdbatch, obj.gpuaddr,
+ add_profiling_buffer(device, cmdobj, obj.gpuaddr,
obj.size, obj.id, obj.offset);
else {
- ret = kgsl_cmdbatch_add_object(&cmdbatch->memlist,
- &obj);
+ ret = drawobj_add_object(&cmdobj->memlist, &obj);
if (ret)
return ret;
}
@@ -884,29 +978,23 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_syncpoint syncpoint;
struct kgsl_cmd_syncpoint sync;
- int i, ret = 0;
-
- /* Return early if nothing going on */
- if (count == 0 && ptr == NULL && size == 0)
- return 0;
-
- /* Sanity check inputs */
- if (count == 0 || ptr == NULL || size == 0)
- return -EINVAL;
+ int i, ret;
- if (count > KGSL_MAX_SYNCPOINTS)
+ /* If creating a sync and the data is not there or wrong then error */
+ ret = _verify_input_list(count, ptr, size);
+ if (ret <= 0)
return -EINVAL;
- cmdbatch->synclist = kcalloc(count,
- sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL);
+ syncobj->synclist = kcalloc(count,
+ sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);
- if (cmdbatch->synclist == NULL)
+ if (syncobj->synclist == NULL)
return -ENOMEM;
for (i = 0; i < count; i++) {
@@ -920,7 +1008,7 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
sync.priv = to_user_ptr(syncpoint.priv);
sync.size = syncpoint.size;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
if (ret)
return ret;
@@ -930,13 +1018,13 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
return 0;
}
-void kgsl_cmdbatch_exit(void)
+void kgsl_drawobj_exit(void)
{
if (memobjs_cache != NULL)
kmem_cache_destroy(memobjs_cache);
}
-int kgsl_cmdbatch_init(void)
+int kgsl_drawobj_init(void)
{
memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0);
if (memobjs_cache == NULL) {
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
new file mode 100644
index 000000000000..89ed944c539a
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -0,0 +1,198 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __KGSL_DRAWOBJ_H
+#define __KGSL_DRAWOBJ_H
+
+#define DRAWOBJ(obj) (&obj->base)
+#define SYNCOBJ(obj) \
+ container_of(obj, struct kgsl_drawobj_sync, base)
+#define CMDOBJ(obj) \
+ container_of(obj, struct kgsl_drawobj_cmd, base)
+
+#define CMDOBJ_TYPE BIT(0)
+#define MARKEROBJ_TYPE BIT(1)
+#define SYNCOBJ_TYPE BIT(2)
+
+/**
+ * struct kgsl_drawobj - KGSL drawobj descriptor
+ * @device: KGSL GPU device that the command was created for
+ * @context: KGSL context that created the command
+ * @type: Object type
+ * @timestamp: Timestamp assigned to the command
+ * @flags: flags
+ * @refcount: kref structure to maintain the reference count
+ */
+struct kgsl_drawobj {
+ struct kgsl_device *device;
+ struct kgsl_context *context;
+ uint32_t type;
+ uint32_t timestamp;
+ unsigned long flags;
+ struct kref refcount;
+};
+
+/**
+ * struct kgsl_drawobj_cmd - KGSL command obj. This covers marker
+ * cmds also since markers are special form of cmds that do not
+ * need their cmds to be executed.
+ * @base: Base kgsl_drawobj
+ * @priv: Internal flags
+ * @global_ts: The ringbuffer timestamp corresponding to this
+ * command obj
+ * @fault_policy: Internal policy describing how to handle this command in case
+ * of a fault
+ * @fault_recovery: recovery actions actually tried for this batch
+ * in case the GPU is found to be hung
+ * @refcount: kref structure to maintain the reference count
+ * @cmdlist: List of IBs to issue
+ * @memlist: List of all memory used in this command batch
+ * @marker_timestamp: For markers, the timestamp of the last "real" command that
+ * was queued
+ * @profiling_buf_entry: Mem entry containing the profiling buffer
+ * @profiling_buffer_gpuaddr: GPU virt address of the profile buffer added here
+ * for easy access
+ * @profile_index: Index to store the start/stop ticks in the kernel profiling
+ * buffer
+ * @submit_ticks: Variable to hold ticks at the time of
+ * command obj submit.
+ *
+ */
+struct kgsl_drawobj_cmd {
+ struct kgsl_drawobj base;
+ unsigned long priv;
+ unsigned int global_ts;
+ unsigned long fault_policy;
+ unsigned long fault_recovery;
+ struct list_head cmdlist;
+ struct list_head memlist;
+ unsigned int marker_timestamp;
+ struct kgsl_mem_entry *profiling_buf_entry;
+ uint64_t profiling_buffer_gpuaddr;
+ unsigned int profile_index;
+ uint64_t submit_ticks;
+};
+
+/**
+ * struct kgsl_drawobj_sync - KGSL sync object
+ * @base: Base kgsl_drawobj, this needs to be the first entry
+ * @synclist: Array of context/timestamp tuples to wait for before issuing
+ * @numsyncs: Number of sync entries in the array
+ * @pending: Bitmask of sync events that are active
+ * @timer: a timer used to track possible sync timeouts for this
+ * sync obj
+ * @timeout_jiffies: For a sync obj the jiffies at
+ * which the timer will expire
+ */
+struct kgsl_drawobj_sync {
+ struct kgsl_drawobj base;
+ struct kgsl_drawobj_sync_event *synclist;
+ unsigned int numsyncs;
+ unsigned long pending;
+ struct timer_list timer;
+ unsigned long timeout_jiffies;
+};
+
+/**
+ * struct kgsl_drawobj_sync_event
+ * @id: identifier (position within the pending bitmap)
+ * @type: Syncpoint type
+ * @syncobj: Pointer to the syncobj that owns the sync event
+ * @context: KGSL context for whose timestamp we want to
+ * register this event
+ * @timestamp: Pending timestamp for the event
+ * @handle: Pointer to a sync fence handle
+ * @device: Pointer to the KGSL device
+ */
+struct kgsl_drawobj_sync_event {
+ unsigned int id;
+ int type;
+ struct kgsl_drawobj_sync *syncobj;
+ struct kgsl_context *context;
+ unsigned int timestamp;
+ struct kgsl_sync_fence_waiter *handle;
+ struct kgsl_device *device;
+};
+
+#define KGSL_DRAWOBJ_FLAGS \
+ { KGSL_DRAWOBJ_MARKER, "MARKER" }, \
+ { KGSL_DRAWOBJ_CTX_SWITCH, "CTX_SWITCH" }, \
+ { KGSL_DRAWOBJ_SYNC, "SYNC" }, \
+ { KGSL_DRAWOBJ_END_OF_FRAME, "EOF" }, \
+ { KGSL_DRAWOBJ_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \
+ { KGSL_DRAWOBJ_SUBMIT_IB_LIST, "IB_LIST" }
+
+/**
+ * enum kgsl_drawobj_cmd_priv - Internal command obj flags
+ * @CMDOBJ_SKIP - skip the entire command obj
+ * @CMDOBJ_FORCE_PREAMBLE - Force the preamble on for
+ * command obj
+ * @CMDOBJ_WFI - Force wait-for-idle for the submission
+ * @CMDOBJ_PROFILE - store the start / retire ticks for
+ * the command obj in the profiling buffer
+ */
+enum kgsl_drawobj_cmd_priv {
+ CMDOBJ_SKIP = 0,
+ CMDOBJ_FORCE_PREAMBLE,
+ CMDOBJ_WFI,
+ CMDOBJ_PROFILE,
+};
+
+struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int flags,
+ unsigned int type);
+int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc);
+int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count);
+int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+
+struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
+ struct kgsl_context *context);
+int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr,
+ int count);
+int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_cmd_syncpoint *sync);
+
+int kgsl_drawobj_init(void);
+void kgsl_drawobj_exit(void);
+
+void kgsl_dump_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj);
+
+void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj);
+
+static inline bool kgsl_drawobj_events_pending(
+ struct kgsl_drawobj_sync *syncobj)
+{
+ return !bitmap_empty(&syncobj->pending, KGSL_MAX_SYNCPOINTS);
+}
+
+static inline bool kgsl_drawobj_event_pending(
+ struct kgsl_drawobj_sync *syncobj, unsigned int bit)
+{
+ if (bit >= KGSL_MAX_SYNCPOINTS)
+ return false;
+
+ return test_bit(bit, &syncobj->pending);
+}
+#endif /* __KGSL_DRAWOBJ_H */
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 71b6086423d6..9f35a3197a4c 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1118,7 +1118,6 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
int ret = 0;
struct kgsl_iommu_pt *iommu_pt = NULL;
- int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
unsigned int cb_num;
struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
@@ -1128,9 +1127,6 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
if (IS_ERR(iommu_pt))
return PTR_ERR(iommu_pt);
- iommu_domain_set_attr(iommu_pt->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
if (kgsl_mmu_is_perprocess(mmu)) {
ret = iommu_domain_set_attr(iommu_pt->domain,
DOMAIN_ATTR_PROCID, &pt->name);
@@ -1189,7 +1185,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
int ret = 0;
struct kgsl_iommu_pt *iommu_pt = NULL;
struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
- int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
int secure_vmid = VMID_CP_PIXEL;
unsigned int cb_num;
@@ -1207,9 +1202,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
if (IS_ERR(iommu_pt))
return PTR_ERR(iommu_pt);
- iommu_domain_set_attr(iommu_pt->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
ret = iommu_domain_set_attr(iommu_pt->domain,
DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
if (ret) {
@@ -1251,7 +1243,6 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
int dynamic = 1;
unsigned int cb_num = ctx->cb_num;
- int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
@@ -1278,9 +1269,6 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
goto done;
}
- iommu_domain_set_attr(iommu_pt->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
ret = _attach_pt(iommu_pt, ctx);
if (ret)
goto done;
@@ -2492,7 +2480,6 @@ static const struct {
{ "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
{ "qcom,hyp_secure_alloc", KGSL_MMU_HYP_SECURE_ALLOC },
{ "qcom,force-32bit", KGSL_MMU_FORCE_32BIT },
- { "qcom,coherent-htw", KGSL_MMU_COHERENT_HTW },
};
static int _kgsl_iommu_probe(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index acbc0e784cf2..3e32c25b3dbe 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -130,8 +130,6 @@ struct kgsl_mmu_pt_ops {
#define KGSL_MMU_FORCE_32BIT BIT(5)
/* 64 bit address is live */
#define KGSL_MMU_64BIT BIT(6)
-/* MMU can do coherent hardware table walks */
-#define KGSL_MMU_COHERENT_HTW BIT(7)
/* The MMU supports non-contigious pages */
#define KGSL_MMU_PAGED BIT(8)
/* The device requires a guard page */
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index d71c6a63f2d3..172de7406c26 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1387,6 +1387,47 @@ done:
return 0;
}
+static ssize_t kgsl_pwrctrl_pwrscale_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ int ret;
+ unsigned int enable = 0;
+
+ if (device == NULL)
+ return 0;
+
+ ret = kgsl_sysfs_store(buf, &enable);
+ if (ret)
+ return ret;
+
+ mutex_lock(&device->mutex);
+
+ if (enable)
+ kgsl_pwrscale_enable(device);
+ else
+ kgsl_pwrscale_disable(device, false);
+
+ mutex_unlock(&device->mutex);
+
+ return count;
+}
+
+static ssize_t kgsl_pwrctrl_pwrscale_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrscale *psc;
+
+ if (device == NULL)
+ return 0;
+ psc = &device->pwrscale;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", psc->enabled);
+}
+
static DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show,
kgsl_pwrctrl_gpuclk_store);
static DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
@@ -1449,6 +1490,9 @@ static DEVICE_ATTR(clock_mhz, 0444, kgsl_pwrctrl_clock_mhz_show, NULL);
static DEVICE_ATTR(freq_table_mhz, 0444,
kgsl_pwrctrl_freq_table_mhz_show, NULL);
static DEVICE_ATTR(temp, 0444, kgsl_pwrctrl_temp_show, NULL);
+static DEVICE_ATTR(pwrscale, 0644,
+ kgsl_pwrctrl_pwrscale_show,
+ kgsl_pwrctrl_pwrscale_store);
static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_gpuclk,
@@ -1477,6 +1521,7 @@ static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_clock_mhz,
&dev_attr_freq_table_mhz,
&dev_attr_temp,
+ &dev_attr_pwrscale,
NULL
};
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 01d3b74c16fd..85cd29b5364e 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -189,19 +189,21 @@ EXPORT_SYMBOL(kgsl_pwrscale_update);
/*
* kgsl_pwrscale_disable - temporarily disable the governor
* @device: The device
+ * @turbo: Indicates if pwrlevel should be forced to turbo
*
* Temporarily disable the governor, to prevent interference
* with profiling tools that expect a fixed clock frequency.
* This function must be called with the device mutex locked.
*/
-void kgsl_pwrscale_disable(struct kgsl_device *device)
+void kgsl_pwrscale_disable(struct kgsl_device *device, bool turbo)
{
BUG_ON(!mutex_is_locked(&device->mutex));
if (device->pwrscale.devfreqptr)
queue_work(device->pwrscale.devfreq_wq,
&device->pwrscale.devfreq_suspend_ws);
device->pwrscale.enabled = false;
- kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
+ if (turbo)
+ kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
}
EXPORT_SYMBOL(kgsl_pwrscale_disable);
diff --git a/drivers/gpu/msm/kgsl_pwrscale.h b/drivers/gpu/msm/kgsl_pwrscale.h
index c85317869f1d..0756a4490f22 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.h
+++ b/drivers/gpu/msm/kgsl_pwrscale.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -123,7 +123,7 @@ void kgsl_pwrscale_sleep(struct kgsl_device *device);
void kgsl_pwrscale_wake(struct kgsl_device *device);
void kgsl_pwrscale_enable(struct kgsl_device *device);
-void kgsl_pwrscale_disable(struct kgsl_device *device);
+void kgsl_pwrscale_disable(struct kgsl_device *device, bool turbo);
int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags);
int kgsl_devfreq_get_dev_status(struct device *, struct devfreq_dev_status *);
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 4ef9f80177d6..6438c6e65b97 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -36,14 +36,13 @@ TRACE_EVENT(kgsl_issueibcmds,
TP_PROTO(struct kgsl_device *device,
int drawctxt_id,
- struct kgsl_cmdbatch *cmdbatch,
unsigned int numibs,
int timestamp,
int flags,
int result,
unsigned int type),
- TP_ARGS(device, drawctxt_id, cmdbatch, numibs, timestamp,
+ TP_ARGS(device, drawctxt_id, numibs, timestamp,
flags, result, type),
TP_STRUCT__entry(
@@ -74,7 +73,7 @@ TRACE_EVENT(kgsl_issueibcmds,
__entry->numibs,
__entry->timestamp,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "None",
+ KGSL_DRAWOBJ_FLAGS) : "None",
__entry->result,
__print_symbolic(__entry->drawctxt_type, KGSL_CONTEXT_TYPES)
)
@@ -1028,59 +1027,62 @@ TRACE_EVENT(kgsl_pagetable_destroy,
);
DECLARE_EVENT_CLASS(syncpoint_timestamp_template,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp),
+ TP_ARGS(syncobj, context, timestamp),
TP_STRUCT__entry(
- __field(unsigned int, cmdbatch_context_id)
+ __field(unsigned int, syncobj_context_id)
__field(unsigned int, context_id)
__field(unsigned int, timestamp)
),
TP_fast_assign(
- __entry->cmdbatch_context_id = cmdbatch->context->id;
+ __entry->syncobj_context_id = syncobj->base.context->id;
__entry->context_id = context->id;
__entry->timestamp = timestamp;
),
TP_printk("ctx=%d sync ctx=%d ts=%d",
- __entry->cmdbatch_context_id, __entry->context_id,
+ __entry->syncobj_context_id, __entry->context_id,
__entry->timestamp)
);
DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp)
+ TP_ARGS(syncobj, context, timestamp)
);
DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp_expire,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp)
+ TP_ARGS(syncobj, context, timestamp)
);
DECLARE_EVENT_CLASS(syncpoint_fence_template,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name),
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+ TP_ARGS(syncobj, name),
TP_STRUCT__entry(
__string(fence_name, name)
- __field(unsigned int, cmdbatch_context_id)
+ __field(unsigned int, syncobj_context_id)
),
TP_fast_assign(
- __entry->cmdbatch_context_id = cmdbatch->context->id;
+ __entry->syncobj_context_id = syncobj->base.context->id;
__assign_str(fence_name, name);
),
TP_printk("ctx=%d fence=%s",
- __entry->cmdbatch_context_id, __get_str(fence_name))
+ __entry->syncobj_context_id, __get_str(fence_name))
);
DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name)
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+ TP_ARGS(syncobj, name)
);
DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence_expire,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name)
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+ TP_ARGS(syncobj, name)
);
TRACE_EVENT(kgsl_msg,
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 306465ededf9..766b052ade1d 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1143,6 +1143,12 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata)
goto err;
}
+ if (drvdata->config_type == TMC_CONFIG_TYPE_ETR &&
+ drvdata->vaddr == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
if (!drvdata->enable)
goto out;
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index bb44b6d82ccd..ebb49230d4d7 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/qpnp/qpnp-revid.h>
#define FG_ADC_RR_EN_CTL 0x46
#define FG_ADC_RR_SKIN_TEMP_LSB 0x50
@@ -150,13 +151,18 @@
#define FG_ADC_RR_TEMP_FS_VOLTAGE_NUM 5000000
#define FG_ADC_RR_TEMP_FS_VOLTAGE_DEN 3
-#define FG_ADC_RR_DIE_TEMP_OFFSET 600000
+#define FG_ADC_RR_DIE_TEMP_OFFSET 601400
#define FG_ADC_RR_DIE_TEMP_SLOPE 2
#define FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC 25000
-#define FG_ADC_RR_CHG_TEMP_OFFSET 1288000
-#define FG_ADC_RR_CHG_TEMP_SLOPE 4
-#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 27000
+#define FAB_ID_GF 0x30
+#define FAB_ID_SMIC 0x11
+#define FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV 1296794
+#define FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C 3858
+#define FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV 1339518
+#define FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C 3598
+#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 25000
+#define FG_ADC_RR_CHG_THRESHOLD_SCALE 4
#define FG_ADC_RR_VOLT_INPUT_FACTOR 8
#define FG_ADC_RR_CURR_INPUT_FACTOR 2
@@ -201,6 +207,8 @@ struct rradc_chip {
struct iio_chan_spec *iio_chans;
unsigned int nchannels;
struct rradc_chan_prop *chan_props;
+ struct device_node *revid_dev_node;
+ struct pmic_revid_data *pmic_fab_id;
};
struct rradc_channels {
@@ -347,16 +355,34 @@ static int rradc_post_process_chg_temp_hot(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
int *result_millidegc)
{
- int64_t temp = 0;
+ int64_t uv = 0, offset = 0, slope = 0;
- temp = (int64_t) adc_code * 4;
- temp = temp * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;
- temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+ if (chip->revid_dev_node) {
+ switch (chip->pmic_fab_id->fab_id) {
+ case FAB_ID_GF:
+ offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ break;
+ case FAB_ID_SMIC:
+ offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pr_err("No temperature scaling coefficients\n");
+ return -EINVAL;
+ }
+
+ uv = (int64_t) adc_code * FG_ADC_RR_CHG_THRESHOLD_SCALE;
+ uv = uv * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;
+ uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
FG_MAX_ADC_READINGS));
- temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp;
- temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE);
- temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
- *result_millidegc = temp;
+ uv = offset - uv;
+ uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope);
+ uv = uv + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = uv;
return 0;
}
@@ -380,15 +406,33 @@ static int rradc_post_process_chg_temp(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
int *result_millidegc)
{
- int64_t temp = 0;
+ int64_t uv = 0, offset = 0, slope = 0;
- temp = ((int64_t) adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM);
- temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+ if (chip->revid_dev_node) {
+ switch (chip->pmic_fab_id->fab_id) {
+ case FAB_ID_GF:
+ offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ break;
+ case FAB_ID_SMIC:
+ offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pr_err("No temperature scaling coefficients\n");
+ return -EINVAL;
+ }
+
+ uv = ((int64_t) adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM);
+ uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
FG_MAX_ADC_READINGS));
- temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp;
- temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE);
- temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
- *result_millidegc = temp;
+ uv = offset - uv;
+ uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope);
+ uv += FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = uv;
return 0;
}
@@ -516,7 +560,7 @@ static int rradc_do_conversion(struct rradc_chip *chip,
buf[0] &= FG_RR_ADC_STS_CHANNEL_READING_MASK;
if (buf[0] != FG_RR_ADC_STS_CHANNEL_READING_MASK) {
- pr_warn("%s is not ready; nothing to read\n",
+ pr_debug("%s is not ready; nothing to read\n",
rradc_chans[prop->channel].datasheet_name);
rc = -ENODATA;
goto fail;
@@ -653,6 +697,22 @@ static int rradc_get_dt_data(struct rradc_chip *chip, struct device_node *node)
}
chip->base = base;
+ chip->revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+ if (chip->revid_dev_node) {
+ chip->pmic_fab_id = get_revid_data(chip->revid_dev_node);
+ if (IS_ERR(chip->pmic_fab_id)) {
+ rc = PTR_ERR(chip->pmic_fab_id);
+ if (rc != -EPROBE_DEFER)
+ pr_err("Unable to get pmic_revid rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->pmic_fab_id->fab_id == -EINVAL) {
+ rc = chip->pmic_fab_id->fab_id;
+ pr_debug("Unable to read fabid rc=%d\n", rc);
+ }
+ }
+
iio_chan = chip->iio_chans;
for (i = 0; i < RR_ADC_MAX; i++) {
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 31369d8c0ef3..9c1380b65b77 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -823,4 +823,6 @@ config INPUT_DRV2667_HAPTICS
To compile this driver as a module, choose M here: the
module will be called drv2667-haptics.
+source "drivers/input/misc/ots_pat9125/Kconfig"
+
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 4019f19dd848..4e806ac056ce 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
obj-$(CONFIG_INPUT_YEALINK) += yealink.o
obj-$(CONFIG_INPUT_IDEAPAD_SLIDEBAR) += ideapad_slidebar.o
+obj-$(CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH) += ots_pat9125/
diff --git a/drivers/input/misc/ots_pat9125/Kconfig b/drivers/input/misc/ots_pat9125/Kconfig
new file mode 100644
index 000000000000..af82edd0faae
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/Kconfig
@@ -0,0 +1,14 @@
+#
+# PixArt OTS switch driver configuration
+#
+
+config INPUT_PIXART_OTS_PAT9125_SWITCH
+ tristate "PixArt PAT9125 Rotating Switch driver"
+ depends on INPUT && I2C && GPIOLIB
+ help
+ Say Y to enable support for the PixArt OTS pat9125
+ rotating switch driver.
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ots_pat9125.
diff --git a/drivers/input/misc/ots_pat9125/Makefile b/drivers/input/misc/ots_pat9125/Makefile
new file mode 100644
index 000000000000..a697caf69644
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the PixArt OTS switch driver.
+#
+
+# Each configuration option enables a list of files.
+
+obj-$(CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH) += pat9125_linux_driver.o pixart_ots.o
diff --git a/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c b/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
new file mode 100644
index 000000000000..fa5e4cca129d
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
@@ -0,0 +1,627 @@
+/* drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
+ *
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include "pixart_ots.h"
+
+struct pixart_pat9125_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ int irq_gpio;
+ u32 press_keycode;
+ bool press_en;
+ bool inverse_x;
+ bool inverse_y;
+ struct regulator *vdd;
+ struct regulator *vld;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pinctrl_state_active;
+ struct pinctrl_state *pinctrl_state_suspend;
+ struct pinctrl_state *pinctrl_state_release;
+};
+
+/* Declaration of suspend and resume functions */
+static int pat9125_suspend(struct device *dev);
+static int pat9125_resume(struct device *dev);
+
+static int pat9125_i2c_write(struct i2c_client *client, u8 reg, u8 *data,
+ int len)
+{
+ u8 buf[MAX_BUF_SIZE];
+ int ret = 0, i;
+ struct device *dev = &client->dev;
+
+ buf[0] = reg;
+ if (len >= MAX_BUF_SIZE) {
+ dev_err(dev, "%s Failed: buffer size is %d [Max Limit is %d]\n",
+ __func__, len, MAX_BUF_SIZE);
+ return -ENODEV;
+ }
+ for (i = 0 ; i < len; i++)
+ buf[i+1] = data[i];
+ /* Returns negative errno, or else the number of bytes written. */
+ ret = i2c_master_send(client, buf, len+1);
+ if (ret != len+1)
+ dev_err(dev, "%s Failed: writing to reg 0x%x\n", __func__, reg);
+
+ return ret;
+}
+
+static int pat9125_i2c_read(struct i2c_client *client, u8 reg, u8 *data)
+{
+ u8 buf[MAX_BUF_SIZE];
+ int ret;
+ struct device *dev = &client->dev;
+
+ buf[0] = reg;
+ /*
+ * If everything went ok (1 msg transmitted), return #bytes transmitted,
+ * else error code. thus if transmit is ok return value 1
+ */
+ ret = i2c_master_send(client, buf, 1);
+ if (ret != 1) {
+ dev_err(dev, "%s Failed: writing to reg 0x%x\n", __func__, reg);
+ return ret;
+ }
+ /* returns negative errno, or else the number of bytes read */
+ ret = i2c_master_recv(client, buf, 1);
+ if (ret != 1) {
+ dev_err(dev, "%s Failed: reading reg 0x%x\n", __func__, reg);
+ return ret;
+ }
+ *data = buf[0];
+
+ return ret;
+}
+
+u8 read_data(struct i2c_client *client, u8 addr)
+{
+ u8 data = 0xff;
+
+ pat9125_i2c_read(client, addr, &data);
+ return data;
+}
+
+void write_data(struct i2c_client *client, u8 addr, u8 data)
+{
+ pat9125_i2c_write(client, addr, &data, 1);
+}
+
+static irqreturn_t pat9125_irq(int irq, void *dev_data)
+{
+ u8 delta_x = 0, delta_y = 0, motion;
+ struct pixart_pat9125_data *data = dev_data;
+ struct input_dev *ipdev = data->input;
+ struct device *dev = &data->client->dev;
+
+ motion = read_data(data->client, PIXART_PAT9125_MOTION_STATUS_REG);
+ do {
+ /* check if MOTION bit is set or not */
+ if (motion & PIXART_PAT9125_VALID_MOTION_DATA) {
+ delta_x = read_data(data->client,
+ PIXART_PAT9125_DELTA_X_LO_REG);
+ delta_y = read_data(data->client,
+ PIXART_PAT9125_DELTA_Y_LO_REG);
+
+ /* Inverse x depending upon the device orientation */
+ delta_x = (data->inverse_x) ? -delta_x : delta_x;
+ /* Inverse y depending upon the device orientation */
+ delta_y = (data->inverse_y) ? -delta_y : delta_y;
+ }
+
+ dev_dbg(dev, "motion = %x, delta_x = %x, delta_y = %x\n",
+ motion, delta_x, delta_y);
+
+ if (delta_x != 0) {
+ /* Send delta_x as REL_WHEEL for rotation */
+ input_report_rel(ipdev, REL_WHEEL, (s8) delta_x);
+ input_sync(ipdev);
+ }
+
+ if (data->press_en && delta_y != 0) {
+ if ((s8) delta_y > 0) {
+ /* Send DOWN event for press keycode */
+ input_report_key(ipdev, data->press_keycode, 1);
+ input_sync(ipdev);
+ } else {
+ /* Send UP event for press keycode */
+ input_report_key(ipdev, data->press_keycode, 0);
+ input_sync(ipdev);
+ }
+ }
+ usleep_range(PIXART_SAMPLING_PERIOD_US_MIN,
+ PIXART_SAMPLING_PERIOD_US_MAX);
+
+ motion = read_data(data->client,
+ PIXART_PAT9125_MOTION_STATUS_REG);
+ } while (motion & PIXART_PAT9125_VALID_MOTION_DATA);
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t pat9125_suspend_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct pixart_pat9125_data *data =
+ (struct pixart_pat9125_data *) dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int mode;
+
+ if (kstrtoint(buf, 10, &mode)) {
+ dev_err(dev, "failed to read input for sysfs\n");
+ return -EINVAL;
+ }
+
+ if (mode == 1)
+ pat9125_suspend(&client->dev);
+ else if (mode == 0)
+ pat9125_resume(&client->dev);
+
+ return count;
+}
+
+static ssize_t pat9125_test_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ char s[256], *p = s;
+ int reg_data = 0, i;
+ long rd_addr, wr_addr, wr_data;
+ struct pixart_pat9125_data *data =
+ (struct pixart_pat9125_data *) dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+
+ for (i = 0; i < sizeof(s); i++)
+ s[i] = buf[i];
+ *(s+1) = '\0';
+ *(s+4) = '\0';
+ *(s+7) = '\0';
+ /* example(in console): echo w 12 34 > rw_reg */
+ if (*p == 'w') {
+ p += 2;
+ if (!kstrtol(p, 16, &wr_addr)) {
+ p += 3;
+ if (!kstrtol(p, 16, &wr_data)) {
+ dev_dbg(dev, "w 0x%x 0x%x\n",
+ (u8)wr_addr, (u8)wr_data);
+ write_data(client, (u8)wr_addr, (u8)wr_data);
+ }
+ }
+ }
+ /* example(in console): echo r 12 > rw_reg */
+ else if (*p == 'r') {
+ p += 2;
+
+ if (!kstrtol(p, 16, &rd_addr)) {
+ reg_data = read_data(client, (u8)rd_addr);
+ dev_dbg(dev, "r 0x%x 0x%x\n",
+ (unsigned int)rd_addr, reg_data);
+ }
+ }
+ return count;
+}
+
+static ssize_t pat9125_test_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return 0;
+}
+
+static DEVICE_ATTR(suspend, S_IRUGO | S_IWUSR | S_IWGRP,
+ NULL, pat9125_suspend_store);
+static DEVICE_ATTR(test, S_IRUGO | S_IWUSR | S_IWGRP,
+ pat9125_test_show, pat9125_test_store);
+
+static struct attribute *pat9125_attr_list[] = {
+ &dev_attr_test.attr,
+ &dev_attr_suspend.attr,
+ NULL,
+};
+
+static struct attribute_group pat9125_attr_grp = {
+ .attrs = pat9125_attr_list,
+};
+
+/*
+ * Acquire the pinctrl handle and look up the active/suspend/release
+ * states. Returns 0 on success or a negative errno.
+ *
+ * A NULL return from the pinctrl API is mapped to -EINVAL: the old
+ * code used PTR_ERR() unconditionally, and PTR_ERR(NULL) == 0 would
+ * have reported success from an error path (the caller would then
+ * dereference a NULL state in pinctrl_select_state()).
+ */
+static int pixart_pinctrl_init(struct pixart_pat9125_data *data)
+{
+	int err;
+	struct device *dev = &data->client->dev;
+
+	data->pinctrl = devm_pinctrl_get(&(data->client->dev));
+	if (IS_ERR_OR_NULL(data->pinctrl)) {
+		err = data->pinctrl ? PTR_ERR(data->pinctrl) : -EINVAL;
+		dev_err(dev, "Target does not use pinctrl %d\n", err);
+		return err;
+	}
+
+	data->pinctrl_state_active = pinctrl_lookup_state(data->pinctrl,
+			PINCTRL_STATE_ACTIVE);
+	if (IS_ERR_OR_NULL(data->pinctrl_state_active)) {
+		err = data->pinctrl_state_active ?
+			PTR_ERR(data->pinctrl_state_active) : -EINVAL;
+		dev_err(dev, "Can not lookup active pinctrl state %d\n", err);
+		return err;
+	}
+
+	data->pinctrl_state_suspend = pinctrl_lookup_state(data->pinctrl,
+			PINCTRL_STATE_SUSPEND);
+	if (IS_ERR_OR_NULL(data->pinctrl_state_suspend)) {
+		err = data->pinctrl_state_suspend ?
+			PTR_ERR(data->pinctrl_state_suspend) : -EINVAL;
+		dev_err(dev, "Can not lookup suspend pinctrl state %d\n", err);
+		return err;
+	}
+
+	data->pinctrl_state_release = pinctrl_lookup_state(data->pinctrl,
+			PINCTRL_STATE_RELEASE);
+	if (IS_ERR_OR_NULL(data->pinctrl_state_release)) {
+		err = data->pinctrl_state_release ?
+			PTR_ERR(data->pinctrl_state_release) : -EINVAL;
+		dev_err(dev, "Can not lookup release pinctrl state %d\n", err);
+		return err;
+	}
+	return 0;
+}
+
+/*
+ * Acquire the vdd/vld supplies and program their voltage range and
+ * active load. The gets are devm-managed, so there is nothing to undo
+ * on failure. Returns 0 or the first negative errno from the
+ * regulator API.
+ */
+static int pat9125_regulator_init(struct pixart_pat9125_data *data)
+{
+	int err = 0;
+	struct device *dev = &data->client->dev;
+
+	data->vdd = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(data->vdd)) {
+		dev_err(dev, "Failed to get regulator vdd %ld\n",
+			PTR_ERR(data->vdd));
+		return PTR_ERR(data->vdd);
+	}
+
+	data->vld = devm_regulator_get(dev, "vld");
+	if (IS_ERR(data->vld)) {
+		dev_err(dev, "Failed to get regulator vld %ld\n",
+			PTR_ERR(data->vld));
+		return PTR_ERR(data->vld);
+	}
+
+	err = regulator_set_voltage(data->vdd, VDD_VTG_MIN_UV, VDD_VTG_MAX_UV);
+	if (err) {
+		dev_err(dev, "Failed to set voltage for vdd reg %d\n", err);
+		return err;
+	}
+
+	/* regulator_set_load() returns the selected mode (>= 0) on success */
+	err = regulator_set_load(data->vdd, VDD_ACTIVE_LOAD_UA);
+	if (err < 0) {
+		dev_err(dev, "Failed to set opt mode for vdd reg %d\n", err);
+		return err;
+	}
+
+	err = regulator_set_voltage(data->vld, VLD_VTG_MIN_UV, VLD_VTG_MAX_UV);
+	if (err) {
+		dev_err(dev, "Failed to set voltage for vld reg %d\n", err);
+		return err;
+	}
+
+	err = regulator_set_load(data->vld, VLD_ACTIVE_LOAD_UA);
+	if (err < 0) {
+		dev_err(dev, "Failed to set opt mode for vld reg %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * Enable (on == true) or disable both supplies. On the enable path
+ * the sensor is initialized over I2C after vdd comes up but before
+ * vld is enabled; disable order is the reverse (vld, then vdd).
+ * Returns 0 or the first regulator/init error.
+ *
+ * NOTE(review): if ots_sensor_init() fails, vdd is left enabled; the
+ * probe error path then disables both supplies, which can unbalance
+ * vld's enable count — confirm against regulator core warnings.
+ */
+static int pat9125_power_on(struct pixart_pat9125_data *data, bool on)
+{
+	int err = 0;
+	struct device *dev = &data->client->dev;
+
+	if (on) {
+		err = regulator_enable(data->vdd);
+		if (err) {
+			dev_err(dev, "Failed to enable vdd reg %d\n", err);
+			return err;
+		}
+
+		usleep_range(DELAY_BETWEEN_REG_US, DELAY_BETWEEN_REG_US + 1);
+
+		/*
+		 * Initialize pixart sensor after some delay, when vdd
+		 * regulator is enabled
+		 */
+		if (!ots_sensor_init(data->client)) {
+			err = -ENODEV;
+			dev_err(dev, "Failed to initialize sensor %d\n", err);
+			return err;
+		}
+
+		err = regulator_enable(data->vld);
+		if (err) {
+			dev_err(dev, "Failed to enable vld reg %d\n", err);
+			return err;
+		}
+	} else {
+		err = regulator_disable(data->vld);
+		if (err) {
+			dev_err(dev, "Failed to disable vld reg %d\n", err);
+			return err;
+		}
+
+		err = regulator_disable(data->vdd);
+		if (err) {
+			dev_err(dev, "Failed to disable vdd reg %d\n", err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Parse the optional devicetree properties: axis-inversion flags, the
+ * optional press key (the keycode becomes mandatory once
+ * "pixart,press-enabled" is set) and the interrupt GPIO. The GPIO
+ * number is not validated here — probe checks gpio_is_valid() later.
+ */
+static int pat9125_parse_dt(struct device *dev,
+		struct pixart_pat9125_data *data)
+{
+	struct device_node *np = dev->of_node;
+	u32 temp_val;
+	int ret;
+
+	data->inverse_x = of_property_read_bool(np, "pixart,inverse-x");
+	data->inverse_y = of_property_read_bool(np, "pixart,inverse-y");
+	data->press_en = of_property_read_bool(np, "pixart,press-enabled");
+	if (data->press_en) {
+		ret = of_property_read_u32(np, "pixart,press-keycode",
+				&temp_val);
+		if (!ret) {
+			data->press_keycode = temp_val;
+		} else {
+			dev_err(dev, "Unable to parse press-keycode\n");
+			return ret;
+		}
+	}
+
+	data->irq_gpio = of_get_named_gpio_flags(np, "pixart,irq-gpio",
+			0, NULL);
+
+	return 0;
+}
+
+/*
+ * Probe: obtain per-device data (DT or platform data), register the
+ * input device, configure pins (pinctrl if available, otherwise the
+ * raw IRQ GPIO), bring up the regulators and the sensor, then hook up
+ * the threaded IRQ and the sysfs group.
+ */
+static int pat9125_i2c_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	int err = 0;
+	struct pixart_pat9125_data *data;
+	struct input_dev *input;
+	struct device *dev = &client->dev;
+
+	/*
+	 * i2c_check_functionality() returns non-zero on support and 0
+	 * otherwise; the previous "err < 0" test could never trigger,
+	 * so unsupported adapters were not actually rejected.
+	 */
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
+		dev_err(dev, "I2C not supported\n");
+		return -ENXIO;
+	}
+
+	if (client->dev.of_node) {
+		data = devm_kzalloc(dev, sizeof(struct pixart_pat9125_data),
+				GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+		err = pat9125_parse_dt(dev, data);
+		if (err) {
+			dev_err(dev, "DT parsing failed, errno:%d\n", err);
+			return err;
+		}
+	} else {
+		data = client->dev.platform_data;
+		if (!data) {
+			dev_err(dev, "Invalid pat9125 data\n");
+			return -EINVAL;
+		}
+	}
+	data->client = client;
+
+	input = devm_input_allocate_device(dev);
+	if (!input) {
+		dev_err(dev, "Failed to alloc input device\n");
+		return -ENOMEM;
+	}
+
+	input_set_capability(input, EV_REL, REL_WHEEL);
+	if (data->press_en)
+		input_set_capability(input, EV_KEY, data->press_keycode);
+
+	i2c_set_clientdata(client, data);
+	input_set_drvdata(input, data);
+	input->name = PAT9125_DEV_NAME;
+
+	data->input = input;
+	err = input_register_device(data->input);
+	if (err < 0) {
+		dev_err(dev, "Failed to register input device\n");
+		return err;
+	}
+
+	err = pixart_pinctrl_init(data);
+	if (!err && data->pinctrl) {
+		/*
+		 * Pinctrl handle is optional. If pinctrl handle is found
+		 * let pins to be configured in active state. If not
+		 * found continue further without error.
+		 */
+		err = pinctrl_select_state(data->pinctrl,
+				data->pinctrl_state_active);
+		if (err < 0)
+			dev_err(dev, "Could not set pin to active state %d\n",
+					err);
+	} else {
+		/* No pinctrl: configure the IRQ GPIO by hand instead */
+		if (gpio_is_valid(data->irq_gpio)) {
+			err = devm_gpio_request(dev, data->irq_gpio,
+					"pixart_pat9125_irq_gpio");
+			if (err) {
+				dev_err(dev, "Couldn't request gpio %d\n", err);
+				return err;
+			}
+			err = gpio_direction_input(data->irq_gpio);
+			if (err) {
+				dev_err(dev, "Couldn't set dir for gpio %d\n",
+						err);
+				return err;
+			}
+		} else {
+			dev_err(dev, "Invalid gpio %d\n", data->irq_gpio);
+			return -EINVAL;
+		}
+	}
+
+	err = pat9125_regulator_init(data);
+	if (err) {
+		dev_err(dev, "Failed to init regulator, %d\n", err);
+		return err;
+	}
+
+	err = pat9125_power_on(data, true);
+	if (err) {
+		dev_err(dev, "Failed to power-on the sensor %d\n", err);
+		goto err_power_on;
+	}
+
+	/*
+	 * Level (LOW) and edge (FALLING) triggers are mutually
+	 * exclusive; requesting both made the flags invalid for most
+	 * irqchips. The handler is threaded and oneshot, so a level
+	 * trigger is used. NOTE(review): confirm the MOTION line is
+	 * active-low against the sensor datasheet / board schematic.
+	 */
+	err = devm_request_threaded_irq(dev, client->irq, NULL, pat9125_irq,
+			IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+			"pixart_pat9125_irq", data);
+	if (err) {
+		dev_err(dev, "Req irq %d failed, errno:%d\n", client->irq, err);
+		goto err_request_threaded_irq;
+	}
+
+	err = sysfs_create_group(&(input->dev.kobj), &pat9125_attr_grp);
+	if (err) {
+		dev_err(dev, "Failed to create sysfs group, errno:%d\n", err);
+		goto err_sysfs_create;
+	}
+
+	return 0;
+
+err_sysfs_create:
+err_request_threaded_irq:
+err_power_on:
+	/*
+	 * NOTE(review): when pat9125_power_on() itself failed, the
+	 * disables below may run on supplies that were never enabled
+	 * (vld in particular) and trip an unbalanced-disable warning.
+	 */
+	regulator_set_load(data->vdd, 0);
+	regulator_set_load(data->vld, 0);
+	if (pat9125_power_on(data, false) < 0)
+		dev_err(dev, "Failed to disable regulators\n");
+	if (data->pinctrl)
+		if (pinctrl_select_state(data->pinctrl,
+				data->pinctrl_state_release) < 0)
+			dev_err(dev, "Couldn't set pin to release state\n");
+
+	return err;
+}
+
+/*
+ * Remove: tear down sysfs, park the pins in the release state, drop
+ * the regulator load requests and power the sensor off. GPIO, IRQ,
+ * input device and memory are devm-managed and freed by the core.
+ */
+static int pat9125_i2c_remove(struct i2c_client *client)
+{
+	struct pixart_pat9125_data *data = i2c_get_clientdata(client);
+	struct device *dev = &data->client->dev;
+
+	sysfs_remove_group(&(data->input->dev.kobj), &pat9125_attr_grp);
+	if (data->pinctrl)
+		if (pinctrl_select_state(data->pinctrl,
+				data->pinctrl_state_release) < 0)
+			dev_err(dev, "Couldn't set pin to release state\n");
+	regulator_set_load(data->vdd, 0);
+	regulator_set_load(data->vld, 0);
+	pat9125_power_on(data, false);
+	return 0;
+}
+
+/*
+ * System suspend: mask the IRQ, park the pins and power the sensor
+ * down. If powering down fails the suspend is aborted, so undo the
+ * IRQ masking and pin state — resume() is not called after a failed
+ * suspend, and the old code left the IRQ permanently disabled.
+ */
+static int pat9125_suspend(struct device *dev)
+{
+	int rc;
+	struct pixart_pat9125_data *data =
+		(struct pixart_pat9125_data *) dev_get_drvdata(dev);
+
+	disable_irq(data->client->irq);
+	if (data->pinctrl) {
+		rc = pinctrl_select_state(data->pinctrl,
+				data->pinctrl_state_suspend);
+		if (rc < 0)
+			dev_err(dev, "Could not set pin to suspend state %d\n",
+					rc);
+	}
+
+	rc = pat9125_power_on(data, false);
+	if (rc) {
+		dev_err(dev, "Failed to disable regulators %d\n", rc);
+		if (data->pinctrl &&
+		    pinctrl_select_state(data->pinctrl,
+				data->pinctrl_state_active) < 0)
+			dev_err(dev, "Couldn't set pin to active state\n");
+		enable_irq(data->client->irq);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * System resume: restore the active pin state, power the sensor back
+ * up and unmask the IRQ.
+ *
+ * NOTE(review): on the error path the IRQ is left disabled (it was
+ * masked in suspend) and pat9125_power_on(false) may disable supplies
+ * that never got enabled — confirm whether a retry/unmask is wanted.
+ */
+static int pat9125_resume(struct device *dev)
+{
+	int rc;
+	struct pixart_pat9125_data *data =
+		(struct pixart_pat9125_data *) dev_get_drvdata(dev);
+
+	if (data->pinctrl) {
+		rc = pinctrl_select_state(data->pinctrl,
+				data->pinctrl_state_active);
+		if (rc < 0)
+			dev_err(dev, "Could not set pin to active state %d\n",
+					rc);
+	}
+
+	rc = pat9125_power_on(data, true);
+	if (rc) {
+		dev_err(dev, "Failed to power-on the sensor %d\n", rc);
+		goto err_sensor_init;
+	}
+
+	enable_irq(data->client->irq);
+
+	return 0;
+
+err_sensor_init:
+	if (data->pinctrl)
+		if (pinctrl_select_state(data->pinctrl,
+				data->pinctrl_state_suspend) < 0)
+			dev_err(dev, "Couldn't set pin to suspend state\n");
+	if (pat9125_power_on(data, false) < 0)
+		dev_err(dev, "Failed to disable regulators\n");
+
+	return rc;
+}
+
+static const struct i2c_device_id pat9125_device_id[] = {
+	{PAT9125_DEV_NAME, 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, pat9125_device_id);
+
+/* Legacy open-coded sleep ops; SET_SYSTEM_SLEEP_PM_OPS() would also do */
+static const struct dev_pm_ops pat9125_pm_ops = {
+	.suspend = pat9125_suspend,
+	.resume = pat9125_resume
+};
+
+static const struct of_device_id pixart_pat9125_match_table[] = {
+	{ .compatible = "pixart,pat9125",},
+	{ },
+};
+
+static struct i2c_driver pat9125_i2c_driver = {
+	.driver = {
+		.name = PAT9125_DEV_NAME,
+		.owner = THIS_MODULE,
+		.pm = &pat9125_pm_ops,
+		.of_match_table = pixart_pat9125_match_table,
+	},
+	.probe = pat9125_i2c_probe,
+	.remove = pat9125_i2c_remove,
+	.id_table = pat9125_device_id,
+};
+module_i2c_driver(pat9125_i2c_driver);
+
+MODULE_AUTHOR("pixart");
+MODULE_DESCRIPTION("pixart pat9125 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/ots_pat9125/pixart_ots.c b/drivers/input/misc/ots_pat9125/pixart_ots.c
new file mode 100644
index 000000000000..3d44d068423a
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/pixart_ots.c
@@ -0,0 +1,78 @@
+/* drivers/input/misc/ots_pat9125/pixart_ots.c
+ *
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include "pixart_platform.h"
+#include "pixart_ots.h"
+
+/*
+ * Write a register and read it back until the value sticks. The
+ * verify loop is bounded so a bus fault or a read-only register
+ * cannot hang the caller forever (the old do/while had no exit
+ * condition besides a matching read-back).
+ */
+static void ots_write_read(struct i2c_client *client, u8 address, u8 wdata)
+{
+	u8 read_value;
+	int retry = 10;
+
+	do {
+		write_data(client, address, wdata);
+		read_value = read_data(client, address);
+	} while (read_value != wdata && --retry > 0);
+}
+
+/*
+ * Probe the sensor ID and apply the recommended register settings.
+ * Returns true when the PAT9125 product ID (0x31) was read back over
+ * I2C; returns false — and writes no configuration — otherwise.
+ */
+bool ots_sensor_init(struct i2c_client *client)
+{
+	u8 sensor_pid = 0;
+	bool read_id_ok = false;
+
+	/*
+	 * Read sensor_pid in address 0x00 to check if the
+	 * serial link is valid, read value should be 0x31.
+	 */
+	sensor_pid = read_data(client, PIXART_PAT9125_PRODUCT_ID1_REG);
+
+	if (sensor_pid == PIXART_PAT9125_SENSOR_ID) {
+		read_id_ok = true;
+
+		/*
+		 * PAT9125 sensor recommended settings:
+		 * switch to bank0, not allowed to perform ots_write_read
+		 */
+		write_data(client, PIXART_PAT9125_SELECT_BANK_REG,
+				PIXART_PAT9125_BANK0);
+		/*
+		 * software reset (i.e. set bit7 to 1).
+		 * It will reset to 0 automatically
+		 * so perform OTS_RegWriteRead is not allowed.
+		 */
+		write_data(client, PIXART_PAT9125_CONFIG_REG,
+				PIXART_PAT9125_RESET);
+
+		/* delay 1ms */
+		usleep_range(RESET_DELAY_US, RESET_DELAY_US + 1);
+
+		/* disable write protect */
+		ots_write_read(client, PIXART_PAT9125_WRITE_PROTECT_REG,
+				PIXART_PAT9125_DISABLE_WRITE_PROTECT);
+		/* set X-axis resolution (depends on application) */
+		ots_write_read(client, PIXART_PAT9125_SET_CPI_RES_X_REG,
+				PIXART_PAT9125_CPI_RESOLUTION_X);
+		/* set Y-axis resolution (depends on application) */
+		ots_write_read(client, PIXART_PAT9125_SET_CPI_RES_Y_REG,
+				PIXART_PAT9125_CPI_RESOLUTION_Y);
+		/* set 12-bit X/Y data format (depends on application) */
+		ots_write_read(client, PIXART_PAT9125_ORIENTATION_REG,
+				PIXART_PAT9125_MOTION_DATA_LENGTH);
+		/* ONLY for VDD=VDDA=1.7~1.9V: for power saving */
+		ots_write_read(client, PIXART_PAT9125_VOLTAGE_SEGMENT_SEL_REG,
+				PIXART_PAT9125_LOW_VOLTAGE_SEGMENT);
+
+		/*
+		 * NOTE(review): the MISC1/MISC2 values below are
+		 * undocumented vendor magic — meaning not verifiable
+		 * from this file; confirm against the PAT9125 datasheet.
+		 */
+		if (read_data(client, PIXART_PAT9125_MISC2_REG) == 0x04) {
+			ots_write_read(client, PIXART_PAT9125_MISC2_REG, 0x08);
+			if (read_data(client, PIXART_PAT9125_MISC1_REG) == 0x10)
+				ots_write_read(client, PIXART_PAT9125_MISC1_REG,
+						0x19);
+		}
+		/* enable write protect */
+		ots_write_read(client, PIXART_PAT9125_WRITE_PROTECT_REG,
+				PIXART_PAT9125_ENABLE_WRITE_PROTECT);
+	}
+	return read_id_ok;
+}
diff --git a/drivers/input/misc/ots_pat9125/pixart_ots.h b/drivers/input/misc/ots_pat9125/pixart_ots.h
new file mode 100644
index 000000000000..5320d588d341
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/pixart_ots.h
@@ -0,0 +1,58 @@
+/* drivers/input/misc/ots_pat9125/pixart_ots.h
+ *
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#ifndef __PIXART_OTS_H_
+#define __PIXART_OTS_H_
+
+/* Driver-wide constants: device name, delays, pinctrl state names,
+ * regulator voltage/load figures used by the pat9125 switch driver.
+ */
+#define PAT9125_DEV_NAME "pixart_pat9125"
+#define MAX_BUF_SIZE 20
+#define RESET_DELAY_US 1000
+#define PINCTRL_STATE_ACTIVE "pmx_rot_switch_active"
+#define PINCTRL_STATE_SUSPEND "pmx_rot_switch_suspend"
+#define PINCTRL_STATE_RELEASE "pmx_rot_switch_release"
+#define VDD_VTG_MIN_UV 1800000
+#define VDD_VTG_MAX_UV 1800000
+#define VDD_ACTIVE_LOAD_UA 10000
+#define VLD_VTG_MIN_UV 2800000
+#define VLD_VTG_MAX_UV 3300000
+#define VLD_ACTIVE_LOAD_UA 10000
+#define DELAY_BETWEEN_REG_US 20000
+
+/* Register addresses */
+#define PIXART_PAT9125_PRODUCT_ID1_REG 0x00
+#define PIXART_PAT9125_PRODUCT_ID2_REG 0x01
+#define PIXART_PAT9125_MOTION_STATUS_REG 0x02
+#define PIXART_PAT9125_DELTA_X_LO_REG 0x03
+#define PIXART_PAT9125_DELTA_Y_LO_REG 0x04
+#define PIXART_PAT9125_CONFIG_REG 0x06
+#define PIXART_PAT9125_WRITE_PROTECT_REG 0x09
+#define PIXART_PAT9125_SET_CPI_RES_X_REG 0x0D
+#define PIXART_PAT9125_SET_CPI_RES_Y_REG 0x0E
+#define PIXART_PAT9125_DELTA_XY_HI_REG 0x12
+#define PIXART_PAT9125_ORIENTATION_REG 0x19
+#define PIXART_PAT9125_VOLTAGE_SEGMENT_SEL_REG 0x4B
+#define PIXART_PAT9125_SELECT_BANK_REG 0x7F
+#define PIXART_PAT9125_MISC1_REG 0x5D
+#define PIXART_PAT9125_MISC2_REG 0x5E
+/* Register configuration data */
+#define PIXART_PAT9125_SENSOR_ID 0x31
+#define PIXART_PAT9125_RESET 0x97
+#define PIXART_PAT9125_MOTION_DATA_LENGTH 0x04
+#define PIXART_PAT9125_BANK0 0x00
+#define PIXART_PAT9125_DISABLE_WRITE_PROTECT 0x5A
+#define PIXART_PAT9125_ENABLE_WRITE_PROTECT 0x00
+#define PIXART_PAT9125_CPI_RESOLUTION_X 0x65
+#define PIXART_PAT9125_CPI_RESOLUTION_Y 0xFF
+#define PIXART_PAT9125_LOW_VOLTAGE_SEGMENT 0x04
+#define PIXART_PAT9125_VALID_MOTION_DATA 0x80
+
+#define PIXART_SAMPLING_PERIOD_US_MIN 4000
+#define PIXART_SAMPLING_PERIOD_US_MAX 8000
+
+/* Export functions */
+bool ots_sensor_init(struct i2c_client *);
+
+#endif
diff --git a/drivers/input/misc/ots_pat9125/pixart_platform.h b/drivers/input/misc/ots_pat9125/pixart_platform.h
new file mode 100644
index 000000000000..1fe448fdc2cb
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/pixart_platform.h
@@ -0,0 +1,17 @@
+/* drivers/input/misc/ots_pat9125/pixart_platform.h
+ *
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#ifndef __PIXART_PLATFORM_H_
+#define __PIXART_PLATFORM_H_
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+
+/* Single-register I2C accessors, implemented in the switch driver */
+extern unsigned char read_data(struct i2c_client *, u8 addr);
+extern void write_data(struct i2c_client *, u8 addr, u8 data);
+
+#endif
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.c b/drivers/input/touchscreen/gt9xx/gt9xx.c
index a9d7666a6d6f..3b19a45922c4 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.c
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.c
@@ -115,6 +115,8 @@ struct i2c_client *i2c_connect_client;
#define GTP_DEBUGFS_DIR "ts_debug"
#define GTP_DEBUGFS_FILE_SUSPEND "suspend"
+#define GTP_DEBUGFS_FILE_DATA "data"
+#define GTP_DEBUGFS_FILE_ADDR "addr"
/*******************************************************
Function:
@@ -1521,12 +1523,108 @@ static ssize_t gtp_fw_name_store(struct device *dev,
return size;
}
+/*
+ * Show whether a firmware upgrade is in progress ("0\n"/"1\n").
+ * The buffer bound must cover digit + '\n' + NUL; the previous bound
+ * of 2 truncated the trailing newline.
+ */
+static ssize_t gtp_fw_upgrade_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct goodix_ts_data *ts = dev_get_drvdata(dev);
+
+	return snprintf(buf, 3, "%d\n", ts->fw_loading);
+}
+
+/*
+ * Start a firmware upgrade when "1" is written. Rejects the request
+ * while the device is suspended, and serializes against other sysfs
+ * users via the input device mutex; the IRQ is masked for the whole
+ * upgrade. Only single-character values ("0"/"1" plus newline) are
+ * accepted (size > 2 is rejected).
+ */
+static ssize_t gtp_fw_upgrade_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct goodix_ts_data *ts = dev_get_drvdata(dev);
+	unsigned int val;
+	int ret;
+
+	if (size > 2)
+		return -EINVAL;
+
+	ret = kstrtouint(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	/* NOTE(review): gtp_is_suspend is read outside the mutex — a
+	 * suspend racing with this check is not fully excluded.
+	 */
+	if (ts->gtp_is_suspend) {
+		dev_err(&ts->client->dev,
+			"Can't start fw upgrade. Device is in suspend state");
+		return -EBUSY;
+	}
+
+	mutex_lock(&ts->input_dev->mutex);
+	if (!ts->fw_loading && val) {
+		disable_irq(ts->client->irq);
+		ts->fw_loading = true;
+		if (config_enabled(CONFIG_GT9XX_TOUCHPANEL_UPDATE)) {
+			ret = gup_update_proc(NULL);
+			if (ret == FAIL)
+				dev_err(&ts->client->dev,
+					"Fail to update GTP firmware\n");
+		}
+		ts->fw_loading = false;
+		enable_irq(ts->client->irq);
+	}
+	mutex_unlock(&ts->input_dev->mutex);
+
+	return size;
+}
+
+/*
+ * Same as gtp_fw_upgrade_store(), but sets ts->force_update so
+ * gup_update_proc() skips the firmware-version comparison and flashes
+ * unconditionally. The flag is cleared again once the upgrade ends.
+ */
+static ssize_t gtp_force_fw_upgrade_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct goodix_ts_data *ts = dev_get_drvdata(dev);
+	unsigned int val;
+	int ret;
+
+	if (size > 2)
+		return -EINVAL;
+
+	ret = kstrtouint(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	if (ts->gtp_is_suspend) {
+		dev_err(&ts->client->dev,
+			"Can't start fw upgrade. Device is in suspend state.");
+		return -EBUSY;
+	}
+
+	mutex_lock(&ts->input_dev->mutex);
+	if (!ts->fw_loading && val) {
+		disable_irq(ts->client->irq);
+		ts->fw_loading = true;
+		ts->force_update = true;
+		if (config_enabled(CONFIG_GT9XX_TOUCHPANEL_UPDATE)) {
+			ret = gup_update_proc(NULL);
+			if (ret == FAIL)
+				dev_err(&ts->client->dev,
+					"Fail to force update GTP firmware.\n");
+		}
+		ts->force_update = false;
+		ts->fw_loading = false;
+		enable_irq(ts->client->irq);
+	}
+	mutex_unlock(&ts->input_dev->mutex);
+
+	return size;
+}
+
static DEVICE_ATTR(fw_name, (S_IRUGO | S_IWUSR | S_IWGRP),
gtp_fw_name_show,
gtp_fw_name_store);
+static DEVICE_ATTR(fw_upgrade, (S_IRUGO | S_IWUSR | S_IWGRP),
+ gtp_fw_upgrade_show,
+ gtp_fw_upgrade_store);
+static DEVICE_ATTR(force_fw_upgrade, (S_IRUGO | S_IWUSR | S_IWGRP),
+ gtp_fw_upgrade_show,
+ gtp_force_fw_upgrade_store);
static struct attribute *gtp_attrs[] = {
&dev_attr_fw_name.attr,
+ &dev_attr_fw_upgrade.attr,
+ &dev_attr_force_fw_upgrade.attr,
NULL
};
@@ -1534,6 +1632,84 @@ static const struct attribute_group gtp_attr_grp = {
.attrs = gtp_attrs,
};
+/* Check that a debugfs register address lies in the GTP config range
+ * [GTP_VALID_ADDR_START, GTP_VALID_ADDR_END]; logs and returns false
+ * otherwise. (Returns bool values despite the int return type.)
+ */
+static int gtp_debug_addr_is_valid(u16 addr)
+{
+	if (addr < GTP_VALID_ADDR_START || addr > GTP_VALID_ADDR_END) {
+		pr_err("GTP reg address is invalid: 0x%x\n", addr);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * debugfs "data" write handler: register writes are intentionally not
+ * supported; only a diagnostic is printed for a valid address.
+ * NOTE(review): 'val' is ignored and 0 is returned, so the write
+ * appears to succeed from userspace — consider returning -EPERM.
+ */
+static int gtp_debug_data_set(void *_data, u64 val)
+{
+	struct goodix_ts_data *ts = _data;
+
+	mutex_lock(&ts->input_dev->mutex);
+	if (gtp_debug_addr_is_valid(ts->addr))
+		dev_err(&ts->client->dev,
+			"Writing to GTP registers not supported\n");
+	mutex_unlock(&ts->input_dev->mutex);
+
+	return 0;
+}
+
+/*
+ * debugfs "data" read handler: reads one byte from the register whose
+ * address was set via the "addr" file. The 2-byte big-endian address
+ * is sent first; buf[2] receives the data byte.
+ * NOTE(review): on an invalid address or I2C failure *val is left
+ * untouched and 0 is returned, so userspace sees a stale/uninitialized
+ * value — consider propagating the error instead.
+ */
+static int gtp_debug_data_get(void *_data, u64 *val)
+{
+	struct goodix_ts_data *ts = _data;
+	int ret;
+	u8 buf[3] = {0};
+
+	mutex_lock(&ts->input_dev->mutex);
+	buf[0] = ts->addr >> 8;
+	buf[1] = ts->addr & 0x00ff;
+
+	if (gtp_debug_addr_is_valid(ts->addr)) {
+		ret = gtp_i2c_read(ts->client, buf, 3);
+		if (ret < 0)
+			dev_err(&ts->client->dev,
+				"GTP read register 0x%x failed (%d)\n",
+				ts->addr, ret);
+		else
+			*val = buf[2];
+	}
+	mutex_unlock(&ts->input_dev->mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_data_fops, gtp_debug_data_get,
+			gtp_debug_data_set, "%llx\n");
+
+/*
+ * debugfs "addr" write handler: store the register address used by
+ * the "data" file, after range-checking it. Out-of-range values are
+ * silently dropped (only logged by the validity check).
+ */
+static int gtp_debug_addr_set(void *_data, u64 val)
+{
+	struct goodix_ts_data *ts = _data;
+
+	if (gtp_debug_addr_is_valid(val)) {
+		mutex_lock(&ts->input_dev->mutex);
+		ts->addr = val;
+		mutex_unlock(&ts->input_dev->mutex);
+	}
+
+	return 0;
+}
+
+/*
+ * debugfs "addr" read handler: report the currently selected register
+ * address. NOTE(review): if ts->addr is still 0 (never set), *val is
+ * left unmodified and userspace reads an unspecified value.
+ */
+static int gtp_debug_addr_get(void *_data, u64 *val)
+{
+	struct goodix_ts_data *ts = _data;
+
+	mutex_lock(&ts->input_dev->mutex);
+	if (gtp_debug_addr_is_valid(ts->addr))
+		*val = ts->addr;
+	mutex_unlock(&ts->input_dev->mutex);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_addr_fops, gtp_debug_addr_get,
+			gtp_debug_addr_set, "%llx\n");
+
static int gtp_debug_suspend_set(void *_data, u64 val)
{
struct goodix_ts_data *ts = _data;
@@ -1567,7 +1743,7 @@ static int gtp_debugfs_init(struct goodix_ts_data *data)
data->debug_base = debugfs_create_dir(GTP_DEBUGFS_DIR, NULL);
if (IS_ERR_OR_NULL(data->debug_base)) {
- pr_err("Failed to create debugfs dir\n");
+ dev_err(&data->client->dev, "Failed to create debugfs dir\n");
return -EINVAL;
}
@@ -1576,7 +1752,27 @@ static int gtp_debugfs_init(struct goodix_ts_data *data)
data->debug_base,
data,
&debug_suspend_fops)))) {
- pr_err("Failed to create suspend file\n");
+ dev_err(&data->client->dev, "Failed to create suspend file\n");
+ debugfs_remove_recursive(data->debug_base);
+ return -EINVAL;
+ }
+
+ if ((IS_ERR_OR_NULL(debugfs_create_file(GTP_DEBUGFS_FILE_DATA,
+ S_IWUSR | S_IWGRP | S_IRUSR | S_IRGRP,
+ data->debug_base,
+ data,
+ &debug_data_fops)))) {
+ dev_err(&data->client->dev, "Failed to create data file\n");
+ debugfs_remove_recursive(data->debug_base);
+ return -EINVAL;
+ }
+
+ if ((IS_ERR_OR_NULL(debugfs_create_file(GTP_DEBUGFS_FILE_ADDR,
+ S_IWUSR | S_IWGRP | S_IRUSR | S_IRGRP,
+ data->debug_base,
+ data,
+ &debug_addr_fops)))) {
+ dev_err(&data->client->dev, "Failed to create addr file\n");
debugfs_remove_recursive(data->debug_base);
return -EINVAL;
}
@@ -1645,8 +1841,8 @@ static int goodix_parse_dt(struct device *dev,
pdata->i2c_pull_up = of_property_read_bool(np,
"goodix,i2c-pull-up");
- pdata->no_force_update = of_property_read_bool(np,
- "goodix,no-force-update");
+ pdata->force_update = of_property_read_bool(np,
+ "goodix,force-update");
pdata->enable_power_off = of_property_read_bool(np,
"goodix,enable-power-off");
@@ -1761,9 +1957,7 @@ static int goodix_ts_probe(struct i2c_client *client,
return -EINVAL;
}
-#if GTP_ESD_PROTECT
i2c_connect_client = client;
-#endif
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "GTP I2C not supported\n");
@@ -1811,22 +2005,24 @@ static int goodix_ts_probe(struct i2c_client *client,
goto exit_power_off;
}
+ if (pdata->force_update)
+ ts->force_update = true;
+
if (pdata->fw_name)
strlcpy(ts->fw_name, pdata->fw_name,
strlen(pdata->fw_name) + 1);
-#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
- ret = gup_init_update_proc(ts);
- if (ret < 0) {
- dev_err(&client->dev,
- "GTP Create firmware update thread error.\n");
- goto exit_power_off;
+ if (config_enabled(CONFIG_GT9XX_TOUCHPANEL_UPDATE)) {
+ ret = gup_init_update_proc(ts);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "GTP Create firmware update thread error\n");
+ goto exit_power_off;
+ }
}
-#endif
-
ret = gtp_init_panel(ts);
if (ret < 0) {
- dev_err(&client->dev, "GTP init panel failed.\n");
+ dev_err(&client->dev, "GTP init panel failed\n");
ts->abs_x_max = GTP_MAX_WIDTH;
ts->abs_y_max = GTP_MAX_HEIGHT;
ts->int_trigger_type = GTP_INT_TRIGGER;
@@ -1834,7 +2030,7 @@ static int goodix_ts_probe(struct i2c_client *client,
ret = gtp_request_input_dev(ts);
if (ret) {
- dev_err(&client->dev, "GTP request input dev failed.\n");
+ dev_err(&client->dev, "GTP request input dev failed\n");
goto exit_free_inputdev;
}
input_set_drvdata(ts->input_dev, ts);
@@ -2017,6 +2213,14 @@ static int goodix_ts_suspend(struct device *dev)
}
mutex_lock(&ts->lock);
+
+ if (ts->fw_loading) {
+ dev_info(&ts->client->dev,
+ "Fw upgrade in progress, can't go to suspend.");
+ mutex_unlock(&ts->lock);
+ return 0;
+ }
+
#if GTP_ESD_PROTECT
gtp_esd_switch(ts->client, SWITCH_OFF);
#endif
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.h b/drivers/input/touchscreen/gt9xx/gt9xx.h
index 38487eea7b10..779a0ddd93f8 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.h
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.h
@@ -53,7 +53,7 @@ struct goodix_ts_platform_data {
u32 panel_miny;
u32 panel_maxx;
u32 panel_maxy;
- bool no_force_update;
+ bool force_update;
bool i2c_pull_up;
bool enable_power_off;
size_t config_data_len[GOODIX_MAX_CFG_GROUP];
@@ -74,6 +74,7 @@ struct goodix_ts_data {
s32 use_irq;
u16 abs_x_max;
u16 abs_y_max;
+ u16 addr;
u8 max_touch_num;
u8 int_trigger_type;
u8 green_wake_mode;
@@ -88,6 +89,8 @@ struct goodix_ts_data {
u8 fw_error;
bool power_on;
struct mutex lock;
+ bool fw_loading;
+ bool force_update;
struct regulator *avdd;
struct regulator *vdd;
struct regulator *vcc_i2c;
@@ -172,6 +175,8 @@ extern u16 total_len;
/* HIGH: 0x28/0x29, LOW: 0xBA/0xBB */
#define GTP_I2C_ADDRESS_HIGH 0x14
#define GTP_I2C_ADDRESS_LOW 0x5D
+#define GTP_VALID_ADDR_START 0x8040
+#define GTP_VALID_ADDR_END 0x8177
/* GTP CM_HEAD RW flags */
#define GTP_RW_READ 0
@@ -210,11 +215,9 @@ s32 init_wr_node(struct i2c_client *client);
void uninit_wr_node(void);
#endif
-#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
-extern u8 gup_init_update_proc(struct goodix_ts_data *ts);
+u8 gup_init_update_proc(struct goodix_ts_data *ts);
s32 gup_enter_update_mode(struct i2c_client *client);
void gup_leave_update_mode(struct i2c_client *client);
s32 gup_update_proc(void *dir);
extern struct i2c_client *i2c_connect_client;
-#endif
#endif /* _GOODIX_GT9XX_H_ */
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx_update.c b/drivers/input/touchscreen/gt9xx/gt9xx_update.c
index 4660b27d156c..a91256c576e3 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx_update.c
+++ b/drivers/input/touchscreen/gt9xx/gt9xx_update.c
@@ -1408,10 +1408,15 @@ s32 gup_update_proc(void *dir)
goto file_fail;
}
- ret = gup_enter_update_judge(ts->client, &fw_head);
- if (ret == FAIL) {
- pr_err("Check *.bin file fail");
- goto file_fail;
+ if (ts->force_update) {
+ dev_dbg(&ts->client->dev, "Enter force update.");
+ } else {
+ ret = gup_enter_update_judge(ts->client, &fw_head);
+ if (ret == FAIL) {
+ dev_err(&ts->client->dev,
+ "Check *.bin file fail.");
+ goto file_fail;
+ }
}
ts->enter_update = 1;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c69927bd4ff2..702706ae60f7 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1888,8 +1888,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
return NULL;
smmu_domain->secure_vmid = VMID_INVAL;
- /* disable coherent htw by default */
- smmu_domain->attributes = (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);
INIT_LIST_HEAD(&smmu_domain->pte_info_list);
INIT_LIST_HEAD(&smmu_domain->unassign_list);
INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
@@ -2263,15 +2261,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto err_destroy_domain_context;
}
- if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE))
- && !(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)) {
- dev_err(dev,
- "Can't attach: this domain wants coherent htw but %s doesn't support it\n",
- dev_name(smmu_domain->smmu->dev));
- ret = -EINVAL;
- goto err_destroy_domain_context;
- }
-
/* Looks ok, so add the device to the domain */
ret = arm_smmu_domain_add_master(smmu_domain, cfg);
if (ret)
@@ -2949,7 +2938,7 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
*/
group = generic_device_group(dev);
- if (IS_ERR(group))
+ if (IS_ERR_OR_NULL(group))
return group;
if (dev_is_pci(dev))
@@ -2977,11 +2966,6 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
ret = 0;
break;
- case DOMAIN_ATTR_COHERENT_HTW_DISABLE:
- *((int *)data) = !!(smmu_domain->attributes &
- (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE));
- ret = 0;
- break;
case DOMAIN_ATTR_SECURE_VMID:
*((int *)data) = smmu_domain->secure_vmid;
ret = 0;
@@ -3083,29 +3067,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
break;
- case DOMAIN_ATTR_COHERENT_HTW_DISABLE:
- {
- struct arm_smmu_device *smmu;
- int htw_disable = *((int *)data);
-
- smmu = smmu_domain->smmu;
-
- if (smmu && !(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- && !htw_disable) {
- dev_err(smmu->dev,
- "Can't enable coherent htw on this domain: this SMMU doesn't support it\n");
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (htw_disable)
- smmu_domain->attributes |=
- (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);
- else
- smmu_domain->attributes &=
- ~(1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);
- break;
- }
case DOMAIN_ATTR_SECURE_VMID:
BUG_ON(smmu_domain->secure_vmid != VMID_INVAL);
smmu_domain->secure_vmid = *((int *)data);
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index ea8db1a431d0..266f7065fca4 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -649,7 +649,7 @@ err:
int fast_smmu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping)
{
- int htw_disable = 1, atomic_domain = 1;
+ int atomic_domain = 1;
struct iommu_domain *domain = mapping->domain;
struct iommu_pgtbl_info info;
size_t size = mapping->bits << PAGE_SHIFT;
@@ -657,10 +657,6 @@ int fast_smmu_attach_device(struct device *dev,
if (mapping->base + size > (SZ_1G * 4ULL))
return -EINVAL;
- if (iommu_domain_set_attr(domain, DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &htw_disable))
- return -EINVAL;
-
if (iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC,
&atomic_domain))
return -EINVAL;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 4036997f49c7..3333f15f7f16 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -363,7 +363,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
else
pte |= ARM_LPAE_PTE_TYPE_BLOCK;
- pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
+ pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_OS;
pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
*ptep = pte;
@@ -940,7 +940,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
else
- reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index a0227fd05939..3b54fd4a77e6 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -48,8 +48,6 @@ static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
return "DOMAIN_ATTR_FSL_PAMUV1";
case DOMAIN_ATTR_NESTING:
return "DOMAIN_ATTR_NESTING";
- case DOMAIN_ATTR_COHERENT_HTW_DISABLE:
- return "DOMAIN_ATTR_COHERENT_HTW_DISABLE";
case DOMAIN_ATTR_PT_BASE_ADDR:
return "DOMAIN_ATTR_PT_BASE_ADDR";
case DOMAIN_ATTR_SECURE_VMID:
@@ -96,7 +94,6 @@ static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
{
struct iommu_debug_attachment *attach = s->private;
phys_addr_t pt_phys;
- int coherent_htw_disable;
int secure_vmid;
seq_printf(s, "Domain: 0x%p\n", attach->domain);
@@ -110,14 +107,6 @@ static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
pt_virt, &pt_phys);
}
- seq_puts(s, "COHERENT_HTW_DISABLE: ");
- if (iommu_domain_get_attr(attach->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &coherent_htw_disable))
- seq_puts(s, "(Unknown)\n");
- else
- seq_printf(s, "%d\n", coherent_htw_disable);
-
seq_puts(s, "SECURE_VMID: ");
if (iommu_domain_get_attr(attach->domain,
DOMAIN_ATTR_SECURE_VMID,
@@ -733,7 +722,6 @@ static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
SZ_1M * 20, 0 };
enum iommu_attr attrs[] = {
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_ATOMIC,
};
int htw_disable = 1, atomic = 1;
@@ -764,7 +752,6 @@ static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
SZ_1M * 20, 0 };
enum iommu_attr attrs[] = {
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_ATOMIC,
DOMAIN_ATTR_SECURE_VMID,
};
@@ -797,7 +784,6 @@ static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
enum iommu_attr attrs[] = {
DOMAIN_ATTR_FAST,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_ATOMIC,
};
int one = 1;
@@ -1507,7 +1493,6 @@ static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
int val, bool is_secure)
{
- int htw_disable = 1;
struct bus_type *bus;
bus = msm_iommu_get_bus(ddev->dev);
@@ -1520,13 +1505,6 @@ static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
return -ENOMEM;
}
- if (iommu_domain_set_attr(ddev->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &htw_disable)) {
- pr_err("Couldn't disable coherent htw\n");
- goto out_domain_free;
- }
-
if (is_secure && iommu_domain_set_attr(ddev->domain,
DOMAIN_ATTR_SECURE_VMID,
&val)) {
diff --git a/drivers/iommu/msm_dma_iommu_mapping.c b/drivers/iommu/msm_dma_iommu_mapping.c
index 0a8728ce36dc..25fe36ab6339 100644
--- a/drivers/iommu/msm_dma_iommu_mapping.c
+++ b/drivers/iommu/msm_dma_iommu_mapping.c
@@ -17,6 +17,7 @@
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/err.h>
+#include <asm/barrier.h>
#include <linux/msm_dma_iommu_mapping.h>
@@ -216,10 +217,13 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
sg->dma_length = iommu_map->sgl.dma_length;
kref_get(&iommu_map->ref);
- /*
- * Need to do cache operations here based on "dir" in the
- * future if we go with coherent mappings.
- */
+ if (is_device_dma_coherent(dev))
+ /*
+ * Ensure all outstanding changes for coherent
+ * buffers are applied to the cache before any
+ * DMA occurs.
+ */
+ dmb(ish);
ret = nents;
}
mutex_unlock(&iommu_meta->lock);
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 0cae5d2e5263..bc94dff08d21 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -48,6 +48,7 @@
#define FLASH_LED_REG_THERMAL_THRSH3(base) (base + 0x58)
#define FLASH_LED_REG_VPH_DROOP_THRESHOLD(base) (base + 0x61)
#define FLASH_LED_REG_VPH_DROOP_DEBOUNCE(base) (base + 0x62)
+#define FLASH_LED_REG_ILED_GRT_THRSH(base) (base + 0x67)
#define FLASH_LED_REG_MITIGATION_SEL(base) (base + 0x6E)
#define FLASH_LED_REG_MITIGATION_SW(base) (base + 0x6F)
#define FLASH_LED_REG_LMH_LEVEL(base) (base + 0x70)
@@ -62,22 +63,25 @@
#define FLASH_LED_ISC_WARMUP_DELAY_MASK GENMASK(1, 0)
#define FLASH_LED_CURRENT_DERATE_EN_MASK GENMASK(2, 0)
#define FLASH_LED_VPH_DROOP_DEBOUNCE_MASK GENMASK(1, 0)
+#define FLASH_LED_CHGR_MITIGATION_SEL_MASK GENMASK(5, 4)
#define FLASH_LED_LMH_MITIGATION_SEL_MASK GENMASK(1, 0)
+#define FLASH_LED_ILED_GRT_THRSH_MASK GENMASK(5, 0)
#define FLASH_LED_LMH_LEVEL_MASK GENMASK(1, 0)
#define FLASH_LED_VPH_DROOP_HYSTERESIS_MASK GENMASK(5, 4)
#define FLASH_LED_VPH_DROOP_THRESHOLD_MASK GENMASK(2, 0)
#define FLASH_LED_THERMAL_THRSH_MASK GENMASK(2, 0)
#define FLASH_LED_THERMAL_OTST_MASK GENMASK(2, 0)
-#define FLASH_LED_PREPARE_OPTIONS_MASK GENMASK(2, 0)
#define FLASH_LED_MOD_CTRL_MASK BIT(7)
#define FLASH_LED_HW_SW_STROBE_SEL_MASK BIT(2)
#define FLASH_LED_VPH_DROOP_FAULT_MASK BIT(4)
#define FLASH_LED_LMH_MITIGATION_EN_MASK BIT(0)
+#define FLASH_LED_CHGR_MITIGATION_EN_MASK BIT(4)
#define VPH_DROOP_DEBOUNCE_US_TO_VAL(val_us) (val_us / 8)
#define VPH_DROOP_HYST_MV_TO_VAL(val_mv) (val_mv / 25)
#define VPH_DROOP_THRESH_MV_TO_VAL(val_mv) ((val_mv / 100) - 25)
#define VPH_DROOP_THRESH_VAL_TO_UV(val) ((val + 25) * 100000)
+#define MITIGATION_THRSH_MA_TO_VAL(val_ma) (val_ma / 100)
#define FLASH_LED_ISC_WARMUP_DELAY_SHIFT 6
#define FLASH_LED_WARMUP_DELAY_DEFAULT 2
@@ -99,8 +103,13 @@
#define FLASH_LED_LMH_LEVEL_DEFAULT 0
#define FLASH_LED_LMH_MITIGATION_ENABLE 1
#define FLASH_LED_LMH_MITIGATION_DISABLE 0
-#define FLASH_LED_LMH_MITIGATION_SEL_DEFAULT 2
-#define FLASH_LED_LMH_MITIGATION_SEL_MAX 2
+#define FLASH_LED_CHGR_MITIGATION_ENABLE BIT(4)
+#define FLASH_LED_CHGR_MITIGATION_DISABLE 0
+#define FLASH_LED_MITIGATION_SEL_DEFAULT 2
+#define FLASH_LED_MITIGATION_SEL_MAX 2
+#define FLASH_LED_CHGR_MITIGATION_SEL_SHIFT 4
+#define FLASH_LED_MITIGATION_THRSH_DEFAULT 0xA
+#define FLASH_LED_MITIGATION_THRSH_MAX 0x1F
#define FLASH_LED_LMH_OCV_THRESH_DEFAULT_UV 3700000
#define FLASH_LED_LMH_RBATT_THRESH_DEFAULT_UOHM 400000
#define FLASH_LED_IRES_BASE 3
@@ -199,7 +208,9 @@ struct flash_led_platform_data {
u8 vph_droop_hysteresis;
u8 vph_droop_debounce;
u8 lmh_mitigation_sel;
+ u8 chgr_mitigation_sel;
u8 lmh_level;
+ u8 iled_thrsh_val;
u8 hw_strobe_option;
bool hdrm_auto_mode_en;
bool thermal_derate_en;
@@ -222,6 +233,7 @@ struct qpnp_flash_led {
int enable;
u16 base;
bool trigger_lmh;
+ bool trigger_chgr;
};
static int
@@ -352,12 +364,26 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
return rc;
rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_MITIGATION_SEL(led->base),
+ FLASH_LED_CHGR_MITIGATION_SEL_MASK,
+ led->pdata->chgr_mitigation_sel);
+ if (rc < 0)
+ return rc;
+
+ rc = qpnp_flash_led_masked_write(led,
FLASH_LED_REG_LMH_LEVEL(led->base),
FLASH_LED_LMH_LEVEL_MASK,
led->pdata->lmh_level);
if (rc < 0)
return rc;
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_ILED_GRT_THRSH(led->base),
+ FLASH_LED_ILED_GRT_THRSH_MASK,
+ led->pdata->iled_thrsh_val);
+ if (rc < 0)
+ return rc;
+
return 0;
}
@@ -739,6 +765,18 @@ static int qpnp_flash_led_switch_disable(struct flash_switch_data *snode)
}
}
+ if (!led->trigger_chgr) {
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_MITIGATION_SW(led->base),
+ FLASH_LED_CHGR_MITIGATION_EN_MASK,
+ FLASH_LED_CHGR_MITIGATION_DISABLE);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev, "disable chgr mitigation failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
led->enable--;
if (led->enable == 0) {
rc = qpnp_flash_led_masked_write(led,
@@ -892,6 +930,18 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
}
}
+ if (led->trigger_chgr) {
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_MITIGATION_SW(led->base),
+ FLASH_LED_CHGR_MITIGATION_EN_MASK,
+ FLASH_LED_CHGR_MITIGATION_ENABLE);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev, "trigger chgr mitigation failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
rc = qpnp_flash_led_masked_write(led,
FLASH_LED_EN_LED_CTRL(led->base),
snode->led_mask, val);
@@ -951,6 +1001,10 @@ int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
*max_current = rc;
}
+ led->trigger_chgr = false;
+ if (options & PRE_FLASH)
+ led->trigger_chgr = true;
+
return 0;
}
@@ -959,17 +1013,24 @@ static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev,
{
struct flash_node_data *fnode = NULL;
struct flash_switch_data *snode = NULL;
- struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev);
+ struct qpnp_flash_led *led = NULL;
int rc;
if (!strncmp(led_cdev->name, "led:switch", strlen("led:switch"))) {
snode = container_of(led_cdev, struct flash_switch_data, cdev);
led = dev_get_drvdata(&snode->pdev->dev);
- } else {
+ } else if (!strncmp(led_cdev->name, "led:flash", strlen("led:flash")) ||
+ !strncmp(led_cdev->name, "led:torch",
+ strlen("led:torch"))) {
fnode = container_of(led_cdev, struct flash_node_data, cdev);
led = dev_get_drvdata(&fnode->pdev->dev);
}
+ if (!led) {
+ pr_err("Failed to get flash driver data\n");
+ return;
+ }
+
spin_lock(&led->lock);
if (snode) {
rc = qpnp_flash_led_switch_set(snode, value > 0);
@@ -1649,7 +1710,7 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
return rc;
}
- led->pdata->lmh_mitigation_sel = FLASH_LED_LMH_MITIGATION_SEL_DEFAULT;
+ led->pdata->lmh_mitigation_sel = FLASH_LED_MITIGATION_SEL_DEFAULT;
rc = of_property_read_u32(node, "qcom,lmh-mitigation-sel", &val);
if (!rc) {
led->pdata->lmh_mitigation_sel = val;
@@ -1659,11 +1720,43 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
return rc;
}
- if (led->pdata->lmh_mitigation_sel > FLASH_LED_LMH_MITIGATION_SEL_MAX) {
+ if (led->pdata->lmh_mitigation_sel > FLASH_LED_MITIGATION_SEL_MAX) {
dev_err(&led->pdev->dev, "Invalid lmh_mitigation_sel specified\n");
return -EINVAL;
}
+ led->pdata->chgr_mitigation_sel = FLASH_LED_MITIGATION_SEL_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,chgr-mitigation-sel", &val);
+ if (!rc) {
+ led->pdata->chgr_mitigation_sel = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to parse chgr_mitigation_sel, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (led->pdata->chgr_mitigation_sel > FLASH_LED_MITIGATION_SEL_MAX) {
+ dev_err(&led->pdev->dev, "Invalid chgr_mitigation_sel specified\n");
+ return -EINVAL;
+ }
+
+ led->pdata->chgr_mitigation_sel <<= FLASH_LED_CHGR_MITIGATION_SEL_SHIFT;
+
+ led->pdata->iled_thrsh_val = FLASH_LED_MITIGATION_THRSH_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,iled-thrsh-ma", &val);
+ if (!rc) {
+ led->pdata->iled_thrsh_val = MITIGATION_THRSH_MA_TO_VAL(val);
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to parse iled_thrsh_val, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (led->pdata->iled_thrsh_val > FLASH_LED_MITIGATION_THRSH_MAX) {
+ dev_err(&led->pdev->dev, "Invalid iled_thrsh_val specified\n");
+ return -EINVAL;
+ }
+
led->pdata->all_ramp_up_done_irq =
of_irq_get_byname(node, "all-ramp-up-done-irq");
if (led->pdata->all_ramp_up_done_irq < 0)
diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h
index cd02396abbe7..227dbfa97d64 100644
--- a/drivers/media/dvb-core/demux.h
+++ b/drivers/media/dvb-core/demux.h
@@ -407,8 +407,7 @@ typedef int (*dmx_ts_cb)(const u8 *buffer1,
size_t buffer1_length,
const u8 *buffer2,
size_t buffer2_length,
- struct dmx_ts_feed *source,
- enum dmx_success success);
+ struct dmx_ts_feed *source);
/**
* typedef dmx_section_cb - DVB demux TS filter callback function prototype
@@ -449,8 +448,7 @@ typedef int (*dmx_section_cb)(const u8 *buffer1,
size_t buffer1_len,
const u8 *buffer2,
size_t buffer2_len,
- struct dmx_section_filter *source,
- enum dmx_success success);
+ struct dmx_section_filter *source);
typedef int (*dmx_ts_fullness) (
struct dmx_ts_feed *source,
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 63becfd57eaa..a9c4237d631a 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -2603,15 +2603,15 @@ static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter)
static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
const u8 *buffer2, size_t buffer2_len,
- struct dmx_section_filter *filter,
- enum dmx_success success)
+ struct dmx_section_filter *filter)
{
struct dmxdev_filter *dmxdevfilter = filter->priv;
struct dmx_filter_event event;
ssize_t free;
+
if (!dmxdevfilter) {
- pr_err("%s: null filter. status=%d\n", __func__, success);
+ pr_err("%s: null filter.\n", __func__);
return -EINVAL;
}
@@ -2633,7 +2633,7 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
}
if ((buffer1_len + buffer2_len) == 0) {
- if (success == DMX_CRC_ERROR) {
+ if (buffer1 == NULL && buffer2 == NULL) {
/* Section was dropped due to CRC error */
event.type = DMX_EVENT_SECTION_CRC_ERROR;
dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
@@ -2671,11 +2671,6 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
event.params.section.actual_length =
event.params.section.total_length;
- if (success == DMX_MISSED_ERROR)
- event.params.section.flags = DMX_FILTER_CC_ERROR;
- else
- event.params.section.flags = 0;
-
dvb_dmxdev_add_event(&dmxdevfilter->events, &event);
if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
@@ -2687,8 +2682,7 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
const u8 *buffer2, size_t buffer2_len,
- struct dmx_ts_feed *feed,
- enum dmx_success success)
+ struct dmx_ts_feed *feed)
{
struct dmxdev_filter *dmxdevfilter = feed->priv;
struct dvb_ringbuffer *buffer;
@@ -2697,11 +2691,10 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
ssize_t free;
if (!dmxdevfilter) {
- pr_err("%s: null filter (feed->is_filtering=%d) status=%d\n",
- __func__, feed->is_filtering, success);
+ pr_err("%s: null filter (feed->is_filtering=%d)\n",
+ __func__, feed->is_filtering);
return -EINVAL;
}
-
spin_lock(&dmxdevfilter->dev->lock);
if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER ||
@@ -2725,36 +2718,8 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
return buffer->error;
}
- if (dmxdevfilter->params.pes.output == DMX_OUT_TAP) {
- if (success == DMX_OK && !events->current_event_data_size) {
- events->current_event_start_offset = buffer->pwrite;
- } else if (success == DMX_OK_PES_END) {
- event.type = DMX_EVENT_NEW_PES;
-
- event.params.pes.actual_length =
- events->current_event_data_size;
- event.params.pes.total_length =
- events->current_event_data_size;
-
- event.params.pes.base_offset =
- events->current_event_start_offset;
- event.params.pes.start_offset =
- events->current_event_start_offset;
-
- event.params.pes.flags = 0;
- event.params.pes.stc = 0;
- event.params.pes.transport_error_indicator_counter = 0;
- event.params.pes.continuity_error_counter = 0;
- event.params.pes.ts_packets_num = 0;
-
- /* Do not report zero length PES */
- if (event.params.pes.total_length)
- dvb_dmxdev_add_event(events, &event);
- events->current_event_data_size = 0;
- }
- } else if (!events->current_event_data_size) {
+ if (!events->current_event_data_size)
events->current_event_start_offset = buffer->pwrite;
- }
/* Verify output buffer has sufficient space, or report overflow */
free = dvb_ringbuffer_free(buffer);
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index d45bcc55b76a..7809770bd1ae 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -550,7 +550,7 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
if (feed->pusi_seen == 0)
return 0;
- ret = feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts, DMX_OK);
+ ret = feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts);
/* Verify TS packet was copied successfully */
if (!ret) {
@@ -582,7 +582,7 @@ static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed,
return 0;
return feed->cb.sec(feed->feed.sec.secbuf, feed->feed.sec.seclen,
- NULL, 0, &f->filter, DMX_OK);
+ NULL, 0, &f->filter);
}
static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed)
@@ -613,7 +613,7 @@ static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed)
/* Notify on CRC error */
feed->cb.sec(NULL, 0, NULL, 0,
- &f->filter, DMX_CRC_ERROR);
+ &f->filter);
return -1;
}
@@ -1256,9 +1256,9 @@ static inline void dvb_dmx_swfilter_output_packet(
*/
if (feed->tsp_out_format == DMX_TSP_FORMAT_192_HEAD)
feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL,
- 0, &feed->feed.ts, DMX_OK);
+ 0, &feed->feed.ts);
- feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts, DMX_OK);
+ feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts);
/*
* if we output 192 packet with timestamp at tail of packet,
@@ -1266,7 +1266,7 @@ static inline void dvb_dmx_swfilter_output_packet(
*/
if (feed->tsp_out_format == DMX_TSP_FORMAT_192_TAIL)
feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL,
- 0, &feed->feed.ts, DMX_OK);
+ 0, &feed->feed.ts);
if (feed->idx_params.enable)
dvb_dmx_index(feed, buf, timestamp);
@@ -1749,7 +1749,7 @@ void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count)
{
spin_lock(&demux->lock);
- demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts, DMX_OK);
+ demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts);
spin_unlock(&demux->lock);
}
@@ -2520,7 +2520,7 @@ static int dvbdmx_ts_insertion_insert_buffer(struct dmx_ts_feed *ts_feed,
return 0;
}
- feed->cb.ts(data, size, NULL, 0, ts_feed, DMX_OK);
+ feed->cb.ts(data, size, NULL, 0, ts_feed);
spin_unlock(&demux->lock);
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 454584a8bf17..ce4332e80a91 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -761,8 +761,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
static int dvb_net_ts_callback(const u8 *buffer1, size_t buffer1_len,
const u8 *buffer2, size_t buffer2_len,
- struct dmx_ts_feed *feed,
- enum dmx_success success)
+ struct dmx_ts_feed *feed)
{
struct net_device *dev = feed->priv;
@@ -871,8 +870,7 @@ static void dvb_net_sec(struct net_device *dev,
static int dvb_net_sec_callback(const u8 *buffer1, size_t buffer1_len,
const u8 *buffer2, size_t buffer2_len,
- struct dmx_section_filter *filter,
- enum dmx_success success)
+ struct dmx_section_filter *filter)
{
struct net_device *dev = filter->priv;
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
index 03a61407aef8..feede3a14e07 100644
--- a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
@@ -1427,7 +1427,6 @@ static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
struct device *dev)
{
int rc = 0;
- int disable_htw = 1;
if (!cb || !dev) {
pr_err("Error: invalid input params\n");
@@ -1465,21 +1464,7 @@ static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
goto end;
}
- /*
- * Set the domain attributes
- * disable L2 redirect since it decreases
- * performance
- */
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- pr_err("Error: couldn't disable coherent HTW\n");
- rc = -ENODEV;
- goto err_set_attr;
- }
return 0;
-err_set_attr:
- arm_iommu_release_mapping(cb->mapping);
end:
return rc;
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index d76de3fbb3ed..9e68af87b86c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -1943,7 +1943,7 @@ static void msm_vfe40_stats_cfg_wm_reg(
stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
stats_base = VFE40_STATS_BASE(stats_idx);
/*WR_ADDR_CFG*/
- msm_camera_io_w(stream_info->framedrop_period << 2,
+ msm_camera_io_w((stream_info->framedrop_period - 1) << 2,
vfe_dev->vfe_base + stats_base + 0x8);
/*WR_IRQ_FRAMEDROP_PATTERN*/
msm_camera_io_w(stream_info->framedrop_pattern,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
index 87eaa983087a..fb4f7a1dcc92 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
@@ -1590,7 +1590,7 @@ static void msm_vfe44_stats_cfg_wm_reg(
if (stats_idx == STATS_IDX_BF_SCALE)
return;
/*WR_ADDR_CFG*/
- msm_camera_io_w(stream_info->framedrop_period << 2,
+ msm_camera_io_w((stream_info->framedrop_period - 1) << 2,
vfe_dev->vfe_base + stats_base + 0x8);
/*WR_IRQ_FRAMEDROP_PATTERN*/
msm_camera_io_w(stream_info->framedrop_pattern,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
index ee0c5609c261..d45b6ff0a7d0 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
@@ -1680,7 +1680,7 @@ static void msm_vfe46_stats_cfg_wm_reg(
return;
/* WR_ADDR_CFG */
- msm_camera_io_w(stream_info->framedrop_period << 2,
+ msm_camera_io_w((stream_info->framedrop_period - 1) << 2,
vfe_dev->vfe_base + stats_base + 0x8);
/* WR_IRQ_FRAMEDROP_PATTERN */
msm_camera_io_w(stream_info->framedrop_pattern,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 303fc9bac8c2..6d1ad8ef6804 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -30,10 +30,13 @@
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
#define VFE47_8996V1_VERSION 0x70000000
+#define VFE48_SDM660_VERSION 0x80000003
#define VFE47_BURST_LEN 3
+#define VFE48_SDM660_BURST_LEN 4
#define VFE47_FETCH_BURST_LEN 3
#define VFE47_STATS_BURST_LEN 3
+#define VFE48_SDM660_STATS_BURST_LEN 4
#define VFE47_UB_SIZE_VFE0 2048
#define VFE47_UB_SIZE_VFE1 1536
#define VFE47_UB_STATS_SIZE 144
@@ -359,13 +362,13 @@ void msm_vfe47_release_hardware(struct vfe_device *vfe_dev)
else
id = CAM_AHB_CLIENT_VFE1;
+ vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev, &rate);
+
if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SUSPEND_VOTE) < 0)
pr_err("%s: failed to vote for AHB\n", __func__);
vfe_dev->ahb_vote = CAM_AHB_SUSPEND_VOTE;
- vfe_dev->hw_info->vfe_ops.platform_ops.set_clk_rate(vfe_dev, &rate);
-
vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(
vfe_dev, 0);
vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
@@ -1504,7 +1507,7 @@ void msm_vfe47_axi_cfg_wm_reg(
{
uint32_t val;
int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
- uint32_t wm_base;
+ uint32_t wm_base, burst_len;
wm_base = VFE47_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
val = msm_camera_io_r(vfe_dev->vfe_base + wm_base + 0x14);
@@ -1522,7 +1525,11 @@ void msm_vfe47_axi_cfg_wm_reg(
output_height - 1);
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x1C);
/* WR_BUFFER_CFG */
- val = VFE47_BURST_LEN |
+ if (vfe_dev->vfe_hw_version == VFE48_SDM660_VERSION)
+ burst_len = VFE48_SDM660_BURST_LEN;
+ else
+ burst_len = VFE47_BURST_LEN;
+ val = burst_len |
(stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1) <<
2 |
@@ -2055,7 +2062,7 @@ void msm_vfe47_stats_cfg_wm_reg(
stats_base = VFE47_STATS_BASE(stats_idx);
/* WR_ADDR_CFG */
- msm_camera_io_w(stream_info->framedrop_period << 2,
+ msm_camera_io_w((stream_info->framedrop_period - 1) << 2,
vfe_dev->vfe_base + stats_base + 0x10);
/* WR_IRQ_FRAMEDROP_PATTERN */
msm_camera_io_w(stream_info->framedrop_pattern,
@@ -2089,7 +2096,7 @@ void msm_vfe47_stats_clear_wm_reg(
void msm_vfe47_stats_cfg_ub(struct vfe_device *vfe_dev)
{
int i;
- uint32_t ub_offset = 0;
+ uint32_t ub_offset = 0, stats_burst_len;
uint32_t ub_size[VFE47_NUM_STATS_TYPE] = {
16, /* MSM_ISP_STATS_HDR_BE */
16, /* MSM_ISP_STATS_BG */
@@ -2108,9 +2115,14 @@ void msm_vfe47_stats_cfg_ub(struct vfe_device *vfe_dev)
else
pr_err("%s: incorrect VFE device\n", __func__);
+ if (vfe_dev->vfe_hw_version == VFE48_SDM660_VERSION)
+ stats_burst_len = VFE48_SDM660_STATS_BURST_LEN;
+ else
+ stats_burst_len = VFE47_STATS_BURST_LEN;
+
for (i = 0; i < VFE47_NUM_STATS_TYPE; i++) {
ub_offset -= ub_size[i];
- msm_camera_io_w(VFE47_STATS_BURST_LEN << 30 |
+ msm_camera_io_w(stats_burst_len << 30 |
ub_offset << 16 | (ub_size[i] - 1),
vfe_dev->vfe_base + VFE47_STATS_BASE(i) + 0x14);
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index dfa91bafc776..f1103183c326 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -1263,7 +1263,7 @@ int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg)
stream_info->framedrop_pattern = 0x0;
else
stream_info->framedrop_pattern = 0x1;
- stream_info->framedrop_period = framedrop_period - 1;
+ stream_info->framedrop_period = framedrop_period;
if (stream_info->init_stats_frame_drop == 0)
for (k = 0; k < stream_info->num_isp; k++)
stream_info->vfe_dev[k]->hw_info->
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 3ac4c3af3208..e0d6977b24a6 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -114,6 +114,13 @@ static int msm_cpp_update_gdscr_status(struct cpp_device *cpp_dev,
bool status);
static int msm_cpp_buffer_private_ops(struct cpp_device *cpp_dev,
uint32_t buff_mgr_ops, uint32_t id, void *arg);
+static void msm_cpp_set_micro_irq_mask(struct cpp_device *cpp_dev,
+ uint8_t enable, uint32_t irq_mask);
+static void msm_cpp_flush_queue_and_release_buffer(struct cpp_device *cpp_dev,
+ int queue_len);
+static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info);
+static int msm_cpp_dump_addr(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *frame_info);
#if CONFIG_MSM_CPP_DBG
#define CPP_DBG(fmt, args...) pr_err(fmt, ##args)
@@ -636,6 +643,127 @@ static int32_t msm_cpp_poll_rx_empty(void __iomem *cpp_base)
return rc;
}
+static int msm_cpp_dump_addr(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *frame_info)
+{
+ int32_t s_base, p_base;
+ uint32_t rd_off, wr0_off, wr1_off, wr2_off, wr3_off;
+ uint32_t wr0_mdata_off, wr1_mdata_off, wr2_mdata_off, wr3_mdata_off;
+ uint32_t rd_ref_off, wr_ref_off;
+ uint32_t s_size, p_size;
+ uint8_t tnr_enabled, ubwc_enabled, cds_en;
+ int32_t i = 0;
+ uint32_t *cpp_frame_msg;
+
+ cpp_frame_msg = frame_info->cpp_cmd_msg;
+
+ /* Update stripe/plane size and base offsets */
+ s_base = cpp_dev->payload_params.stripe_base;
+ s_size = cpp_dev->payload_params.stripe_size;
+ p_base = cpp_dev->payload_params.plane_base;
+ p_size = cpp_dev->payload_params.plane_size;
+
+ /* Fetch engine Offset */
+ rd_off = cpp_dev->payload_params.rd_pntr_off;
+ /* Write engine offsets */
+ wr0_off = cpp_dev->payload_params.wr_0_pntr_off;
+ wr1_off = wr0_off + 1;
+ wr2_off = wr1_off + 1;
+ wr3_off = wr2_off + 1;
+ /* Reference engine offsets */
+ rd_ref_off = cpp_dev->payload_params.rd_ref_pntr_off;
+ wr_ref_off = cpp_dev->payload_params.wr_ref_pntr_off;
+ /* Meta data offsets */
+ wr0_mdata_off =
+ cpp_dev->payload_params.wr_0_meta_data_wr_pntr_off;
+ wr1_mdata_off = (wr0_mdata_off + 1);
+ wr2_mdata_off = (wr1_mdata_off + 1);
+ wr3_mdata_off = (wr2_mdata_off + 1);
+
+ tnr_enabled = ((frame_info->feature_mask & TNR_MASK) >> 2);
+ ubwc_enabled = ((frame_info->feature_mask & UBWC_MASK) >> 5);
+ cds_en = ((frame_info->feature_mask & CDS_MASK) >> 6);
+
+ for (i = 0; i < frame_info->num_strips; i++) {
+ pr_err("stripe %d: in %x, out1 %x out2 %x, out3 %x, out4 %x\n",
+ i, cpp_frame_msg[s_base + rd_off + i * s_size],
+ cpp_frame_msg[s_base + wr0_off + i * s_size],
+ cpp_frame_msg[s_base + wr1_off + i * s_size],
+ cpp_frame_msg[s_base + wr2_off + i * s_size],
+ cpp_frame_msg[s_base + wr3_off + i * s_size]);
+
+ if (tnr_enabled) {
+ pr_err("stripe %d: read_ref %x, write_ref %x\n", i,
+ cpp_frame_msg[s_base + rd_ref_off + i * s_size],
+ cpp_frame_msg[s_base + wr_ref_off + i * s_size]
+ );
+ }
+
+ if (cds_en) {
+ pr_err("stripe %d:, dsdn_off %x\n", i,
+ cpp_frame_msg[s_base + rd_ref_off + i * s_size]
+ );
+ }
+
+ if (ubwc_enabled) {
+ pr_err("stripe %d: metadata %x, %x, %x, %x\n", i,
+ cpp_frame_msg[s_base + wr0_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr1_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr2_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr3_mdata_off +
+ i * s_size]
+ );
+ }
+
+ }
+ return 0;
+}
+
+static void msm_cpp_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags, void *token)
+{
+ struct cpp_device *cpp_dev = NULL;
+ struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME];
+ int32_t i = 0, queue_len = 0;
+ struct msm_device_queue *queue = NULL;
+
+ if (token) {
+ cpp_dev = token;
+ disable_irq(cpp_dev->irq->start);
+ if (atomic_read(&cpp_timer.used)) {
+ atomic_set(&cpp_timer.used, 0);
+ del_timer_sync(&cpp_timer.cpp_timer);
+ }
+ mutex_lock(&cpp_dev->mutex);
+ tasklet_kill(&cpp_dev->cpp_tasklet);
+ cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
+ queue = &cpp_timer.data.cpp_dev->processing_q;
+ queue_len = queue->len;
+ if (!queue_len) {
+ pr_err("%s:%d: Invalid queuelen\n", __func__, __LINE__);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ mutex_unlock(&cpp_dev->mutex);
+ return;
+ }
+ for (i = 0; i < queue_len; i++) {
+ if (cpp_timer.data.processed_frame[i]) {
+ processed_frame[i] =
+ cpp_timer.data.processed_frame[i];
+ pr_err("Fault on identity=0x%x, frame_id=%03d\n",
+ processed_frame[i]->identity,
+ processed_frame[i]->frame_id);
+ msm_cpp_dump_addr(cpp_dev, processed_frame[i]);
+ msm_cpp_dump_frame_cmd(processed_frame[i]);
+ }
+ }
+ msm_cpp_flush_queue_and_release_buffer(cpp_dev, queue_len);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ mutex_unlock(&cpp_dev->mutex);
+ }
+}
static int cpp_init_mem(struct cpp_device *cpp_dev)
{
@@ -652,6 +780,9 @@ static int cpp_init_mem(struct cpp_device *cpp_dev)
return -ENODEV;
cpp_dev->iommu_hdl = iommu_hdl;
+ cam_smmu_reg_client_page_fault_handler(
+ cpp_dev->iommu_hdl,
+ msm_cpp_iommu_fault_handler, cpp_dev);
return 0;
}
@@ -2617,14 +2748,14 @@ static int msm_cpp_validate_input(unsigned int cmd, void *arg,
break;
default: {
if (ioctl_ptr == NULL) {
- pr_err("Wrong ioctl_ptr %pK\n", ioctl_ptr);
+ pr_err("Wrong ioctl_ptr for cmd %u\n", cmd);
return -EINVAL;
}
*ioctl_ptr = arg;
if ((*ioctl_ptr == NULL) ||
- ((*ioctl_ptr)->ioctl_ptr == NULL)) {
- pr_err("Wrong arg %pK\n", arg);
+ (*ioctl_ptr)->ioctl_ptr == NULL) {
+ pr_err("Error invalid ioctl argument cmd %u", cmd);
return -EINVAL;
}
break;
@@ -2649,6 +2780,12 @@ long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
pr_err("cpp_dev is null\n");
return -EINVAL;
}
+
+ if (_IOC_DIR(cmd) == _IOC_NONE) {
+ pr_err("Invalid ioctl/subdev cmd %u", cmd);
+ return -EINVAL;
+ }
+
rc = msm_cpp_validate_input(cmd, arg, &ioctl_ptr);
if (rc != 0) {
pr_err("input validation failed\n");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
index bf3973888573..a700f836061c 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -1559,11 +1559,13 @@ static long msm_actuator_subdev_ioctl(struct v4l2_subdev *sd,
pr_err("a_ctrl->i2c_client.i2c_func_tbl NULL\n");
return -EINVAL;
}
+ mutex_lock(a_ctrl->actuator_mutex);
rc = msm_actuator_power_down(a_ctrl);
if (rc < 0) {
pr_err("%s:%d Actuator Power down failed\n",
__func__, __LINE__);
}
+ mutex_unlock(a_ctrl->actuator_mutex);
return msm_actuator_close(sd, NULL);
default:
return -ENOIOCTLCMD;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h
index 79d7d94582c5..bba0b2bc9cdb 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h
@@ -50,11 +50,11 @@ struct csiphy_reg_3ph_parms_t csiphy_v5_0_1_3ph = {
{0x148, 0xFE},
{0x14C, 0x1},
{0x154, 0x0},
- {0x15C, 0x23},
+ {0x15C, 0x63},
{0x160, ULPM_WAKE_UP_TIMER_MODE},
{0x164, 0x00},
- {0x168, 0xA0},
- {0x16C, 0x25},
+ {0x168, 0xAC},
+ {0x16C, 0xA5},
{0x170, 0x41},
{0x174, 0x41},
{0x178, 0x3E},
@@ -98,7 +98,7 @@ struct csiphy_reg_3ph_parms_t csiphy_v5_0_1_3ph = {
{0x0, 0x91},
{0x70C, 0xA5},
{0x38, 0xFE},
- {0x81c, 0x6},
+ {0x81c, 0x2},
};
struct csiphy_settings_t csiphy_combo_mode_v5_0_1 = {
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
index d1e3c090c972..fb8f0c4bae37 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -4186,7 +4186,7 @@ static int mpq_sdmx_section_filtering(struct mpq_feed *mpq_feed,
mpq_feed->sdmx_buf.size) {
feed->cb.sec(&mpq_feed->sdmx_buf.data[mpq_feed->sdmx_buf.pread],
header->payload_length,
- NULL, 0, &f->filter, DMX_OK);
+ NULL, 0, &f->filter);
} else {
int split = mpq_feed->sdmx_buf.size - mpq_feed->sdmx_buf.pread;
@@ -4194,7 +4194,7 @@ static int mpq_sdmx_section_filtering(struct mpq_feed *mpq_feed,
split,
&mpq_feed->sdmx_buf.data[0],
header->payload_length - split,
- &f->filter, DMX_OK);
+ &f->filter);
}
return 0;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 930b18abd71b..b88f03ce89ae 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -1123,6 +1123,13 @@ static int sde_rotator_try_fmt_vid_cap(struct file *file,
struct sde_rotation_config config;
int ret;
+ if ((f->fmt.pix.width == 0) || (f->fmt.pix.height == 0)) {
+ SDEDEV_WARN(ctx->rot_dev->dev,
+ "Not supporting 0 width/height: %dx%d\n",
+ f->fmt.pix.width, f->fmt.pix.height);
+ return -EINVAL;
+ }
+
sde_rot_mgr_lock(rot_dev->mgr);
sde_rotator_get_config_from_ctx(ctx, &config);
config.output.format = f->fmt.pix.pixelformat;
@@ -1162,6 +1169,13 @@ static int sde_rotator_try_fmt_vid_out(struct file *file,
struct sde_rotation_config config;
int ret;
+ if ((f->fmt.pix.width == 0) || (f->fmt.pix.height == 0)) {
+ SDEDEV_WARN(ctx->rot_dev->dev,
+ "Not supporting 0 width/height: %dx%d\n",
+ f->fmt.pix.width, f->fmt.pix.height);
+ return -EINVAL;
+ }
+
sde_rot_mgr_lock(rot_dev->mgr);
sde_rotator_get_config_from_ctx(ctx, &config);
config.input.format = f->fmt.pix.pixelformat;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index 7bbd8aa53342..c11c4b61d832 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -448,7 +448,6 @@ int sde_smmu_probe(struct platform_device *pdev)
struct sde_smmu_domain smmu_domain;
const struct of_device_id *match;
struct sde_module_power *mp;
- int disable_htw = 1;
char name[MAX_CLIENT_NAME_LEN];
if (!mdata) {
@@ -535,13 +534,6 @@ int sde_smmu_probe(struct platform_device *pdev)
goto disable_power;
}
- rc = iommu_domain_set_attr(sde_smmu->mmu_mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
- if (rc) {
- SDEROT_ERR("couldn't disable coherent HTW\n");
- goto release_mapping;
- }
-
if (smmu_domain.domain == SDE_IOMMU_DOMAIN_ROT_SECURE) {
int secure_vmid = VMID_CP_PIXEL;
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index d9f47978a081..becea0c59521 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -667,7 +667,7 @@ static int is_ctrl_valid_for_codec(struct msm_vidc_inst *inst,
int rc = 0;
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDC_VIDEO_MVC_BUFFER_LAYOUT:
- if (inst->fmts[OUTPUT_PORT]->fourcc != V4L2_PIX_FMT_H264_MVC) {
+ if (inst->fmts[OUTPUT_PORT].fourcc != V4L2_PIX_FMT_H264_MVC) {
dprintk(VIDC_ERR, "Control %#x only valid for MVC\n",
ctrl->id);
rc = -ENOTSUPP;
@@ -675,7 +675,7 @@ static int is_ctrl_valid_for_codec(struct msm_vidc_inst *inst,
}
break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
- if (inst->fmts[OUTPUT_PORT]->fourcc == V4L2_PIX_FMT_H264_MVC &&
+ if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC &&
ctrl->val != V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) {
dprintk(VIDC_ERR,
"Profile %#x not supported for MVC\n",
@@ -685,7 +685,7 @@ static int is_ctrl_valid_for_codec(struct msm_vidc_inst *inst,
}
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
- if (inst->fmts[OUTPUT_PORT]->fourcc == V4L2_PIX_FMT_H264_MVC &&
+ if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_H264_MVC &&
ctrl->val >= V4L2_MPEG_VIDEO_H264_LEVEL_5_2) {
dprintk(VIDC_ERR, "Level %#x not supported for MVC\n",
ctrl->val);
@@ -896,10 +896,10 @@ int msm_vdec_prepare_buf(struct msm_vidc_inst *inst,
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (b->length != inst->fmts[CAPTURE_PORT]->num_planes) {
+ if (b->length != inst->fmts[CAPTURE_PORT].num_planes) {
dprintk(VIDC_ERR,
"Planes mismatch: needed: %d, allocated: %d\n",
- inst->fmts[CAPTURE_PORT]->num_planes,
+ inst->fmts[CAPTURE_PORT].num_planes,
b->length);
rc = -EINVAL;
break;
@@ -975,10 +975,10 @@ int msm_vdec_release_buf(struct msm_vidc_inst *inst,
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (b->length != inst->fmts[CAPTURE_PORT]->num_planes) {
+ if (b->length != inst->fmts[CAPTURE_PORT].num_planes) {
dprintk(VIDC_ERR,
"Planes mismatch: needed: %d, to release: %d\n",
- inst->fmts[CAPTURE_PORT]->num_planes, b->length);
+ inst->fmts[CAPTURE_PORT].num_planes, b->length);
rc = -EINVAL;
break;
}
@@ -1099,9 +1099,9 @@ int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
hdev = inst->core->device;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- fmt = inst->fmts[CAPTURE_PORT];
+ fmt = &inst->fmts[CAPTURE_PORT];
else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
- fmt = inst->fmts[OUTPUT_PORT];
+ fmt = &inst->fmts[OUTPUT_PORT];
else
return -ENOTSUPP;
@@ -1250,6 +1250,8 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
rc = -EINVAL;
goto err_invalid_fmt;
}
+ memcpy(&inst->fmts[fmt->type], fmt,
+ sizeof(struct msm_vidc_format));
inst->prop.width[CAPTURE_PORT] = f->fmt.pix_mp.width;
inst->prop.height[CAPTURE_PORT] = f->fmt.pix_mp.height;
@@ -1257,7 +1259,6 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
msm_comm_get_hal_output_buffer(inst),
f->fmt.pix_mp.pixelformat);
- inst->fmts[fmt->type] = fmt;
if (msm_comm_get_stream_output_mode(inst) ==
HAL_VIDEO_DECODER_SECONDARY) {
frame_sz.buffer_type = HAL_BUFFER_OUTPUT2;
@@ -1272,10 +1273,10 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
}
f->fmt.pix_mp.plane_fmt[0].sizeimage =
- fmt->get_frame_size(0,
+ inst->fmts[fmt->type].get_frame_size(0,
f->fmt.pix_mp.height, f->fmt.pix_mp.width);
- extra_idx = EXTRADATA_IDX(fmt->num_planes);
+ extra_idx = EXTRADATA_IDX(inst->fmts[fmt->type].num_planes);
if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
VENUS_EXTRADATA_SIZE(
@@ -1283,8 +1284,8 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
inst->prop.width[CAPTURE_PORT]);
}
- f->fmt.pix_mp.num_planes = fmt->num_planes;
- for (i = 0; i < fmt->num_planes; ++i) {
+ f->fmt.pix_mp.num_planes = inst->fmts[fmt->type].num_planes;
+ for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) {
inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
f->fmt.pix_mp.plane_fmt[i].sizeimage;
}
@@ -1303,6 +1304,8 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
rc = -EINVAL;
goto err_invalid_fmt;
}
+ memcpy(&inst->fmts[fmt->type], fmt,
+ sizeof(struct msm_vidc_format));
rc = msm_comm_try_state(inst, MSM_VIDC_CORE_INIT_DONE);
if (rc) {
@@ -1310,17 +1313,16 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
goto err_invalid_fmt;
}
- if (!(get_hal_codec(fmt->fourcc) &
+ if (!(get_hal_codec(inst->fmts[fmt->type].fourcc) &
inst->core->dec_codec_supported)) {
dprintk(VIDC_ERR,
"Codec(%#x) is not present in the supported codecs list(%#x)\n",
- get_hal_codec(fmt->fourcc),
+ get_hal_codec(inst->fmts[fmt->type].fourcc),
inst->core->dec_codec_supported);
rc = -EINVAL;
goto err_invalid_fmt;
}
- inst->fmts[fmt->type] = fmt;
rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
if (rc) {
dprintk(VIDC_ERR, "Failed to open instance\n");
@@ -1343,14 +1345,15 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
frame_sz.height);
msm_comm_try_set_prop(inst, HAL_PARAM_FRAME_SIZE, &frame_sz);
- max_input_size = get_frame_size(inst, fmt, f->type, 0);
+ max_input_size = get_frame_size(
+inst, &inst->fmts[fmt->type], f->type, 0);
if (f->fmt.pix_mp.plane_fmt[0].sizeimage > max_input_size ||
!f->fmt.pix_mp.plane_fmt[0].sizeimage) {
f->fmt.pix_mp.plane_fmt[0].sizeimage = max_input_size;
}
- f->fmt.pix_mp.num_planes = fmt->num_planes;
- for (i = 0; i < fmt->num_planes; ++i) {
+ f->fmt.pix_mp.num_planes = inst->fmts[fmt->type].num_planes;
+ for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) {
inst->bufq[OUTPUT_PORT].vb2_bufq.plane_sizes[i] =
f->fmt.pix_mp.plane_fmt[i].sizeimage;
}
@@ -1464,20 +1467,20 @@ static int msm_vdec_queue_setup(struct vb2_queue *q,
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
- *num_planes = inst->fmts[OUTPUT_PORT]->num_planes;
+ *num_planes = inst->fmts[OUTPUT_PORT].num_planes;
if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
*num_buffers > MAX_NUM_OUTPUT_BUFFERS)
*num_buffers = MIN_NUM_OUTPUT_BUFFERS;
for (i = 0; i < *num_planes; i++) {
sizes[i] = get_frame_size(inst,
- inst->fmts[OUTPUT_PORT], q->type, i);
+ &inst->fmts[OUTPUT_PORT], q->type, i);
}
rc = set_actual_buffer_count(inst, *num_buffers,
HAL_BUFFER_INPUT);
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
dprintk(VIDC_DBG, "Getting bufreqs on capture plane\n");
- *num_planes = inst->fmts[CAPTURE_PORT]->num_planes;
+ *num_planes = inst->fmts[CAPTURE_PORT].num_planes;
rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
if (rc) {
dprintk(VIDC_ERR, "Failed to open instance\n");
@@ -1562,7 +1565,7 @@ static int msm_vdec_queue_setup(struct vb2_queue *q,
}
extra_idx =
- EXTRADATA_IDX(inst->fmts[CAPTURE_PORT]->num_planes);
+ EXTRADATA_IDX(inst->fmts[CAPTURE_PORT].num_planes);
if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
sizes[extra_idx] =
VENUS_EXTRADATA_SIZE(
@@ -1663,7 +1666,7 @@ static inline int start_streaming(struct msm_vidc_inst *inst)
unsigned int buffer_size;
struct msm_vidc_format *fmt = NULL;
- fmt = inst->fmts[CAPTURE_PORT];
+ fmt = &inst->fmts[CAPTURE_PORT];
buffer_size = fmt->get_frame_size(0,
inst->prop.height[CAPTURE_PORT],
inst->prop.width[CAPTURE_PORT]);
@@ -1885,8 +1888,6 @@ int msm_vdec_inst_init(struct msm_vidc_inst *inst)
dprintk(VIDC_ERR, "Invalid input = %pK\n", inst);
return -EINVAL;
}
- inst->fmts[OUTPUT_PORT] = &vdec_formats[2];
- inst->fmts[CAPTURE_PORT] = &vdec_formats[0];
inst->prop.height[CAPTURE_PORT] = DEFAULT_HEIGHT;
inst->prop.width[CAPTURE_PORT] = DEFAULT_WIDTH;
inst->prop.height[OUTPUT_PORT] = DEFAULT_HEIGHT;
@@ -1902,6 +1903,10 @@ int msm_vdec_inst_init(struct msm_vidc_inst *inst)
inst->buffer_mode_set[OUTPUT_PORT] = HAL_BUFFER_MODE_STATIC;
inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC;
inst->prop.fps = DEFAULT_FPS;
+ memcpy(&inst->fmts[OUTPUT_PORT], &vdec_formats[2],
+ sizeof(struct msm_vidc_format));
+ memcpy(&inst->fmts[CAPTURE_PORT], &vdec_formats[0],
+ sizeof(struct msm_vidc_format));
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 99f30d9cb97b..f071aae3ccab 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1400,6 +1400,49 @@ static struct msm_vidc_format venc_formats[] = {
},
};
+static void msm_venc_update_plane_count(struct msm_vidc_inst *inst, int type)
+{
+ struct v4l2_ctrl *ctrl = NULL;
+ u32 extradata = 0;
+
+ if (!inst)
+ return;
+
+ inst->fmts[type].num_planes = 1;
+
+ ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
+ V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
+
+ if (ctrl)
+ extradata = v4l2_ctrl_g_ctrl(ctrl);
+
+ if (type == CAPTURE_PORT) {
+ switch (extradata) {
+ case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
+ case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
+ case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
+ case V4L2_MPEG_VIDC_EXTRADATA_LTR:
+ case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
+ inst->fmts[CAPTURE_PORT].num_planes = 2;
+ default:
+ break;
+ }
+ } else if (type == OUTPUT_PORT) {
+ switch (extradata) {
+ case V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP:
+ case V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM:
+ case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
+ case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
+ case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
+ case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
+ inst->fmts[OUTPUT_PORT].num_planes = 2;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
static int msm_venc_set_csc(struct msm_vidc_inst *inst);
static int msm_venc_queue_setup(struct vb2_queue *q,
@@ -1414,8 +1457,7 @@ static int msm_venc_queue_setup(struct vb2_queue *q,
enum hal_property property_id;
struct hfi_device *hdev;
struct hal_buffer_requirements *buff_req;
- struct v4l2_ctrl *ctrl = NULL;
- u32 extradata = 0, extra_idx = 0;
+ u32 extra_idx = 0;
struct hal_buffer_requirements *buff_req_buffer = NULL;
if (!q || !q->drv_priv) {
@@ -1471,21 +1513,8 @@ static int msm_venc_queue_setup(struct vb2_queue *q,
temp, *num_buffers);
}
- ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
- V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
- if (ctrl)
- extradata = v4l2_ctrl_g_ctrl(ctrl);
- switch (extradata) {
- case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
- case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
- case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
- case V4L2_MPEG_VIDC_EXTRADATA_LTR:
- case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
- *num_planes = *num_planes + 1;
- default:
- break;
- }
- inst->fmts[CAPTURE_PORT]->num_planes = *num_planes;
+ msm_venc_update_plane_count(inst, CAPTURE_PORT);
+ *num_planes = inst->fmts[CAPTURE_PORT].num_planes;
for (i = 0; i < *num_planes; i++) {
int extra_idx = EXTRADATA_IDX(*num_planes);
@@ -1543,24 +1572,9 @@ static int msm_venc_queue_setup(struct vb2_queue *q,
dprintk(VIDC_DBG, "actual input buffer count set to fw = %d\n",
*num_buffers);
- ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
- V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
- if (ctrl)
- extradata = v4l2_ctrl_g_ctrl(ctrl);
- switch (extradata) {
- case V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP:
- case V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM:
- case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
- case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
- case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
- case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
- *num_planes = *num_planes + 1;
- break;
- default:
- break;
- }
+ msm_venc_update_plane_count(inst, OUTPUT_PORT);
+ *num_planes = inst->fmts[OUTPUT_PORT].num_planes;
- inst->fmts[OUTPUT_PORT]->num_planes = *num_planes;
rc = call_hfi_op(hdev, session_set_property, inst->session,
property_id, &new_buf_count);
if (rc)
@@ -1570,12 +1584,12 @@ static int msm_venc_queue_setup(struct vb2_queue *q,
inst->buff_req.buffer[0].buffer_size,
inst->buff_req.buffer[0].buffer_alignment,
inst->buff_req.buffer[0].buffer_count_actual);
- sizes[0] = inst->fmts[OUTPUT_PORT]->get_frame_size(
+ sizes[0] = inst->fmts[OUTPUT_PORT].get_frame_size(
0, inst->prop.height[OUTPUT_PORT],
inst->prop.width[OUTPUT_PORT]);
extra_idx =
- EXTRADATA_IDX(inst->fmts[OUTPUT_PORT]->num_planes);
+ EXTRADATA_IDX(inst->fmts[OUTPUT_PORT].num_planes);
if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
buff_req_buffer = get_buff_req_buffer(inst,
HAL_BUFFER_EXTRADATA_INPUT);
@@ -1610,7 +1624,7 @@ static int msm_venc_toggle_hier_p(struct msm_vidc_inst *inst, int layers)
return -EINVAL;
}
- if (inst->fmts[CAPTURE_PORT]->fourcc != V4L2_PIX_FMT_VP8)
+ if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_VP8)
return 0;
num_enh_layers = layers ? : 0;
@@ -2177,10 +2191,10 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD:
- if (inst->fmts[CAPTURE_PORT]->fourcc != V4L2_PIX_FMT_H264 &&
- inst->fmts[CAPTURE_PORT]->fourcc !=
+ if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264 &&
+ inst->fmts[CAPTURE_PORT].fourcc !=
V4L2_PIX_FMT_H264_NO_SC &&
- inst->fmts[CAPTURE_PORT]->fourcc !=
+ inst->fmts[CAPTURE_PORT].fourcc !=
V4L2_PIX_FMT_HEVC) {
dprintk(VIDC_ERR,
"Control %#x only valid for H264 and HEVC\n",
@@ -2669,8 +2683,8 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE: {
bool codec_avc =
- inst->fmts[CAPTURE_PORT]->fourcc == V4L2_PIX_FMT_H264 ||
- inst->fmts[CAPTURE_PORT]->fourcc ==
+ inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264 ||
+ inst->fmts[CAPTURE_PORT].fourcc ==
V4L2_PIX_FMT_H264_NO_SC;
temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE);
@@ -2696,8 +2710,8 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
cir_mbs = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS);
is_cont_intra_supported =
- (inst->fmts[CAPTURE_PORT]->fourcc == V4L2_PIX_FMT_H264) ||
- (inst->fmts[CAPTURE_PORT]->fourcc == V4L2_PIX_FMT_HEVC);
+ (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_H264) ||
+ (inst->fmts[CAPTURE_PORT].fourcc == V4L2_PIX_FMT_HEVC);
if (is_cont_intra_supported) {
if (ctrl->val != HAL_INTRA_REFRESH_NONE)
@@ -3054,7 +3068,7 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS:
- if (inst->fmts[CAPTURE_PORT]->fourcc != V4L2_PIX_FMT_HEVC) {
+ if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_HEVC) {
dprintk(VIDC_ERR, "Hier B supported for HEVC only\n");
rc = -ENOTSUPP;
break;
@@ -3483,8 +3497,6 @@ int msm_venc_inst_init(struct msm_vidc_inst *inst)
dprintk(VIDC_ERR, "Invalid input = %pK\n", inst);
return -EINVAL;
}
- inst->fmts[CAPTURE_PORT] = &venc_formats[4];
- inst->fmts[OUTPUT_PORT] = &venc_formats[0];
inst->prop.height[CAPTURE_PORT] = DEFAULT_HEIGHT;
inst->prop.width[CAPTURE_PORT] = DEFAULT_WIDTH;
inst->prop.height[OUTPUT_PORT] = DEFAULT_HEIGHT;
@@ -3501,6 +3513,10 @@ int msm_venc_inst_init(struct msm_vidc_inst *inst)
inst->buffer_mode_set[CAPTURE_PORT] = HAL_BUFFER_MODE_STATIC;
inst->prop.fps = DEFAULT_FPS;
inst->capability.pixelprocess_capabilities = 0;
+ memcpy(&inst->fmts[CAPTURE_PORT], &venc_formats[4],
+ sizeof(struct msm_vidc_format));
+ memcpy(&inst->fmts[OUTPUT_PORT], &venc_formats[0],
+ sizeof(struct msm_vidc_format));
return rc;
}
@@ -3624,7 +3640,11 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
goto exit;
}
- inst->fmts[fmt->type] = fmt;
+ memcpy(&inst->fmts[fmt->type], fmt,
+ sizeof(struct msm_vidc_format));
+
+ msm_venc_update_plane_count(inst, CAPTURE_PORT);
+ fmt->num_planes = inst->fmts[CAPTURE_PORT].num_planes;
rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
if (rc) {
@@ -3676,7 +3696,11 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
rc = -EINVAL;
goto exit;
}
- inst->fmts[fmt->type] = fmt;
+ memcpy(&inst->fmts[fmt->type], fmt,
+ sizeof(struct msm_vidc_format));
+
+ msm_venc_update_plane_count(inst, OUTPUT_PORT);
+ fmt->num_planes = inst->fmts[OUTPUT_PORT].num_planes;
msm_comm_set_color_format(inst, HAL_BUFFER_INPUT, fmt->fourcc);
} else {
@@ -3717,12 +3741,12 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
struct hal_buffer_requirements *bufreq = NULL;
int extra_idx = 0;
- for (i = 0; i < fmt->num_planes; ++i) {
+ for (i = 0; i < inst->fmts[fmt->type].num_planes; ++i) {
f->fmt.pix_mp.plane_fmt[i].sizeimage =
- fmt->get_frame_size(i,
+ inst->fmts[fmt->type].get_frame_size(i,
f->fmt.pix_mp.height, f->fmt.pix_mp.width);
}
- extra_idx = EXTRADATA_IDX(fmt->num_planes);
+ extra_idx = EXTRADATA_IDX(inst->fmts[fmt->type].num_planes);
if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
bufreq = get_buff_req_buffer(inst,
HAL_BUFFER_EXTRADATA_INPUT);
@@ -3739,7 +3763,7 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
const struct msm_vidc_format *fmt = NULL;
int rc = 0;
int i;
- u32 height, width;
+ u32 height, width, num_planes;
unsigned int extra_idx = 0;
struct hal_buffer_requirements *bufreq = NULL;
@@ -3757,13 +3781,17 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
}
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- fmt = inst->fmts[CAPTURE_PORT];
+ fmt = &inst->fmts[CAPTURE_PORT];
height = inst->prop.height[CAPTURE_PORT];
width = inst->prop.width[CAPTURE_PORT];
+ msm_venc_update_plane_count(inst, CAPTURE_PORT);
+ num_planes = inst->fmts[CAPTURE_PORT].num_planes;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- fmt = inst->fmts[OUTPUT_PORT];
+ fmt = &inst->fmts[OUTPUT_PORT];
height = inst->prop.height[OUTPUT_PORT];
width = inst->prop.width[OUTPUT_PORT];
+ msm_venc_update_plane_count(inst, OUTPUT_PORT);
+ num_planes = inst->fmts[OUTPUT_PORT].num_planes;
} else {
dprintk(VIDC_ERR, "Invalid type: %x\n", f->type);
return -ENOTSUPP;
@@ -3772,10 +3800,10 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
f->fmt.pix_mp.pixelformat = fmt->fourcc;
f->fmt.pix_mp.height = height;
f->fmt.pix_mp.width = width;
- f->fmt.pix_mp.num_planes = fmt->num_planes;
+ f->fmt.pix_mp.num_planes = num_planes;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- for (i = 0; i < fmt->num_planes; ++i) {
+ for (i = 0; i < num_planes; ++i) {
f->fmt.pix_mp.plane_fmt[i].sizeimage =
fmt->get_frame_size(i, height, width);
}
@@ -3786,7 +3814,7 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
f->fmt.pix_mp.plane_fmt[0].sizeimage =
bufreq ? bufreq->buffer_size : 0;
}
- extra_idx = EXTRADATA_IDX(fmt->num_planes);
+ extra_idx = EXTRADATA_IDX(num_planes);
if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
bufreq = get_buff_req_buffer(inst,
@@ -3799,7 +3827,7 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
bufreq ? bufreq->buffer_size : 0;
}
- for (i = 0; i < fmt->num_planes; ++i) {
+ for (i = 0; i < num_planes; ++i) {
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
inst->bufq[OUTPUT_PORT].vb2_bufq.plane_sizes[i] =
f->fmt.pix_mp.plane_fmt[i].sizeimage;
@@ -3864,10 +3892,10 @@ int msm_venc_prepare_buf(struct msm_vidc_inst *inst,
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
- if (b->length != inst->fmts[CAPTURE_PORT]->num_planes) {
+ if (b->length != inst->fmts[CAPTURE_PORT].num_planes) {
dprintk(VIDC_ERR,
"Planes mismatch: needed: %d, allocated: %d\n",
- inst->fmts[CAPTURE_PORT]->num_planes,
+ inst->fmts[CAPTURE_PORT].num_planes,
b->length);
rc = -EINVAL;
break;
@@ -3935,10 +3963,10 @@ int msm_venc_release_buf(struct msm_vidc_inst *inst,
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: {
if (b->length !=
- inst->fmts[CAPTURE_PORT]->num_planes) {
+ inst->fmts[CAPTURE_PORT].num_planes) {
dprintk(VIDC_ERR,
"Planes mismatch: needed: %d, to release: %d\n",
- inst->fmts[CAPTURE_PORT]->num_planes,
+ inst->fmts[CAPTURE_PORT].num_planes,
b->length);
rc = -EINVAL;
break;
@@ -4053,4 +4081,3 @@ int msm_venc_ctrl_init(struct msm_vidc_inst *inst)
return msm_comm_ctrl_init(inst, msm_venc_ctrls,
ARRAY_SIZE(msm_venc_ctrls), &msm_venc_ctrl_ops);
}
-
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 437ad43e23e9..b12eeddc678f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -682,7 +682,7 @@ static bool valid_v4l2_buffer(struct v4l2_buffer *b,
MAX_PORT_NUM;
return port != MAX_PORT_NUM &&
- inst->fmts[port]->num_planes == b->length;
+ inst->fmts[port].num_planes == b->length;
}
int msm_vidc_prepare_buf(void *instance, struct v4l2_buffer *b)
@@ -849,7 +849,7 @@ int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
dprintk(VIDC_DBG, "Queueing device address = %pa\n",
&binfo->device_addr[i]);
- if (inst->fmts[OUTPUT_PORT]->fourcc ==
+ if (inst->fmts[OUTPUT_PORT].fourcc ==
V4L2_PIX_FMT_HEVC_HYBRID && binfo->handle[i] &&
b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
rc = msm_comm_smem_cache_operations(inst,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 40643239712f..d1cc08d53017 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -521,12 +521,12 @@ static int msm_comm_vote_bus(struct msm_vidc_core *core)
struct v4l2_control ctrl;
codec = inst->session_type == MSM_VIDC_DECODER ?
- inst->fmts[OUTPUT_PORT]->fourcc :
- inst->fmts[CAPTURE_PORT]->fourcc;
+ inst->fmts[OUTPUT_PORT].fourcc :
+ inst->fmts[CAPTURE_PORT].fourcc;
yuv = inst->session_type == MSM_VIDC_DECODER ?
- inst->fmts[CAPTURE_PORT]->fourcc :
- inst->fmts[OUTPUT_PORT]->fourcc;
+ inst->fmts[CAPTURE_PORT].fourcc :
+ inst->fmts[OUTPUT_PORT].fourcc;
vote_data[i].domain = get_hal_domain(inst->session_type);
vote_data[i].codec = get_hal_codec(codec);
@@ -1004,8 +1004,8 @@ static void handle_session_init_done(enum hal_command_response cmd, void *data)
core = inst->core;
hdev = inst->core->device;
codec = inst->session_type == MSM_VIDC_DECODER ?
- inst->fmts[OUTPUT_PORT]->fourcc :
- inst->fmts[CAPTURE_PORT]->fourcc;
+ inst->fmts[OUTPUT_PORT].fourcc :
+ inst->fmts[CAPTURE_PORT].fourcc;
/* check if capabilities are available for this session */
for (i = 0; i < VIDC_MAX_SESSIONS; i++) {
@@ -2028,7 +2028,7 @@ static void handle_fbd(enum hal_command_response cmd, void *data)
ns_to_timeval(time_usec * NSEC_PER_USEC);
vbuf->flags = 0;
extra_idx =
- EXTRADATA_IDX(inst->fmts[CAPTURE_PORT]->num_planes);
+ EXTRADATA_IDX(inst->fmts[CAPTURE_PORT].num_planes);
if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
vb->planes[extra_idx].m.userptr =
(unsigned long)fill_buf_done->extra_data_buffer;
@@ -2279,8 +2279,8 @@ int msm_comm_scale_clocks_load(struct msm_vidc_core *core,
list_for_each_entry(inst, &core->instances, list) {
codec = inst->session_type == MSM_VIDC_DECODER ?
- inst->fmts[OUTPUT_PORT]->fourcc :
- inst->fmts[CAPTURE_PORT]->fourcc;
+ inst->fmts[OUTPUT_PORT].fourcc :
+ inst->fmts[CAPTURE_PORT].fourcc;
if (msm_comm_turbo_session(inst))
clk_scale_data.power_mode[num_sessions] =
@@ -2711,9 +2711,9 @@ static int msm_comm_session_init(int flipped_state,
goto exit;
}
if (inst->session_type == MSM_VIDC_DECODER) {
- fourcc = inst->fmts[OUTPUT_PORT]->fourcc;
+ fourcc = inst->fmts[OUTPUT_PORT].fourcc;
} else if (inst->session_type == MSM_VIDC_ENCODER) {
- fourcc = inst->fmts[CAPTURE_PORT]->fourcc;
+ fourcc = inst->fmts[CAPTURE_PORT].fourcc;
} else {
dprintk(VIDC_ERR, "Invalid session\n");
return -EINVAL;
@@ -3601,7 +3601,7 @@ static void populate_frame_data(struct vidc_frame_data *data,
data->buffer_type = msm_comm_get_hal_output_buffer(inst);
}
- extra_idx = EXTRADATA_IDX(inst->fmts[port]->num_planes);
+ extra_idx = EXTRADATA_IDX(inst->fmts[port].num_planes);
if (extra_idx && extra_idx < VIDEO_MAX_PLANES &&
vb->planes[extra_idx].m.userptr) {
data->extradata_addr = vb->planes[extra_idx].m.userptr;
@@ -4792,9 +4792,20 @@ int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
return -EINVAL;
}
hdev = core->device;
- if (core->state == VIDC_CORE_INIT_DONE)
+ if (core->state == VIDC_CORE_INIT_DONE) {
+ /*
+ * In current implementation user-initiated SSR triggers
+ * a fatal error from hardware. However, there is no way
+ * to know if fatal error is due to SSR or not. Handle
+ * user SSR as non-fatal.
+ */
+ mutex_lock(&core->lock);
+ core->resources.debug_timeout = false;
+ mutex_unlock(&core->lock);
rc = call_hfi_op(hdev, core_trigger_ssr,
hdev->hfi_device_data, type);
+ }
+
return rc;
}
@@ -5265,7 +5276,7 @@ void msm_comm_print_inst_info(struct msm_vidc_inst *inst)
port = is_decode ? OUTPUT_PORT : CAPTURE_PORT;
dprintk(VIDC_ERR,
"%s session, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n",
- is_decode ? "Decode" : "Encode", inst->fmts[port]->name,
+ is_decode ? "Decode" : "Encode", inst->fmts[port].name,
inst->prop.height[port], inst->prop.width[port],
inst->prop.fps, inst->prop.bitrate,
!inst->bit_depth ? "8" : "10");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
index 9e67ef096c63..3cd1c38f8f37 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
@@ -237,8 +237,8 @@ void msm_dcvs_init_load(struct msm_vidc_inst *inst)
}
fourcc = inst->session_type == MSM_VIDC_DECODER ?
- inst->fmts[OUTPUT_PORT]->fourcc :
- inst->fmts[CAPTURE_PORT]->fourcc;
+ inst->fmts[OUTPUT_PORT].fourcc :
+ inst->fmts[CAPTURE_PORT].fourcc;
for (i = 0; i < num_rows; i++) {
bool matches = msm_dcvs_check_codec_supported(
@@ -589,7 +589,7 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
}
is_codec_supported =
msm_dcvs_check_codec_supported(
- inst->fmts[OUTPUT_PORT]->fourcc,
+ inst->fmts[OUTPUT_PORT].fourcc,
inst->dcvs.supported_codecs,
inst->session_type);
if (!is_codec_supported ||
@@ -599,15 +599,15 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
goto dcvs_decision_done;
}
if (msm_comm_turbo_session(inst) ||
- !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit ||
- instance_count > 1))
+ !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
+ instance_count > 1)
is_dcvs_supported = false;
}
if (inst->session_type == MSM_VIDC_ENCODER) {
inst->dcvs.extra_buffer_count = DCVS_ENC_EXTRA_OUTPUT_BUFFERS;
is_codec_supported =
msm_dcvs_check_codec_supported(
- inst->fmts[CAPTURE_PORT]->fourcc,
+ inst->fmts[CAPTURE_PORT].fourcc,
inst->dcvs.supported_codecs,
inst->session_type);
if (!is_codec_supported ||
@@ -617,8 +617,8 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
goto dcvs_decision_done;
}
if (msm_comm_turbo_session(inst) ||
- !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit ||
- instance_count > 1))
+ !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
+ instance_count > 1)
is_dcvs_supported = false;
}
dcvs_decision_done:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
index d3027c08d24e..efb90c69881f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c
@@ -289,10 +289,10 @@ static ssize_t inst_info_read(struct file *file, char __user *buf,
for (i = 0; i < MAX_PORT_NUM; i++) {
write_str(&dbg_buf, "capability: %s\n", i == OUTPUT_PORT ?
"Output" : "Capture");
- write_str(&dbg_buf, "name : %s\n", inst->fmts[i]->name);
- write_str(&dbg_buf, "planes : %d\n", inst->fmts[i]->num_planes);
+ write_str(&dbg_buf, "name : %s\n", inst->fmts[i].name);
+ write_str(&dbg_buf, "planes : %d\n", inst->fmts[i].num_planes);
write_str(
- &dbg_buf, "type: %s\n", inst->fmts[i]->type == OUTPUT_PORT ?
+ &dbg_buf, "type: %s\n", inst->fmts[i].type == OUTPUT_PORT ?
"Output" : "Capture");
switch (inst->buffer_mode_set[i]) {
case HAL_BUFFER_MODE_STATIC:
@@ -311,7 +311,7 @@ static ssize_t inst_info_read(struct file *file, char __user *buf,
write_str(&dbg_buf, "count: %u\n",
inst->bufq[i].vb2_bufq.num_buffers);
- for (j = 0; j < inst->fmts[i]->num_planes; j++)
+ for (j = 0; j < inst->fmts[i].num_planes; j++)
write_str(&dbg_buf, "size for plane %d: %u\n", j,
inst->bufq[i].vb2_bufq.plane_sizes[j]);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index b6e74715ad07..161e94f99040 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -261,7 +261,7 @@ struct msm_vidc_inst {
void *session;
struct session_prop prop;
enum instance_state state;
- struct msm_vidc_format *fmts[MAX_PORT_NUM];
+ struct msm_vidc_format fmts[MAX_PORT_NUM];
struct buf_queue bufq[MAX_PORT_NUM];
struct msm_vidc_list pendingq;
struct msm_vidc_list scratchbufs;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 25fccab99fb3..a3080be8cd7a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -1166,7 +1166,6 @@ static int msm_vidc_setup_context_bank(struct context_bank_info *cb,
struct device *dev)
{
int rc = 0;
- int disable_htw = 1;
int secure_vmid = VMID_INVAL;
struct bus_type *bus;
@@ -1192,14 +1191,6 @@ static int msm_vidc_setup_context_bank(struct context_bank_info *cb,
goto remove_cb;
}
- rc = iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
- if (rc) {
- dprintk(VIDC_ERR, "%s - disable coherent HTW failed: %s %d\n",
- __func__, dev_name(dev), rc);
- goto release_mapping;
- }
-
if (cb->is_secure) {
secure_vmid = get_secure_vmid(cb);
rc = iommu_domain_set_attr(cb->mapping->domain,
diff --git a/drivers/media/platform/msm/vidc/venus_boot.c b/drivers/media/platform/msm/vidc/venus_boot.c
index 925c97a5b6e8..85c3e15edded 100644
--- a/drivers/media/platform/msm/vidc/venus_boot.c
+++ b/drivers/media/platform/msm/vidc/venus_boot.c
@@ -190,8 +190,6 @@ static int pil_venus_auth_and_reset(void)
{
int rc;
- /* Need to enable this for new SMMU to set the device attribute */
- bool disable_htw = true;
phys_addr_t fw_bias = venus_data->resources->firmware_base;
void __iomem *reg_base = venus_data->reg_base;
u32 ver;
@@ -278,17 +276,6 @@ static int pil_venus_auth_and_reset(void)
if (iommu_present) {
phys_addr_t pa = fw_bias;
- /* Enable this for new SMMU to set the device attribute */
- rc = iommu_domain_set_attr(venus_data->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (rc) {
- dprintk(VIDC_ERR,
- "%s: Failed to disable COHERENT_HTW: %s\n",
- __func__, dev_name(dev));
- goto release_mapping;
- }
-
rc = arm_iommu_attach_device(dev, venus_data->mapping);
if (rc) {
dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index e0fb31de38ff..8332c7f4db43 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -3336,7 +3336,6 @@ static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
{
bool local_packet = false;
enum vidc_msg_prio log_level = VIDC_FW;
- unsigned int pending_packet_count = 0;
if (!device) {
dprintk(VIDC_ERR, "%s: Invalid params\n", __func__);
@@ -3361,23 +3360,6 @@ static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
log_level = VIDC_ERR;
}
- /*
- * In FATAL situation, print all the pending messages in msg
- * queue. This is useful for debugging. At this time, message
- * queues may be corrupted. Hence don't trust them and just print
- * first max_packets packets.
- */
-
- if (local_packet) {
- dprintk(VIDC_ERR,
- "Printing all pending messages in message Queue\n");
- while (!__iface_msgq_read(device, packet) &&
- pending_packet_count < max_packets) {
- __dump_packet(packet, log_level);
- pending_packet_count++;
- }
- }
-
while (!__iface_dbgq_read(device, packet)) {
struct hfi_msg_sys_coverage_packet *pkt =
(struct hfi_msg_sys_coverage_packet *) packet;
diff --git a/drivers/mfd/wcd934x-regmap.c b/drivers/mfd/wcd934x-regmap.c
index 02ddf3225af8..3ed3d125f430 100644
--- a/drivers/mfd/wcd934x-regmap.c
+++ b/drivers/mfd/wcd934x-regmap.c
@@ -17,6 +17,18 @@
#include <linux/device.h>
#include "wcd9xxx-regmap.h"
+
+static const struct reg_sequence wcd934x_1_1_defaults[] = {
+ { WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0, 0x01 },
+ { WCD934X_BIAS_VBG_FINE_ADJ, 0x75 },
+ { WCD934X_HPH_REFBUFF_LP_CTL, 0x0E },
+ { WCD934X_EAR_DAC_CTL_ATEST, 0x08 },
+ { WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x17 },
+ { WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL, 0x40 },
+ { WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L, 0x81 },
+ { WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R, 0x81 },
+};
+
static const struct reg_default wcd934x_defaults[] = {
{ WCD934X_PAGE0_PAGE_REGISTER, 0x00 },
{ WCD934X_CODEC_RPM_CLK_BYPASS, 0x00 },
@@ -1803,6 +1815,37 @@ static const struct reg_default wcd934x_defaults[] = {
{ WCD934X_TEST_DEBUG_CODEC_DIAGS, 0x00 },
};
+/*
+ * wcd934x_regmap_register_patch: Update register defaults based on version
+ * @regmap: handle to wcd9xxx regmap
+ * @version: wcd934x version
+ *
+ * Returns error code in case of failure or 0 for success
+ */
+int wcd934x_regmap_register_patch(struct regmap *regmap, int revision)
+{
+ int rc = 0;
+
+ if (!regmap) {
+ pr_err("%s: regmap struct is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (revision) {
+ case TAVIL_VERSION_1_1:
+ case TAVIL_VERSION_WCD9340_1_1:
+ case TAVIL_VERSION_WCD9341_1_1:
+ regcache_cache_only(regmap, true);
+ rc = regmap_multi_reg_write(regmap, wcd934x_1_1_defaults,
+ ARRAY_SIZE(wcd934x_1_1_defaults));
+ regcache_cache_only(regmap, false);
+ break;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(wcd934x_regmap_register_patch);
+
static bool wcd934x_is_readable_register(struct device *dev, unsigned int reg)
{
u8 pg_num, reg_offset;
@@ -1861,6 +1904,9 @@ static bool wcd934x_is_volatile_register(struct device *dev, unsigned int reg)
(reg <= WCD934X_CDC_ANC1_FB_GAIN_CTL))
return true;
+ if ((reg >= WCD934X_CODEC_CPR_WR_DATA_0) &&
+ (reg <= WCD934X_CODEC_CPR_RD_DATA_3))
+ return true;
/*
* Need to mark volatile for registers that are writable but
diff --git a/drivers/mfd/wcd9xxx-regmap.h b/drivers/mfd/wcd9xxx-regmap.h
index 62e4a620c71c..6db8fc55acae 100644
--- a/drivers/mfd/wcd9xxx-regmap.h
+++ b/drivers/mfd/wcd9xxx-regmap.h
@@ -21,6 +21,8 @@ typedef int (*regmap_patch_fptr)(struct regmap *, int);
#ifdef CONFIG_WCD934X_CODEC
extern struct regmap_config wcd934x_regmap_config;
+extern int wcd934x_regmap_register_patch(struct regmap *regmap,
+ int version);
#endif
#ifdef CONFIG_WCD9335_CODEC
@@ -71,6 +73,11 @@ static inline regmap_patch_fptr wcd9xxx_get_regmap_reg_patch(int type)
apply_patch = wcd9335_regmap_register_patch;
break;
#endif
+#ifdef CONFIG_WCD934X_CODEC
+ case WCD934X:
+ apply_patch = wcd934x_regmap_register_patch;
+ break;
+#endif
default:
apply_patch = NULL;
break;
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils.c b/drivers/misc/qcom/qdsp6v2/audio_utils.c
index 065b426ca6d0..840597314a5f 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils.c
@@ -601,6 +601,7 @@ long audio_in_compat_ioctl(struct file *file,
}
case AUDIO_GET_CONFIG_32: {
struct msm_audio_config32 cfg_32;
+ memset(&cfg_32, 0, sizeof(cfg_32));
cfg_32.buffer_size = audio->pcm_cfg.buffer_size;
cfg_32.buffer_count = audio->pcm_cfg.buffer_count;
cfg_32.channel_count = audio->pcm_cfg.channel_count;
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
index 0c44f79549d4..567c948b0efe 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
@@ -570,6 +570,8 @@ int audio_aio_release(struct inode *inode, struct file *file)
struct q6audio_aio *audio = file->private_data;
pr_debug("%s[%p]\n", __func__, audio);
mutex_lock(&audio->lock);
+ mutex_lock(&audio->read_lock);
+ mutex_lock(&audio->write_lock);
audio->wflush = 1;
if (audio->wakelock_voted &&
(audio->audio_ws_mgr != NULL) &&
@@ -595,6 +597,8 @@ int audio_aio_release(struct inode *inode, struct file *file)
wake_up(&audio->event_wait);
audio_aio_reset_event_queue(audio);
q6asm_audio_client_free(audio->ac);
+ mutex_unlock(&audio->write_lock);
+ mutex_unlock(&audio->read_lock);
mutex_unlock(&audio->lock);
mutex_destroy(&audio->lock);
mutex_destroy(&audio->read_lock);
@@ -1745,7 +1749,11 @@ static long audio_aio_ioctl(struct file *file, unsigned int cmd,
__func__);
rc = -EFAULT;
} else {
+ mutex_lock(&audio->read_lock);
+ mutex_lock(&audio->write_lock);
rc = audio_aio_ion_add(audio, &info);
+ mutex_unlock(&audio->write_lock);
+ mutex_unlock(&audio->read_lock);
}
mutex_unlock(&audio->lock);
break;
@@ -1760,7 +1768,11 @@ static long audio_aio_ioctl(struct file *file, unsigned int cmd,
__func__);
rc = -EFAULT;
} else {
+ mutex_lock(&audio->read_lock);
+ mutex_lock(&audio->write_lock);
rc = audio_aio_ion_remove(audio, &info);
+ mutex_unlock(&audio->write_lock);
+ mutex_unlock(&audio->read_lock);
}
mutex_unlock(&audio->lock);
break;
@@ -2064,7 +2076,11 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd,
} else {
info.fd = info_32.fd;
info.vaddr = compat_ptr(info_32.vaddr);
+ mutex_lock(&audio->read_lock);
+ mutex_lock(&audio->write_lock);
rc = audio_aio_ion_add(audio, &info);
+ mutex_unlock(&audio->write_lock);
+ mutex_unlock(&audio->read_lock);
}
mutex_unlock(&audio->lock);
break;
@@ -2081,7 +2097,11 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd,
} else {
info.fd = info_32.fd;
info.vaddr = compat_ptr(info_32.vaddr);
+ mutex_lock(&audio->read_lock);
+ mutex_lock(&audio->write_lock);
rc = audio_aio_ion_remove(audio, &info);
+ mutex_unlock(&audio->write_lock);
+ mutex_unlock(&audio->read_lock);
}
mutex_unlock(&audio->lock);
break;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index be3ccf2536d9..203daf3bd5eb 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -21,6 +21,7 @@
#include <linux/mmc/sdio_func.h>
#include <linux/gfp.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
@@ -48,11 +49,7 @@
#define CORE_SW_RST (1 << 7)
#define SDHCI_VER_100 0x2B
-#define CORE_MCI_DATA_CNT 0x30
-#define CORE_MCI_STATUS 0x34
-#define CORE_MCI_FIFO_CNT 0x44
-#define CORE_MCI_VERSION 0x050
#define CORE_VERSION_STEP_MASK 0x0000FFFF
#define CORE_VERSION_MINOR_MASK 0x0FFF0000
#define CORE_VERSION_MINOR_SHIFT 16
@@ -61,23 +58,12 @@
#define CORE_VERSION_TARGET_MASK 0x000000FF
#define SDHCI_MSM_VER_420 0x49
-#define CORE_GENERICS 0x70
#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
#define CORE_HC_MODE 0x78
#define HC_MODE_EN 0x1
#define FF_CLK_SW_RST_DIS (1 << 13)
-#define CORE_TESTBUS_CONFIG 0x0CC
-#define CORE_TESTBUS_SEL2_BIT 4
-#define CORE_TESTBUS_ENA (1 << 3)
-#define CORE_TESTBUS_SEL2 (1 << CORE_TESTBUS_SEL2_BIT)
-
-#define CORE_PWRCTL_STATUS 0xDC
-#define CORE_PWRCTL_MASK 0xE0
-#define CORE_PWRCTL_CLEAR 0xE4
-#define CORE_PWRCTL_CTL 0xE8
-
#define CORE_PWRCTL_BUS_OFF 0x01
#define CORE_PWRCTL_BUS_ON (1 << 1)
#define CORE_PWRCTL_IO_LOW (1 << 2)
@@ -91,7 +77,6 @@
#define INT_MASK 0xF
#define MAX_PHASES 16
-#define CORE_DLL_CONFIG 0x100
#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
#define CORE_DLL_EN (1 << 16)
#define CORE_CDR_EN (1 << 17)
@@ -100,11 +85,9 @@
#define CORE_DLL_PDN (1 << 29)
#define CORE_DLL_RST (1 << 30)
-#define CORE_DLL_STATUS 0x108
#define CORE_DLL_LOCK (1 << 7)
#define CORE_DDR_DLL_LOCK (1 << 11)
-#define CORE_VENDOR_SPEC 0x10C
#define CORE_CLK_PWRSAVE (1 << 1)
#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
@@ -117,23 +100,16 @@
#define CORE_HC_SELECT_IN_MASK (7 << 19)
#define CORE_VENDOR_SPEC_POR_VAL 0xA1C
-#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 0x114
-#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 0x118
-
-#define CORE_VENDOR_SPEC_FUNC2 0x110
#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
#define HC_SW_RST_REQ (1 << 21)
#define CORE_ONE_MID_EN (1 << 25)
-#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11C
#define CORE_8_BIT_SUPPORT (1 << 18)
#define CORE_3_3V_SUPPORT (1 << 24)
#define CORE_3_0V_SUPPORT (1 << 25)
#define CORE_1_8V_SUPPORT (1 << 26)
#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)
-#define CORE_SDCC_DEBUG_REG 0x124
-
#define CORE_CSR_CDC_CTLR_CFG0 0x130
#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
#define CORE_HW_AUTOCAL_ENA (1 << 17)
@@ -161,25 +137,20 @@
#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
#define CORE_CDC_SWITCH_RC_EN (1 << 1)
-#define CORE_DDR_200_CFG 0x184
#define CORE_CDC_T4_DLY_SEL (1 << 0)
#define CORE_CMDIN_RCLK_EN (1 << 1)
#define CORE_START_CDC_TRAFFIC (1 << 6)
-#define CORE_VENDOR_SPEC3 0x1B0
#define CORE_PWRSAVE_DLL (1 << 3)
#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
-#define CORE_DLL_CONFIG_2 0x1B4
#define CORE_DDR_CAL_EN (1 << 0)
#define CORE_FLL_CYCLE_CNT (1 << 18)
#define CORE_DLL_CLOCK_DISABLE (1 << 21)
-#define CORE_DDR_CONFIG 0x1B8
#define DDR_CONFIG_POR_VAL 0x80040853
#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
#define DDR_CONFIG_PRG_RCLK_DLY 115
-#define CORE_DDR_CONFIG_2 0x1BC
#define DDR_CONFIG_2_POR_VAL 0x80040873
/* 512 descriptors */
@@ -196,6 +167,149 @@
#define MAX_DRV_TYPES_SUPPORTED_HS200 4
#define MSM_AUTOSUSPEND_DELAY_MS 100
+struct sdhci_msm_offset {
+ u32 CORE_MCI_DATA_CNT;
+ u32 CORE_MCI_STATUS;
+ u32 CORE_MCI_FIFO_CNT;
+ u32 CORE_MCI_VERSION;
+ u32 CORE_GENERICS;
+ u32 CORE_TESTBUS_CONFIG;
+ u32 CORE_TESTBUS_SEL2_BIT;
+ u32 CORE_TESTBUS_ENA;
+ u32 CORE_TESTBUS_SEL2;
+ u32 CORE_PWRCTL_STATUS;
+ u32 CORE_PWRCTL_MASK;
+ u32 CORE_PWRCTL_CLEAR;
+ u32 CORE_PWRCTL_CTL;
+ u32 CORE_SDCC_DEBUG_REG;
+ u32 CORE_DLL_CONFIG;
+ u32 CORE_DLL_STATUS;
+ u32 CORE_VENDOR_SPEC;
+ u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
+ u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
+ u32 CORE_VENDOR_SPEC_FUNC2;
+ u32 CORE_VENDOR_SPEC_CAPABILITIES0;
+ u32 CORE_DDR_200_CFG;
+ u32 CORE_VENDOR_SPEC3;
+ u32 CORE_DLL_CONFIG_2;
+ u32 CORE_DDR_CONFIG;
+ u32 CORE_DDR_CONFIG_2;
+};
+
+struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
+ .CORE_MCI_DATA_CNT = 0x35C,
+ .CORE_MCI_STATUS = 0x324,
+ .CORE_MCI_FIFO_CNT = 0x308,
+ .CORE_MCI_VERSION = 0x318,
+ .CORE_GENERICS = 0x320,
+ .CORE_TESTBUS_CONFIG = 0x32C,
+ .CORE_TESTBUS_SEL2_BIT = 3,
+ .CORE_TESTBUS_ENA = (1 << 31),
+ .CORE_TESTBUS_SEL2 = (1 << 3),
+ .CORE_PWRCTL_STATUS = 0x240,
+ .CORE_PWRCTL_MASK = 0x244,
+ .CORE_PWRCTL_CLEAR = 0x248,
+ .CORE_PWRCTL_CTL = 0x24C,
+ .CORE_SDCC_DEBUG_REG = 0x358,
+ .CORE_DLL_CONFIG = 0x200,
+ .CORE_DLL_STATUS = 0x208,
+ .CORE_VENDOR_SPEC = 0x20C,
+ .CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
+ .CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
+ .CORE_VENDOR_SPEC_FUNC2 = 0x210,
+ .CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
+ .CORE_DDR_200_CFG = 0x224,
+ .CORE_VENDOR_SPEC3 = 0x250,
+ .CORE_DLL_CONFIG_2 = 0x254,
+ .CORE_DDR_CONFIG = 0x258,
+ .CORE_DDR_CONFIG_2 = 0x25C,
+};
+
+struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
+ .CORE_MCI_DATA_CNT = 0x30,
+ .CORE_MCI_STATUS = 0x34,
+ .CORE_MCI_FIFO_CNT = 0x44,
+ .CORE_MCI_VERSION = 0x050,
+ .CORE_GENERICS = 0x70,
+ .CORE_TESTBUS_CONFIG = 0x0CC,
+ .CORE_TESTBUS_SEL2_BIT = 4,
+ .CORE_TESTBUS_ENA = (1 << 3),
+ .CORE_TESTBUS_SEL2 = (1 << 4),
+ .CORE_PWRCTL_STATUS = 0xDC,
+ .CORE_PWRCTL_MASK = 0xE0,
+ .CORE_PWRCTL_CLEAR = 0xE4,
+ .CORE_PWRCTL_CTL = 0xE8,
+ .CORE_SDCC_DEBUG_REG = 0x124,
+ .CORE_DLL_CONFIG = 0x100,
+ .CORE_DLL_STATUS = 0x108,
+ .CORE_VENDOR_SPEC = 0x10C,
+ .CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
+ .CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
+ .CORE_VENDOR_SPEC_FUNC2 = 0x110,
+ .CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
+ .CORE_DDR_200_CFG = 0x184,
+ .CORE_VENDOR_SPEC3 = 0x1B0,
+ .CORE_DLL_CONFIG_2 = 0x1B4,
+ .CORE_DDR_CONFIG = 0x1B8,
+ .CORE_DDR_CONFIG_2 = 0x1BC,
+};
+
+u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ void __iomem *base_addr;
+
+ if (msm_host->mci_removed)
+ base_addr = host->ioaddr;
+ else
+ base_addr = msm_host->core_mem;
+
+ return readb_relaxed(base_addr + offset);
+}
+
+u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ void __iomem *base_addr;
+
+ if (msm_host->mci_removed)
+ base_addr = host->ioaddr;
+ else
+ base_addr = msm_host->core_mem;
+
+ return readl_relaxed(base_addr + offset);
+}
+
+void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ void __iomem *base_addr;
+
+ if (msm_host->mci_removed)
+ base_addr = host->ioaddr;
+ else
+ base_addr = msm_host->core_mem;
+
+ writeb_relaxed(val, base_addr + offset);
+}
+
+void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ void __iomem *base_addr;
+
+ if (msm_host->mci_removed)
+ base_addr = host->ioaddr;
+ else
+ base_addr = msm_host->core_mem;
+
+ writel_relaxed(val, base_addr + offset);
+}
+
static const u32 tuning_block_64[] = {
0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
@@ -241,10 +355,14 @@ static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
u32 wait_cnt = 50;
u8 ck_out_en = 0;
struct mmc_host *mmc = host->mmc;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
/* poll for CK_OUT_EN bit. max. poll time = 50us */
- ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
- CORE_CK_OUT_EN);
+ ck_out_en = !!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
while (ck_out_en != poll) {
if (--wait_cnt == 0) {
@@ -256,7 +374,7 @@ static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
udelay(1);
ck_out_en = !!(readl_relaxed(host->ioaddr +
- CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
+ msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
}
out:
return rc;
@@ -270,18 +388,25 @@ static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
{
int rc = 0;
u32 config;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
config |= CORE_CDR_EN;
config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
rc = msm_dll_poll_ck_out_en(host, 0);
if (rc)
goto err;
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) |
- CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
rc = msm_dll_poll_ck_out_en(host, 1);
if (rc)
@@ -328,6 +453,8 @@ static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
int rc = 0;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
u32 val = 0;
if (!msm_host->en_auto_cmd21)
@@ -340,11 +467,13 @@ static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
if (enable) {
rc = msm_enable_cdr_cm_sdc4_dll(host);
- writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
- val, host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) | val,
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
} else {
- writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
- ~val, host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) & ~val,
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
}
return rc;
}
@@ -352,6 +481,10 @@ static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
int rc = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
u8 grey_coded_phase_table[] = {0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9,
0x8};
@@ -362,10 +495,12 @@ static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
spin_lock_irqsave(&host->lock, flags);
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
rc = msm_dll_poll_ck_out_en(host, 0);
@@ -376,24 +511,28 @@ static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
* Write the selected DLL clock output phase (0 ... 15)
* to CDR_SELEXT bit field of DLL_CONFIG register.
*/
- writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
& ~(0xF << 20))
| (grey_coded_phase_table[phase] << 20)),
- host->ioaddr + CORE_DLL_CONFIG);
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
rc = msm_dll_poll_ck_out_en(host, 1);
if (rc)
goto err_out;
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
config |= CORE_CDR_EN;
config &= ~CORE_CDR_EXT_EN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
goto out;
err_out:
@@ -522,6 +661,10 @@ static int msm_find_most_appropriate_phase(struct sdhci_host *host,
static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
u32 mclk_freq = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
/* Program the MCLK value to MCLK_FREQ bit field */
if (host->clock <= 112000000)
@@ -541,9 +684,10 @@ static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
else if (host->clock <= 200000000)
mclk_freq = 7;
- writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
& ~(7 << 24)) | (mclk_freq << 24)),
- host->ioaddr + CORE_DLL_CONFIG);
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
}
/* Initialize the DLL (Programmable Delay Line ) */
@@ -551,6 +695,8 @@ static int msm_init_cm_dll(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
struct mmc_host *mmc = host->mmc;
int rc = 0;
unsigned long flags;
@@ -559,8 +705,8 @@ static int msm_init_cm_dll(struct sdhci_host *host)
pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
spin_lock_irqsave(&host->lock, flags);
- prev_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
- CORE_CLK_PWRSAVE);
+ prev_pwrsave = !!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
curr_pwrsave = prev_pwrsave;
/*
* Make sure that clock is always enabled when DLL
@@ -569,76 +715,89 @@ static int msm_init_cm_dll(struct sdhci_host *host)
* here and re-enable it once tuning is completed.
*/
if (prev_pwrsave) {
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
- & ~CORE_CLK_PWRSAVE),
- host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ & ~CORE_CLK_PWRSAVE), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
curr_pwrsave = false;
}
if (msm_host->use_updated_dll_reset) {
/* Disable the DLL clock */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- & ~CORE_CK_OUT_EN),
- host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ & ~CORE_CK_OUT_EN), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
- | CORE_DLL_CLOCK_DISABLE),
- host->ioaddr + CORE_DLL_CONFIG_2);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
+ | CORE_DLL_CLOCK_DISABLE), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2);
}
/* Write 1 to DLL_RST bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
msm_cm_dll_set_freq(host);
if (msm_host->use_updated_dll_reset) {
u32 mclk_freq = 0;
- if ((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
+ if ((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
& CORE_FLL_CYCLE_CNT))
mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
else
mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);
- writel_relaxed(((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
- & ~(0xFF << 10)) | (mclk_freq << 10)),
- host->ioaddr + CORE_DLL_CONFIG_2);
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
+ & ~(0xFF << 10)) | (mclk_freq << 10)),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
/* wait for 5us before enabling DLL clock */
udelay(5);
}
/* Write 0 to DLL_RST bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- & ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- & ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
if (msm_host->use_updated_dll_reset) {
msm_cm_dll_set_freq(host);
/* Enable the DLL clock */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
- & ~CORE_DLL_CLOCK_DISABLE),
- host->ioaddr + CORE_DLL_CONFIG_2);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
+ & ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2);
}
/* Set DLL_EN bit to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Set CK_OUT_EN bit to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ | CORE_CK_OUT_EN), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
wait_cnt = 50;
/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
- while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
- CORE_DLL_LOCK)) {
+ while (!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
/* max. wait for 50us sec for LOCK bit to be set */
if (--wait_cnt == 0) {
pr_err("%s: %s: DLL failed to LOCK\n",
@@ -653,14 +812,16 @@ static int msm_init_cm_dll(struct sdhci_host *host)
out:
/* Restore the correct PWRSAVE state */
if (prev_pwrsave ^ curr_pwrsave) {
- u32 reg = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+ u32 reg = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
if (prev_pwrsave)
reg |= CORE_CLK_PWRSAVE;
else
reg &= ~CORE_CLK_PWRSAVE;
- writel_relaxed(reg, host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(reg, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
}
spin_unlock_irqrestore(&host->lock, flags);
@@ -673,13 +834,18 @@ static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
u32 calib_done;
int ret = 0;
int cdc_err = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG)
& ~CORE_CDC_T4_DLY_SEL),
- host->ioaddr + CORE_DDR_200_CFG);
+ host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
@@ -692,9 +858,10 @@ static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
host->ioaddr + CORE_CSR_CDC_GEN_CFG);
/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG)
& ~CORE_START_CDC_TRAFFIC),
- host->ioaddr + CORE_DDR_200_CFG);
+ host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
/*
* Perform CDC Register Initialization Sequence
@@ -765,9 +932,10 @@ static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
}
/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG)
| CORE_START_CDC_TRAFFIC),
- host->ioaddr + CORE_DDR_200_CFG);
+ host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
out:
pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
__func__, ret);
@@ -778,6 +946,8 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
u32 dll_status, ddr_config;
int ret = 0;
@@ -788,27 +958,31 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
* bootloaders.
*/
if (msm_host->rclk_delay_fix) {
- writel_relaxed(DDR_CONFIG_2_POR_VAL,
- host->ioaddr + CORE_DDR_CONFIG_2);
+ writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
+ msm_host_offset->CORE_DDR_CONFIG_2);
} else {
ddr_config = DDR_CONFIG_POR_VAL &
~DDR_CONFIG_PRG_RCLK_DLY_MASK;
ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
- writel_relaxed(ddr_config, host->ioaddr + CORE_DDR_CONFIG);
+ writel_relaxed(ddr_config, host->ioaddr +
+ msm_host_offset->CORE_DDR_CONFIG);
}
if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
- | CORE_CMDIN_RCLK_EN),
- host->ioaddr + CORE_DDR_200_CFG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG)
+ | CORE_CMDIN_RCLK_EN), host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG);
/* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG_2)
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
| CORE_DDR_CAL_EN),
- host->ioaddr + CORE_DLL_CONFIG_2);
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
/* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
- ret = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
+ ret = readl_poll_timeout(host->ioaddr +
+ msm_host_offset->CORE_DLL_STATUS,
dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);
if (ret == -ETIMEDOUT) {
@@ -826,9 +1000,10 @@ static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
* turned on for host controllers using this DLL.
*/
if (!msm_host->use_14lpp_dll)
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
- | CORE_PWRSAVE_DLL),
- host->ioaddr + CORE_VENDOR_SPEC3);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3)
+ | CORE_PWRSAVE_DLL), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3);
mb();
out:
pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
@@ -877,6 +1052,8 @@ static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
int ret = 0;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
@@ -894,9 +1071,10 @@ static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
goto out;
/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_CMD_DAT_TRACK_SEL),
- host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ | CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
if (msm_host->use_cdclp533)
/* Calibrate CDCLP533 DLL HW */
@@ -2321,12 +2499,17 @@ void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
mmc_hostname(host->mmc),
- readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS),
- readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK),
- readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_MASK),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_CTL));
}
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
@@ -2334,6 +2517,8 @@ static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
struct sdhci_host *host = (struct sdhci_host *)data;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
u8 irq_status = 0;
u8 irq_ack = 0;
int ret = 0;
@@ -2341,12 +2526,16 @@ static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
unsigned long flags;
int retry = 10;
- irq_status = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
+ irq_status = sdhci_msm_readb_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS);
+
pr_debug("%s: Received IRQ(%d), status=0x%x\n",
mmc_hostname(msm_host->mmc), irq, irq_status);
/* Clear the interrupt */
- writeb_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
+ sdhci_msm_writeb_relaxed(irq_status, host,
+ msm_host_offset->CORE_PWRCTL_CLEAR);
+
/*
* SDHC has core_mem and hc_mem device memory and these memory
* addresses do not fall within 1KB region. Hence, any update to
@@ -2361,16 +2550,16 @@ static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
* sure status register is cleared. Otherwise, this will result in
* a spurious power IRQ resulting in system instability.
*/
- while (irq_status &
- readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS)) {
+ while (irq_status & sdhci_msm_readb_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS)) {
if (retry == 0) {
pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n",
mmc_hostname(host->mmc), irq_status);
sdhci_msm_dump_pwr_ctrl_regs(host);
BUG_ON(1);
}
- writeb_relaxed(irq_status,
- (msm_host->core_mem + CORE_PWRCTL_CLEAR));
+ sdhci_msm_writeb_relaxed(irq_status, host,
+ msm_host_offset->CORE_PWRCTL_CLEAR);
retry--;
udelay(10);
}
@@ -2432,7 +2621,8 @@ static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
}
/* ACK status to the core */
- writeb_relaxed(irq_ack, (msm_host->core_mem + CORE_PWRCTL_CTL));
+ sdhci_msm_writeb_relaxed(irq_ack, host,
+ msm_host_offset->CORE_PWRCTL_CTL);
/*
* SDHC has core_mem and hc_mem device memory and these memory
* addresses do not fall within 1KB region. Hence, any update to
@@ -2442,14 +2632,16 @@ static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
mb();
if ((io_level & REQ_IO_HIGH) && (msm_host->caps_0 & CORE_3_0V_SUPPORT))
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
- ~CORE_IO_PAD_PWR_SWITCH),
- host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) &
+ ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
else if ((io_level & REQ_IO_LOW) ||
(msm_host->caps_0 & CORE_1_8V_SUPPORT))
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
- CORE_IO_PAD_PWR_SWITCH),
- host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) |
+ CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
mb();
pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
@@ -2534,6 +2726,8 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
unsigned long flags;
bool done = false;
u32 io_sig_sts;
@@ -2542,7 +2736,9 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
mmc_hostname(host->mmc), __func__, req_type,
msm_host->curr_pwr_state, msm_host->curr_io_level);
- io_sig_sts = readl_relaxed(msm_host->core_mem + CORE_GENERICS);
+ io_sig_sts = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_GENERICS);
+
/*
* The IRQ for request type IO High/Low will be generated when -
* 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
@@ -2589,16 +2785,23 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
{
- u32 config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ u32 config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
if (enable) {
config |= CORE_CDR_EN;
config &= ~CORE_CDR_EXT_EN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
} else {
config &= ~CORE_CDR_EN;
config |= CORE_CDR_EXT_EN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
}
}
@@ -2809,6 +3012,8 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
int rc;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
struct mmc_card *card = host->mmc->card;
struct mmc_ios curr_ios = host->mmc->ios;
u32 sup_clock, ddr_clock, dll_lock;
@@ -2819,8 +3024,10 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
* disable pwrsave to ensure clock is not auto-gated until
* the rate is >400KHz (initialization complete).
*/
- writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
- ~CORE_CLK_PWRSAVE, host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) &
+ ~CORE_CLK_PWRSAVE, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
sdhci_msm_prepare_clocks(host, false);
host->clock = clock;
goto out;
@@ -2830,21 +3037,23 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
if (rc)
goto out;
- curr_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
- CORE_CLK_PWRSAVE);
+ curr_pwrsave = !!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
if ((clock > 400000) &&
!curr_pwrsave && card && mmc_host_may_gate_card(card))
- writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
- | CORE_CLK_PWRSAVE,
- host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ | CORE_CLK_PWRSAVE, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
/*
* Disable pwrsave for a newly added card if doesn't allow clock
* gating.
*/
else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
- writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
- & ~CORE_CLK_PWRSAVE,
- host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ & ~CORE_CLK_PWRSAVE, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
@@ -2880,10 +3089,11 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
*/
if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
/* Select the divided clock (free running MCLK/2) */
- writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
- & ~CORE_HC_MCLK_SEL_MASK)
- | CORE_HC_MCLK_SEL_HS400),
- host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ & ~CORE_HC_MCLK_SEL_MASK)
+ | CORE_HC_MCLK_SEL_HS400), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
/*
* Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
* register
@@ -2897,10 +3107,10 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
* field in VENDOR_SPEC_FUNC
*/
writel_relaxed((readl_relaxed(host->ioaddr + \
- CORE_VENDOR_SPEC)
+ msm_host_offset->CORE_VENDOR_SPEC)
| CORE_HC_SELECT_IN_HS400
- | CORE_HC_SELECT_IN_EN),
- host->ioaddr + CORE_VENDOR_SPEC);
+ | CORE_HC_SELECT_IN_EN), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
}
if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
/*
@@ -2908,7 +3118,8 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
* CORE_DLL_STATUS to be set. This should get set
* with in 15 us at 200 MHz.
*/
- rc = readl_poll_timeout(host->ioaddr + CORE_DLL_STATUS,
+ rc = readl_poll_timeout(host->ioaddr +
+ msm_host_offset->CORE_DLL_STATUS,
dll_lock, (dll_lock & (CORE_DLL_LOCK |
CORE_DDR_DLL_LOCK)), 10, 1000);
if (rc == -ETIMEDOUT)
@@ -2920,14 +3131,16 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
if (!msm_host->use_cdclp533)
/* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
writel_relaxed((readl_relaxed(host->ioaddr +
- CORE_VENDOR_SPEC3) & ~CORE_PWRSAVE_DLL),
- host->ioaddr + CORE_VENDOR_SPEC3);
+ msm_host_offset->CORE_VENDOR_SPEC3)
+ & ~CORE_PWRSAVE_DLL), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3);
/* Select the default clock (free running MCLK) */
- writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
& ~CORE_HC_MCLK_SEL_MASK)
- | CORE_HC_MCLK_SEL_DFLT),
- host->ioaddr + CORE_VENDOR_SPEC);
+ | CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
/*
* Disable HC_SELECT_IN to be able to use the UHS mode select
@@ -2937,10 +3150,11 @@ static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
* Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
* in VENDOR_SPEC_FUNC
*/
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
& ~CORE_HC_SELECT_IN_EN
- & ~CORE_HC_SELECT_IN_MASK),
- host->ioaddr + CORE_VENDOR_SPEC);
+ & ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
}
mb();
@@ -2971,6 +3185,8 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
u16 ctrl_2;
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@@ -3006,14 +3222,16 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
*
* Write 1 to DLL_RST bit of DLL_CONFIG register
*/
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_RST),
- host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ | CORE_DLL_RST), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_PDN),
- host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ | CORE_DLL_PDN), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
mb();
/*
@@ -3033,12 +3251,15 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
#define DRV_NAME "cmdq-host"
static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
{
+ int i = 0;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
- int i = 0;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
struct cmdq_host *cq_host = host->cq_host;
- u32 version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
+ u32 version = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_VERSION);
u16 minor = version & CORE_VERSION_TARGET_MASK;
/* registers offset changed starting from 4.2.0 */
int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
@@ -3060,6 +3281,8 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
int tbsel, tbsel2;
int i, index = 0;
u32 test_bus_val = 0;
@@ -3071,19 +3294,29 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
sdhci_msm_cmdq_dump_debug_ram(host);
pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
- readl_relaxed(msm_host->core_mem + CORE_MCI_DATA_CNT),
- readl_relaxed(msm_host->core_mem + CORE_MCI_FIFO_CNT),
- readl_relaxed(msm_host->core_mem + CORE_MCI_STATUS));
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_DATA_CNT),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_FIFO_CNT),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_STATUS));
pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
- readl_relaxed(host->ioaddr + CORE_DLL_CONFIG),
- readl_relaxed(host->ioaddr + CORE_DLL_STATUS),
- readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION));
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG),
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_STATUS),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_VERSION));
pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
- readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC),
- readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
- readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC),
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
pr_info("Vndr func2: 0x%08x\n",
- readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2));
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
/*
* tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
@@ -3098,12 +3331,13 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
for (tbsel = 0; tbsel < 8; tbsel++) {
if (index >= MAX_TEST_BUS)
break;
- test_bus_val = (tbsel2 << CORE_TESTBUS_SEL2_BIT) |
- tbsel | CORE_TESTBUS_ENA;
- writel_relaxed(test_bus_val,
- msm_host->core_mem + CORE_TESTBUS_CONFIG);
- debug_reg[index++] = readl_relaxed(msm_host->core_mem +
- CORE_SDCC_DEBUG_REG);
+ test_bus_val =
+ (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
+ tbsel | msm_host_offset->CORE_TESTBUS_ENA;
+ sdhci_msm_writel_relaxed(test_bus_val, host,
+ msm_host_offset->CORE_TESTBUS_CONFIG);
+ debug_reg[index++] = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_SDCC_DEBUG_REG);
}
}
for (i = 0; i < MAX_TEST_BUS; i = i + 4)
@@ -3140,6 +3374,8 @@ static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
if (!msm_host->enhanced_strobe ||
!mmc_card_strobe(msm_host->mmc->card)) {
@@ -3149,13 +3385,15 @@ static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
}
if (set) {
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
- | CORE_CMDEN_HS400_INPUT_MASK_CNT),
- host->ioaddr + CORE_VENDOR_SPEC3);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3)
+ | CORE_CMDEN_HS400_INPUT_MASK_CNT),
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
} else {
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC3)
- & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
- host->ioaddr + CORE_VENDOR_SPEC3);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3)
+ & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
}
}
@@ -3163,15 +3401,19 @@ static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
if (set) {
- writel_relaxed(CORE_TESTBUS_ENA,
- msm_host->core_mem + CORE_TESTBUS_CONFIG);
+ sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
+ host, msm_host_offset->CORE_TESTBUS_CONFIG);
} else {
u32 value;
- value = readl_relaxed(msm_host->core_mem + CORE_TESTBUS_CONFIG);
- value &= ~CORE_TESTBUS_ENA;
- writel_relaxed(value, msm_host->core_mem + CORE_TESTBUS_CONFIG);
+ value = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_TESTBUS_CONFIG);
+ value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
+ sdhci_msm_writel_relaxed(value, host,
+ msm_host_offset->CORE_TESTBUS_CONFIG);
}
}
@@ -3205,15 +3447,20 @@ void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
{
u32 vendor_func2;
unsigned long timeout;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
- vendor_func2 = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
+ vendor_func2 = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
if (enable) {
writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
- CORE_VENDOR_SPEC_FUNC2);
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
timeout = 10000;
- while (readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2) &
- HC_SW_RST_REQ) {
+ while (readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
if (timeout == 0) {
pr_info("%s: Applying wait idle disable workaround\n",
mmc_hostname(host->mmc));
@@ -3225,10 +3472,10 @@ void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
* AXI bus.
*/
vendor_func2 = readl_relaxed(host->ioaddr +
- CORE_VENDOR_SPEC_FUNC2);
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
writel_relaxed(vendor_func2 |
- HC_SW_RST_WAIT_IDLE_DIS,
- host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
+ HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
host->reset_wa_t = ktime_get();
return;
}
@@ -3239,7 +3486,7 @@ void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
mmc_hostname(host->mmc));
} else {
writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
- host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
}
}
@@ -3755,8 +4002,11 @@ static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
u16 minor;
u8 major;
u32 val;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
- version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
+ version = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_VERSION);
major = (version & CORE_VERSION_MAJOR_MASK) >>
CORE_VERSION_MAJOR_SHIFT;
minor = version & CORE_VERSION_TARGET_MASK;
@@ -3789,9 +4039,10 @@ static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
*/
if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
- val = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
+ val = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
writel_relaxed((val | CORE_ONE_MID_EN),
- host->ioaddr + CORE_VENDOR_SPEC_FUNC2);
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
}
/*
* SDCC 5 controller with major version 1, minor version 0x34 and later
@@ -3825,9 +4076,9 @@ static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
/* Fake 3.0V support for SDIO devices which requires such voltage */
if (msm_host->pdata->core_3_0v_support) {
caps |= CORE_3_0V_SUPPORT;
- writel_relaxed(
- (readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES) |
- caps), host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ SDHCI_CAPABILITIES) | caps), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
}
if ((major == 1) && (minor >= 0x49))
@@ -3839,7 +4090,8 @@ static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
if (!msm_host->pdata->largeaddressbus)
caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
- writel_relaxed(caps, host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
+ writel_relaxed(caps, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
/* keep track of the value in SDHCI_CAPABILITIES */
msm_host->caps_0 = caps;
}
@@ -3892,6 +4144,7 @@ static bool sdhci_msm_is_bootdevice(struct device *dev)
static int sdhci_msm_probe(struct platform_device *pdev)
{
+ const struct sdhci_msm_offset *msm_host_offset;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_msm_host *msm_host;
@@ -3911,6 +4164,14 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto out;
}
+ if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
+ msm_host->mci_removed = true;
+ msm_host->offset = &sdhci_msm_offset_mci_removed;
+ } else {
+ msm_host->mci_removed = false;
+ msm_host->offset = &sdhci_msm_offset_mci_present;
+ }
+ msm_host_offset = msm_host->offset;
msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
if (IS_ERR(host)) {
@@ -4086,17 +4347,19 @@ static int sdhci_msm_probe(struct platform_device *pdev)
/* Reset the core and Enable SDHC mode */
core_memres = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "core_mem");
- if (!core_memres) {
- dev_err(&pdev->dev, "Failed to get iomem resource\n");
- goto vreg_deinit;
- }
- msm_host->core_mem = devm_ioremap(&pdev->dev, core_memres->start,
- resource_size(core_memres));
+ if (!msm_host->mci_removed) {
+ if (!core_memres) {
+ dev_err(&pdev->dev, "Failed to get iomem resource\n");
+ goto vreg_deinit;
+ }
+ msm_host->core_mem = devm_ioremap(&pdev->dev,
+ core_memres->start, resource_size(core_memres));
- if (!msm_host->core_mem) {
- dev_err(&pdev->dev, "Failed to remap registers\n");
- ret = -ENOMEM;
- goto vreg_deinit;
+ if (!msm_host->core_mem) {
+ dev_err(&pdev->dev, "Failed to remap registers\n");
+ ret = -ENOMEM;
+ goto vreg_deinit;
+ }
}
tlmm_memres = platform_get_resource_byname(pdev,
@@ -4119,24 +4382,27 @@ static int sdhci_msm_probe(struct platform_device *pdev)
* Reset the vendor spec register to power on reset state.
*/
writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
- host->ioaddr + CORE_VENDOR_SPEC);
-
- /* Set HC_MODE_EN bit in HC_MODE register */
- writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
- /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
- writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_HC_MODE) |
- FF_CLK_SW_RST_DIS, msm_host->core_mem + CORE_HC_MODE);
+ if (!msm_host->mci_removed) {
+ /* Set HC_MODE_EN bit in HC_MODE register */
+ writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+ /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
+ writel_relaxed(readl_relaxed(msm_host->core_mem +
+ CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
+ msm_host->core_mem + CORE_HC_MODE);
+ }
sdhci_set_default_hw_caps(msm_host, host);
/*
* Set the PAD_PWR_SWTICH_EN bit so that the PAD_PWR_SWITCH bit can
* be used as required later on.
*/
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
- CORE_IO_PAD_PWR_SWITCH_EN),
- host->ioaddr + CORE_VENDOR_SPEC);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) |
+ CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
/*
* CORE_SW_RST above may trigger power irq if previous status of PWRCTL
* was either BUS_ON or IO_HIGH_V. So before we enable the power irq
@@ -4144,14 +4410,19 @@ static int sdhci_msm_probe(struct platform_device *pdev)
* ensure that any pending power irq interrupt status is acknowledged
* otherwise power irq interrupt handler would be fired prematurely.
*/
- irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
- writel_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
- irq_ctl = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL);
+ irq_status = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS);
+ sdhci_msm_writel_relaxed(irq_status, host,
+ msm_host_offset->CORE_PWRCTL_CLEAR);
+ irq_ctl = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_CTL);
+
if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
- writel_relaxed(irq_ctl, (msm_host->core_mem + CORE_PWRCTL_CTL));
+ sdhci_msm_writel_relaxed(irq_ctl, host,
+ msm_host_offset->CORE_PWRCTL_CTL);
/*
* Ensure that above writes are propogated before interrupt enablement
@@ -4215,7 +4486,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
}
/* Enable pwr irq interrupts */
- writel_relaxed(INT_MASK, (msm_host->core_mem + CORE_PWRCTL_MASK));
+ sdhci_msm_writel_relaxed(INT_MASK, host,
+ msm_host_offset->CORE_PWRCTL_MASK);
#ifdef CONFIG_MMC_CLKGATE
/* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
@@ -4653,6 +4925,7 @@ static const struct dev_pm_ops sdhci_msm_pmops = {
#endif
static const struct of_device_id sdhci_msm_dt_match[] = {
{.compatible = "qcom,sdhci-msm"},
+ {.compatible = "qcom,sdhci-msm-v5"},
{},
};
MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index e47a5083e8a6..6f96ea97bddc 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -214,6 +214,8 @@ struct sdhci_msm_host {
bool pm_qos_group_enable;
struct sdhci_msm_pm_qos_irq pm_qos_irq;
bool tuning_in_progress;
+ bool mci_removed;
+ const struct sdhci_msm_offset *offset;
};
extern char *saved_command_line;
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 81651c7dec72..a0f76581e6eb 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -19,6 +19,7 @@ wil6210-y += wil_platform.o
wil6210-y += ethtool.o
wil6210-y += wil_crash_dump.o
wil6210-y += p2p.o
+wil6210-y += ftm.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 60f2a2e541a9..17b419d408cd 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -17,6 +17,7 @@
#include <linux/etherdevice.h>
#include "wil6210.h"
#include "wmi.h"
+#include "ftm.h"
#define WIL_MAX_ROC_DURATION_MS 5000
@@ -36,6 +37,90 @@ static struct ieee80211_channel wil_60ghz_channels[] = {
/* channel 4 not supported yet */
};
+/* Vendor id to be used in vendor specific command and events
+ * to user space.
+ * NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID,
+ * vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and
+ * qca_wlan_vendor_attr is open source file src/common/qca-vendor.h in
+ * git://w1.fi/srv/git/hostap.git; the values here are just a copy of that
+ */
+
+#define QCA_NL80211_VENDOR_ID 0x001374
+
+enum qca_nl80211_vendor_subcmds {
+ QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA = 128,
+ QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION = 129,
+ QCA_NL80211_VENDOR_SUBCMD_FTM_ABORT_SESSION = 130,
+ QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT = 131,
+ QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE = 132,
+ QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER = 133,
+ QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS = 134,
+ QCA_NL80211_VENDOR_SUBCMD_AOA_ABORT_MEAS = 135,
+ QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT = 136,
+};
+
+/* vendor specific commands */
+static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_ftm_get_capabilities
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_ftm_start_session
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_ABORT_SESSION,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_ftm_abort_session
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_ftm_configure_responder
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_aoa_start_measurement
+ },
+ {
+ .info.vendor_id = QCA_NL80211_VENDOR_ID,
+ .info.subcmd = QCA_NL80211_VENDOR_SUBCMD_AOA_ABORT_MEAS,
+ .flags = WIPHY_VENDOR_CMD_NEED_WDEV |
+ WIPHY_VENDOR_CMD_NEED_RUNNING,
+ .doit = wil_aoa_abort_measurement
+ },
+};
+
+/* vendor specific events */
+static const struct nl80211_vendor_cmd_info wil_nl80211_vendor_events[] = {
+ [QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX] = {
+ .vendor_id = QCA_NL80211_VENDOR_ID,
+ .subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT
+ },
+ [QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX] = {
+ .vendor_id = QCA_NL80211_VENDOR_ID,
+ .subcmd = QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE
+ },
+ [QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX] = {
+ .vendor_id = QCA_NL80211_VENDOR_ID,
+ .subcmd = QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT
+ },
+};
+
static struct ieee80211_supported_band wil_band_60ghz = {
.channels = wil_60ghz_channels,
.n_channels = ARRAY_SIZE(wil_60ghz_channels),
@@ -1483,6 +1568,11 @@ static void wil_wiphy_init(struct wiphy *wiphy)
wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
wiphy->mgmt_stypes = wil_mgmt_stypes;
wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
+
+ wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
+ wiphy->vendor_commands = wil_nl80211_vendor_commands;
+ wiphy->vendor_events = wil_nl80211_vendor_events;
+ wiphy->n_vendor_events = ARRAY_SIZE(wil_nl80211_vendor_events);
}
struct wireless_dev *wil_cfg80211_init(struct device *dev)
diff --git a/drivers/net/wireless/ath/wil6210/ftm.c b/drivers/net/wireless/ath/wil6210/ftm.c
new file mode 100644
index 000000000000..5cf07343a33c
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/ftm.c
@@ -0,0 +1,903 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <net/netlink.h>
+#include "wil6210.h"
+#include "ftm.h"
+#include "wmi.h"
+
+/* FTM session ID we use with FW */
+#define WIL_FTM_FW_SESSION_ID 1
+
+/* fixed spare allocation we reserve in NL messages we allocate */
+#define WIL_FTM_NL_EXTRA_ALLOC 32
+
+/* approx maximum length for FTM_MEAS_RESULT NL80211 event */
+#define WIL_FTM_MEAS_RESULT_MAX_LENGTH 2048
+
+/* timeout for waiting for standalone AOA measurement, milliseconds */
+#define WIL_AOA_MEASUREMENT_TIMEOUT 1000
+
+/* maximum number of allowed FTM measurements per burst */
+#define WIL_FTM_MAX_MEAS_PER_BURST 31
+
+/* initial token to use on non-secure FTM measurement */
+#define WIL_TOF_FTM_DEFAULT_INITIAL_TOKEN 2
+
+#define WIL_TOF_FTM_MAX_LCI_LENGTH (240)
+#define WIL_TOF_FTM_MAX_LCR_LENGTH (240)
+
+static const struct
+nla_policy wil_nl80211_loc_policy[QCA_WLAN_VENDOR_ATTR_LOC_MAX + 1] = {
+ [QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE] = { .type = NLA_U64 },
+ [QCA_WLAN_VENDOR_ATTR_LOC_CAPA] = { .type = NLA_NESTED },
+ [QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS] = { .type = NLA_NESTED },
+ [QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS] = { .type = NLA_NESTED },
+ [QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE] = { .type = NLA_FLAG },
+ [QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS] = { .type = NLA_U32 },
+ [QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN] = { .type = NLA_U8 },
+ [QCA_WLAN_VENDOR_ATTR_AOA_TYPE] = { .type = NLA_U32 },
+ [QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK] = { .type = NLA_U32 },
+};
+
+static const struct
+nla_policy wil_nl80211_ftm_peer_policy[
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX + 1] = {
+ [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR] = { .len = ETH_ALEN },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS] = { .type = NLA_U32 },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS] = { .type = NLA_NESTED },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID] = { .type = NLA_U8 },
+};
+
+static const struct
+nla_policy wil_nl80211_ftm_meas_param_policy[
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX + 1] = {
+ [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST] = { .type = NLA_U8 },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP] = { .type = NLA_U8 },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION] = { .type = NLA_U8 },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD] = { .type = NLA_U16 },
+};
+
+static int wil_ftm_parse_meas_params(struct wil6210_priv *wil,
+ struct nlattr *attr,
+ struct wil_ftm_meas_params *params)
+{
+ struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX + 1];
+ int rc;
+
+ if (!attr) {
+ /* temporary defaults for one-shot measurement */
+ params->meas_per_burst = 1;
+ params->burst_period = 5; /* 500 milliseconds */
+ return 0;
+ }
+ rc = nla_parse_nested(tb, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX,
+ attr, wil_nl80211_ftm_meas_param_policy);
+ if (rc) {
+ wil_err(wil, "invalid measurement params\n");
+ return rc;
+ }
+ if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST])
+ params->meas_per_burst = nla_get_u8(
+ tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST]);
+ if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP])
+ params->num_of_bursts_exp = nla_get_u8(
+ tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP]);
+ if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION])
+ params->burst_duration = nla_get_u8(
+ tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION]);
+ if (tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD])
+ params->burst_period = nla_get_u16(
+ tb[QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD]);
+ return 0;
+}
+
+static int wil_ftm_validate_meas_params(struct wil6210_priv *wil,
+ struct wil_ftm_meas_params *params)
+{
+	/* temporarily allow only single-burst */
+ if (params->meas_per_burst > WIL_FTM_MAX_MEAS_PER_BURST ||
+ params->num_of_bursts_exp != 0) {
+ wil_err(wil, "invalid measurement params\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wil_ftm_append_meas_params(struct wil6210_priv *wil,
+ struct sk_buff *msg,
+ struct wil_ftm_meas_params *params)
+{
+ struct nlattr *nl_p;
+
+ nl_p = nla_nest_start(
+ msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS);
+ if (!nl_p)
+ goto out_put_failure;
+ if (nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST,
+ params->meas_per_burst) ||
+ nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP,
+ params->num_of_bursts_exp) ||
+ nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION,
+ params->burst_duration) ||
+ nla_put_u16(msg, QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD,
+ params->burst_period))
+ goto out_put_failure;
+ nla_nest_end(msg, nl_p);
+ return 0;
+out_put_failure:
+ return -ENOBUFS;
+}
+
+static int wil_ftm_append_peer_meas_res(struct wil6210_priv *wil,
+ struct sk_buff *msg,
+ struct wil_ftm_peer_meas_res *res)
+{
+ struct nlattr *nl_mres, *nl_f;
+ int i;
+
+ if (nla_put(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR,
+ ETH_ALEN, res->mac_addr) ||
+ nla_put_u32(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS,
+ res->flags) ||
+ nla_put_u8(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS,
+ res->status))
+ goto out_put_failure;
+ if (res->status == QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED &&
+ nla_put_u8(msg,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS,
+ res->value_seconds))
+ goto out_put_failure;
+ if (res->has_params &&
+ wil_ftm_append_meas_params(wil, msg, &res->params))
+ goto out_put_failure;
+ nl_mres = nla_nest_start(msg, QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS);
+ if (!nl_mres)
+ goto out_put_failure;
+ for (i = 0; i < res->n_meas; i++) {
+ nl_f = nla_nest_start(msg, i);
+ if (!nl_f)
+ goto out_put_failure;
+ if (nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1,
+ res->meas[i].t1) ||
+ nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2,
+ res->meas[i].t2) ||
+ nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3,
+ res->meas[i].t3) ||
+ nla_put_u64(msg, QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4,
+ res->meas[i].t4))
+ goto out_put_failure;
+ nla_nest_end(msg, nl_f);
+ }
+ nla_nest_end(msg, nl_mres);
+ return 0;
+out_put_failure:
+ wil_err(wil, "fail to append peer result\n");
+ return -ENOBUFS;
+}
+
+static void wil_ftm_send_meas_result(struct wil6210_priv *wil,
+ struct wil_ftm_peer_meas_res *res)
+{
+ struct sk_buff *vendor_event = NULL;
+ struct nlattr *nl_res;
+ int rc = 0;
+
+ wil_dbg_misc(wil, "sending %d results for peer %pM\n",
+ res->n_meas, res->mac_addr);
+
+ vendor_event = cfg80211_vendor_event_alloc(
+ wil_to_wiphy(wil),
+ wil->wdev,
+ WIL_FTM_MEAS_RESULT_MAX_LENGTH,
+ QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX,
+ GFP_KERNEL);
+ if (!vendor_event) {
+ wil_err(wil, "fail to allocate measurement result\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (nla_put_u64(
+ vendor_event, QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE,
+ wil->ftm.session_cookie)) {
+ rc = -ENOBUFS;
+ goto out;
+ }
+
+ nl_res = nla_nest_start(vendor_event,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS);
+ if (!nl_res) {
+ rc = -ENOBUFS;
+ goto out;
+ }
+
+ rc = wil_ftm_append_peer_meas_res(wil, vendor_event, res);
+ if (rc)
+ goto out;
+
+ nla_nest_end(vendor_event, nl_res);
+ cfg80211_vendor_event(vendor_event, GFP_KERNEL);
+ vendor_event = NULL;
+out:
+ if (vendor_event)
+ kfree_skb(vendor_event);
+ if (rc)
+ wil_err(wil, "send peer result failed, err %d\n", rc);
+}
+
+static void wil_ftm_send_peer_res(struct wil6210_priv *wil)
+{
+ if (!wil->ftm.has_ftm_res || !wil->ftm.ftm_res)
+ return;
+
+ wil_ftm_send_meas_result(wil, wil->ftm.ftm_res);
+ wil->ftm.has_ftm_res = 0;
+ wil->ftm.ftm_res->n_meas = 0;
+}
+
+static void wil_aoa_measurement_timeout(struct work_struct *work)
+{
+ struct wil_ftm_priv *ftm = container_of(work, struct wil_ftm_priv,
+ aoa_timeout_work);
+ struct wil6210_priv *wil = container_of(ftm, struct wil6210_priv, ftm);
+ struct wil_aoa_meas_result res;
+
+ wil_dbg_misc(wil, "AOA measurement timeout\n");
+
+ memset(&res, 0, sizeof(res));
+ ether_addr_copy(res.mac_addr, wil->ftm.aoa_peer_mac_addr);
+ res.type = wil->ftm.aoa_type;
+ res.status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED;
+ wil_aoa_cfg80211_meas_result(wil, &res);
+}
+
+static int
+wil_ftm_cfg80211_start_session(struct wil6210_priv *wil,
+ struct wil_ftm_session_request *request)
+{
+ int rc = 0;
+ bool has_lci = false, has_lcr = false;
+ u8 max_meas = 0, *ptr;
+ u32 i, cmd_len;
+ struct wmi_tof_session_start_cmd *cmd;
+
+ mutex_lock(&wil->ftm.lock);
+ if (wil->ftm.session_started) {
+ wil_err(wil, "FTM session already running\n");
+ rc = -EAGAIN;
+ goto out;
+ }
+ /* for now allow measurement to associated AP only */
+ if (!test_bit(wil_status_fwconnected, wil->status)) {
+ wil_err(wil, "must be associated\n");
+ rc = -ENOTSUPP;
+ goto out;
+ }
+
+ for (i = 0; i < request->n_peers; i++) {
+ if (request->peers[i].flags &
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI)
+ has_lci = true;
+ if (request->peers[i].flags &
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR)
+ has_lcr = true;
+ max_meas = max(max_meas,
+ request->peers[i].params.meas_per_burst);
+ }
+
+ wil->ftm.ftm_res = kzalloc(sizeof(*wil->ftm.ftm_res) +
+ max_meas * sizeof(struct wil_ftm_peer_meas) +
+ (has_lci ? WIL_TOF_FTM_MAX_LCI_LENGTH : 0) +
+ (has_lcr ? WIL_TOF_FTM_MAX_LCR_LENGTH : 0), GFP_KERNEL);
+ if (!wil->ftm.ftm_res) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ ptr = (u8 *)wil->ftm.ftm_res;
+ ptr += sizeof(struct wil_ftm_peer_meas_res) +
+ max_meas * sizeof(struct wil_ftm_peer_meas);
+ if (has_lci) {
+ wil->ftm.ftm_res->lci = ptr;
+ ptr += WIL_TOF_FTM_MAX_LCI_LENGTH;
+ }
+ if (has_lcr)
+ wil->ftm.ftm_res->lcr = ptr;
+ wil->ftm.max_ftm_meas = max_meas;
+
+ cmd_len = sizeof(struct wmi_tof_session_start_cmd) +
+ request->n_peers * sizeof(struct wmi_ftm_dest_info);
+ cmd = kzalloc(cmd_len, GFP_KERNEL);
+ if (!cmd) {
+ rc = -ENOMEM;
+ goto out_ftm_res;
+ }
+
+ cmd->session_id = cpu_to_le32(WIL_FTM_FW_SESSION_ID);
+ cmd->num_of_dest = cpu_to_le16(request->n_peers);
+ for (i = 0; i < request->n_peers; i++) {
+ ether_addr_copy(cmd->ftm_dest_info[i].dst_mac,
+ request->peers[i].mac_addr);
+ cmd->ftm_dest_info[i].channel = request->peers[i].channel;
+ if (request->peers[i].flags &
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE) {
+ cmd->ftm_dest_info[i].flags |=
+ WMI_TOF_SESSION_START_FLAG_SECURED;
+ cmd->ftm_dest_info[i].initial_token =
+ request->peers[i].secure_token_id;
+ } else {
+ cmd->ftm_dest_info[i].initial_token =
+ WIL_TOF_FTM_DEFAULT_INITIAL_TOKEN;
+ }
+ if (request->peers[i].flags &
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP)
+ cmd->ftm_dest_info[i].flags |=
+ WMI_TOF_SESSION_START_FLAG_ASAP;
+ if (request->peers[i].flags &
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI)
+ cmd->ftm_dest_info[i].flags |=
+ WMI_TOF_SESSION_START_FLAG_LCI_REQ;
+ if (request->peers[i].flags &
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR)
+ cmd->ftm_dest_info[i].flags |=
+ WMI_TOF_SESSION_START_FLAG_LCR_REQ;
+ cmd->ftm_dest_info[i].num_of_ftm_per_burst =
+ request->peers[i].params.meas_per_burst;
+ cmd->ftm_dest_info[i].num_of_bursts_exp =
+ request->peers[i].params.num_of_bursts_exp;
+ cmd->ftm_dest_info[i].burst_duration =
+ request->peers[i].params.burst_duration;
+ cmd->ftm_dest_info[i].burst_period =
+ cpu_to_le16(request->peers[i].params.burst_period);
+ }
+
+ rc = wmi_send(wil, WMI_TOF_SESSION_START_CMDID, cmd, cmd_len);
+ kfree(cmd);
+
+ if (rc)
+ goto out_ftm_res;
+
+ wil->ftm.session_cookie = request->session_cookie;
+ wil->ftm.session_started = 1;
+
+out_ftm_res:
+ if (rc) {
+ kfree(wil->ftm.ftm_res);
+ wil->ftm.ftm_res = NULL;
+ }
+out:
+ mutex_unlock(&wil->ftm.lock);
+ return rc;
+}
+
+static void
+wil_ftm_cfg80211_session_ended(struct wil6210_priv *wil, u32 status)
+{
+ struct sk_buff *vendor_event = NULL;
+
+ mutex_lock(&wil->ftm.lock);
+
+ if (!wil->ftm.session_started) {
+ wil_dbg_misc(wil, "FTM session not started, ignoring event\n");
+ goto out;
+ }
+
+ /* finish the session */
+ wil_dbg_misc(wil, "finishing FTM session\n");
+
+ /* send left-over results if any */
+ wil_ftm_send_peer_res(wil);
+
+ wil->ftm.session_started = 0;
+ kfree(wil->ftm.ftm_res);
+ wil->ftm.ftm_res = NULL;
+
+ vendor_event = cfg80211_vendor_event_alloc(
+ wil_to_wiphy(wil),
+ wil->wdev,
+ WIL_FTM_NL_EXTRA_ALLOC,
+ QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX,
+ GFP_KERNEL);
+ if (!vendor_event)
+ goto out;
+
+ if (nla_put_u64(vendor_event,
+ QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE,
+ wil->ftm.session_cookie) ||
+ nla_put_u32(vendor_event,
+ QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS, status)) {
+ wil_err(wil, "failed to fill session done event\n");
+ goto out;
+ }
+ cfg80211_vendor_event(vendor_event, GFP_KERNEL);
+ vendor_event = NULL;
+out:
+ kfree_skb(vendor_event);
+ mutex_unlock(&wil->ftm.lock);
+}
+
+static void wil_aoa_timer_fn(ulong x)
+{
+ struct wil6210_priv *wil = (void *)x;
+
+ wil_dbg_misc(wil, "AOA timer\n");
+ schedule_work(&wil->ftm.aoa_timeout_work);
+}
+
+static int
+wil_aoa_cfg80211_start_measurement(struct wil6210_priv *wil,
+ struct wil_aoa_meas_request *request)
+{
+ int rc = 0;
+ struct cfg80211_bss *bss;
+ struct wmi_aoa_meas_cmd cmd;
+
+ mutex_lock(&wil->ftm.lock);
+
+ if (wil->ftm.aoa_started) {
+ wil_err(wil, "AOA measurement already running\n");
+ rc = -EAGAIN;
+ goto out;
+ }
+ if (request->type >= QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX) {
+ wil_err(wil, "invalid AOA type: %d\n", request->type);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ bss = cfg80211_get_bss(wil_to_wiphy(wil), NULL, request->mac_addr,
+ NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+ if (!bss) {
+ wil_err(wil, "Unable to find BSS\n");
+ rc = -ENOENT;
+ goto out;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ ether_addr_copy(cmd.mac_addr, request->mac_addr);
+ cmd.channel = bss->channel->hw_value - 1;
+ cmd.aoa_meas_type = request->type;
+
+ rc = wmi_send(wil, WMI_AOA_MEAS_CMDID, &cmd, sizeof(cmd));
+ if (rc)
+ goto out_bss;
+
+ ether_addr_copy(wil->ftm.aoa_peer_mac_addr, request->mac_addr);
+ mod_timer(&wil->ftm.aoa_timer,
+ jiffies + msecs_to_jiffies(WIL_AOA_MEASUREMENT_TIMEOUT));
+ wil->ftm.aoa_started = 1;
+out_bss:
+ cfg80211_put_bss(wil_to_wiphy(wil), bss);
+out:
+ mutex_unlock(&wil->ftm.lock);
+ return rc;
+}
+
+void wil_aoa_cfg80211_meas_result(struct wil6210_priv *wil,
+ struct wil_aoa_meas_result *result)
+{
+ struct sk_buff *vendor_event = NULL;
+
+ mutex_lock(&wil->ftm.lock);
+
+ if (!wil->ftm.aoa_started) {
+ wil_info(wil, "AOA not started, not sending result\n");
+ goto out;
+ }
+
+ wil_dbg_misc(wil, "sending AOA measurement result\n");
+
+ vendor_event = cfg80211_vendor_event_alloc(
+ wil_to_wiphy(wil),
+ wil->wdev,
+ result->length + WIL_FTM_NL_EXTRA_ALLOC,
+ QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX,
+ GFP_KERNEL);
+ if (!vendor_event) {
+ wil_err(wil, "fail to allocate measurement result\n");
+ goto out;
+ }
+
+ if (nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_MAC_ADDR,
+ ETH_ALEN, result->mac_addr) ||
+ nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_AOA_TYPE,
+ result->type) ||
+ nla_put_u32(vendor_event, QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS,
+ result->status) ||
+ nla_put_u32(vendor_event,
+ QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK,
+ result->antenna_array_mask)) {
+ wil_err(wil, "failed to fill vendor event\n");
+ goto out;
+ }
+
+ if (result->length > 0 &&
+ nla_put(vendor_event, QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT,
+ result->length, result->data)) {
+ wil_err(wil, "failed to fill vendor event with AOA data\n");
+ goto out;
+ }
+
+ cfg80211_vendor_event(vendor_event, GFP_KERNEL);
+
+ del_timer_sync(&wil->ftm.aoa_timer);
+ wil->ftm.aoa_started = 0;
+out:
+ mutex_unlock(&wil->ftm.lock);
+}
+
+void wil_ftm_evt_session_ended(struct wil6210_priv *wil,
+ struct wmi_tof_session_end_event *evt)
+{
+ u32 status;
+
+ switch (evt->status) {
+ case WMI_TOF_SESSION_END_NO_ERROR:
+ status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK;
+ break;
+ case WMI_TOF_SESSION_END_PARAMS_ERROR:
+ status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID;
+ break;
+ case WMI_TOF_SESSION_END_FAIL:
+ status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED;
+ break;
+ case WMI_TOF_SESSION_END_ABORTED:
+ status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED;
+ break;
+ default:
+ status = QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED;
+ break;
+ }
+
+ wil_ftm_cfg80211_session_ended(wil, status);
+}
+
+void wil_ftm_evt_per_dest_res(struct wil6210_priv *wil,
+ struct wmi_tof_ftm_per_dest_res_event *evt)
+{
+ u32 i, index;
+ __le64 tmp = 0;
+ u8 n_meas;
+
+ mutex_lock(&wil->ftm.lock);
+
+ if (!wil->ftm.session_started || !wil->ftm.ftm_res) {
+ wil_dbg_misc(wil, "Session not running, ignoring res event\n");
+ goto out;
+ }
+ if (wil->ftm.has_ftm_res &&
+ !ether_addr_equal(evt->dst_mac, wil->ftm.ftm_res->mac_addr)) {
+ wil_dbg_misc(wil,
+ "Results for previous peer not properly terminated\n");
+ wil_ftm_send_peer_res(wil);
+ }
+
+ if (!wil->ftm.has_ftm_res) {
+ ether_addr_copy(wil->ftm.ftm_res->mac_addr, evt->dst_mac);
+ wil->ftm.has_ftm_res = 1;
+ }
+
+ n_meas = evt->actual_ftm_per_burst;
+ switch (evt->status) {
+ case WMI_PER_DEST_RES_NO_ERROR:
+ wil->ftm.ftm_res->status =
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK;
+ break;
+ case WMI_PER_DEST_RES_TX_RX_FAIL:
+ /* FW reports corrupted results here, discard. */
+ n_meas = 0;
+ wil->ftm.ftm_res->status =
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK;
+ break;
+ case WMI_PER_DEST_RES_PARAM_DONT_MATCH:
+ wil->ftm.ftm_res->status =
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID;
+ break;
+ default:
+ wil_err(wil, "unexpected status %d\n", evt->status);
+ wil->ftm.ftm_res->status =
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID;
+ break;
+ }
+
+ for (i = 0; i < n_meas; i++) {
+ index = wil->ftm.ftm_res->n_meas;
+ if (index >= wil->ftm.max_ftm_meas) {
+ wil_dbg_misc(wil, "Too many measurements, some lost\n");
+ break;
+ }
+ memcpy(&tmp, evt->responder_ftm_res[i].t1,
+ sizeof(evt->responder_ftm_res[i].t1));
+ wil->ftm.ftm_res->meas[index].t1 = le64_to_cpu(tmp);
+ memcpy(&tmp, evt->responder_ftm_res[i].t2,
+ sizeof(evt->responder_ftm_res[i].t2));
+ wil->ftm.ftm_res->meas[index].t2 = le64_to_cpu(tmp);
+ memcpy(&tmp, evt->responder_ftm_res[i].t3,
+ sizeof(evt->responder_ftm_res[i].t3));
+ wil->ftm.ftm_res->meas[index].t3 = le64_to_cpu(tmp);
+ memcpy(&tmp, evt->responder_ftm_res[i].t4,
+ sizeof(evt->responder_ftm_res[i].t4));
+ wil->ftm.ftm_res->meas[index].t4 = le64_to_cpu(tmp);
+ wil->ftm.ftm_res->n_meas++;
+ }
+
+ if (evt->flags & WMI_PER_DEST_RES_BURST_REPORT_END)
+ wil_ftm_send_peer_res(wil);
+out:
+ mutex_unlock(&wil->ftm.lock);
+}
+
+void wil_aoa_evt_meas(struct wil6210_priv *wil,
+ struct wmi_aoa_meas_event *evt,
+ int len)
+{
+ int data_len = len - offsetof(struct wmi_aoa_meas_event, meas_data);
+ struct wil_aoa_meas_result *res;
+
+ data_len = min_t(int, le16_to_cpu(evt->length), data_len);
+
+ res = kmalloc(sizeof(*res) + data_len, GFP_KERNEL);
+ if (!res)
+ return;
+
+ ether_addr_copy(res->mac_addr, evt->mac_addr);
+ res->type = evt->aoa_meas_type;
+ res->antenna_array_mask = le32_to_cpu(evt->meas_rf_mask);
+ res->status = evt->meas_status;
+ res->length = data_len;
+ memcpy(res->data, evt->meas_data, data_len);
+
+ wil_dbg_misc(wil, "AOA result status %d type %d mask %d length %d\n",
+ res->status, res->type,
+ res->antenna_array_mask, res->length);
+
+ wil_aoa_cfg80211_meas_result(wil, res);
+ kfree(res);
+}
+
+int wil_ftm_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct sk_buff *skb;
+ struct nlattr *attr;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ /* we should get the capabilities from the FW. for now,
+ * report dummy capabilities for one shot measurement
+ */
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 128);
+ if (!skb)
+ return -ENOMEM;
+ attr = nla_nest_start(skb, QCA_WLAN_VENDOR_ATTR_LOC_CAPA);
+ if (!attr ||
+ nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER |
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR |
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP |
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA) ||
+ nla_put_u16(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS,
+ 1) ||
+ nla_put_u16(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS, 1) ||
+ nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP,
+ 0) ||
+ nla_put_u8(skb, QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST,
+ 4) ||
+ nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES,
+ BIT(QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE))) {
+ wil_err(wil, "fail to fill get_capabilities reply\n");
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ nla_nest_end(skb, attr);
+
+ return cfg80211_vendor_cmd_reply(skb);
+}
+
+int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil_ftm_session_request *request;
+ struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_LOC_MAX + 1];
+ struct nlattr *tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX + 1];
+ struct nlattr *peer;
+ int rc, n_peers = 0, index = 0, tmp;
+ struct cfg80211_bss *bss;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ rc = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_LOC_MAX, data, data_len,
+ wil_nl80211_loc_policy);
+ if (rc) {
+ wil_err(wil, "Invalid ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS]) {
+ wil_err(wil, "no peers specified\n");
+ return -EINVAL;
+ }
+
+ if (!tb[QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE]) {
+ wil_err(wil, "session cookie not specified\n");
+ return -EINVAL;
+ }
+
+ nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS],
+ tmp)
+ n_peers++;
+
+ if (!n_peers) {
+ wil_err(wil, "empty peer list\n");
+ return -EINVAL;
+ }
+
+ /* for now only allow measurement for a single peer */
+ if (n_peers != 1) {
+ wil_err(wil, "only single peer allowed\n");
+ return -EINVAL;
+ }
+
+ request = kzalloc(sizeof(*request) +
+ n_peers * sizeof(struct wil_ftm_meas_peer_info),
+ GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ request->session_cookie =
+ nla_get_u64(tb[QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE]);
+ request->n_peers = n_peers;
+ nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS],
+ tmp) {
+ rc = nla_parse_nested(tb2, QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX,
+ peer, wil_nl80211_ftm_peer_policy);
+ if (rc) {
+ wil_err(wil, "Invalid peer ATTR\n");
+ goto out;
+ }
+ if (!tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR] ||
+ nla_len(tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR])
+ != ETH_ALEN) {
+ wil_err(wil, "Peer MAC address missing or invalid\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(request->peers[index].mac_addr,
+ nla_data(tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR]),
+ ETH_ALEN);
+ bss = cfg80211_get_bss(wiphy, NULL,
+ request->peers[index].mac_addr, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY,
+ IEEE80211_PRIVACY_ANY);
+ if (!bss) {
+ wil_err(wil, "invalid bss at index %d\n", index);
+ rc = -ENOENT;
+ goto out;
+ }
+ request->peers[index].channel = bss->channel->hw_value - 1;
+ cfg80211_put_bss(wiphy, bss);
+ if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS])
+ request->peers[index].flags = nla_get_u32(
+ tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS]);
+ if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID])
+ request->peers[index].secure_token_id = nla_get_u8(
+ tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID]);
+ rc = wil_ftm_parse_meas_params(
+ wil,
+ tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS],
+ &request->peers[index].params);
+ if (!rc)
+ rc = wil_ftm_validate_meas_params(
+ wil, &request->peers[index].params);
+ if (rc)
+ goto out;
+ index++;
+ }
+
+ rc = wil_ftm_cfg80211_start_session(wil, request);
+out:
+ kfree(request);
+ return rc;
+}
+
+int wil_ftm_abort_session(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "stub\n");
+ return -ENOTSUPP;
+}
+
+int wil_ftm_configure_responder(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "stub\n");
+ return -ENOTSUPP;
+}
+
+int wil_aoa_start_measurement(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wil_aoa_meas_request request;
+ struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_LOC_MAX + 1];
+ int rc;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+ return -ENOTSUPP;
+
+ wil_dbg_misc(wil, "AOA start measurement\n");
+
+ rc = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_LOC_MAX, data, data_len,
+ wil_nl80211_loc_policy);
+ if (rc) {
+ wil_err(wil, "Invalid ATTR\n");
+ return rc;
+ }
+
+ if (!tb[QCA_WLAN_VENDOR_ATTR_MAC_ADDR] ||
+ !tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]) {
+ wil_err(wil, "Must specify MAC address and type\n");
+ return -EINVAL;
+ }
+
+ memset(&request, 0, sizeof(request));
+ ether_addr_copy(request.mac_addr,
+ nla_data(tb[QCA_WLAN_VENDOR_ATTR_MAC_ADDR]));
+ request.type = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]);
+
+ rc = wil_aoa_cfg80211_start_measurement(wil, &request);
+ return rc;
+}
+
+int wil_aoa_abort_measurement(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "stub\n");
+ return -ENOTSUPP;
+}
+
+void wil_ftm_init(struct wil6210_priv *wil)
+{
+ mutex_init(&wil->ftm.lock);
+ setup_timer(&wil->ftm.aoa_timer, wil_aoa_timer_fn, (ulong)wil);
+ INIT_WORK(&wil->ftm.aoa_timeout_work, wil_aoa_measurement_timeout);
+}
+
+void wil_ftm_deinit(struct wil6210_priv *wil)
+{
+ del_timer_sync(&wil->ftm.aoa_timer);
+ cancel_work_sync(&wil->ftm.aoa_timeout_work);
+ kfree(wil->ftm.ftm_res);
+}
+
+void wil_ftm_stop_operations(struct wil6210_priv *wil)
+{
+ wil_ftm_cfg80211_session_ended(
+ wil, QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED);
+}
diff --git a/drivers/net/wireless/ath/wil6210/ftm.h b/drivers/net/wireless/ath/wil6210/ftm.h
new file mode 100644
index 000000000000..9721344579aa
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/ftm.h
@@ -0,0 +1,512 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __WIL6210_FTM_H__
+#define __WIL6210_FTM_H__
+
+/**
+ * NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID,
+ * vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and
+ * qca_wlan_vendor_attr is open source file src/common/qca-vendor.h in
+ * git://w1.fi/srv/git/hostap.git; the values here are just a copy of that
+ */
+
+/**
+ * enum qca_wlan_vendor_attr_loc - attributes for FTM and AOA commands
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE: Session cookie, specified in
+ * %QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION. It will be provided by driver
+ * events and can be used to identify events targeted for this session.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA: Nested attribute containing extra
+ * FTM/AOA capabilities, returned by %QCA_NL80211_VENDOR_SUBCMD_LOC_GET_CAPA.
+ * see %enum qca_wlan_vendor_attr_loc_capa.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS: array of nested attributes
+ * containing information about each peer in measurement session
+ * request. See %enum qca_wlan_vendor_attr_ftm_peer_info for supported
+ * attributes for each peer
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS: nested attribute containing
+ * measurement results for a peer. Reported by the
+ * %QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT event.
+ * See %enum qca_wlan_vendor_attr_ftm_peer_result for list of supported
+ * attributes.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE: flag attribute for
+ * enabling or disabling responder functionality.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_LCI: used in the
+ * %QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER command in order to
+ * specify the LCI report that will be sent by the responder during
+ * a measurement exchange. The format is defined in IEEE P802.11-REVmc/D5.0,
+ * 9.4.2.22.10
+ * @QCA_WLAN_VENDOR_ATTR_FTM_LCR: provided with the
+ * %QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER command in order to
+ * specify the location civic report that will be sent by the responder during
+ * a measurement exchange. The format is defined in IEEE P802.11-REVmc/D5.0,
+ * 9.4.2.22.13
+ * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS: session/measurement completion
+ * status code, reported in %QCA_NL80211_VENDOR_SUBCMD_FTM_SESSION_DONE
+ * and %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT
+ * @QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN: initial dialog token used
+ * by responder (0 if not specified)
+ * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE: AOA measurement type. Requested in
+ * %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS and optionally in
+ * %QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION if AOA measurements
+ * are needed as part of an FTM session.
+ * Reported by QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT.
+ * See enum qca_wlan_vendor_attr_aoa_type.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK: bit mask indicating
+ * which antenna arrays were used in location measurement.
+ * Reported in %QCA_NL80211_VENDOR_SUBCMD_FTM_MEAS_RESULT and
+ * %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS_RESULT
+ * @QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT: AOA measurement data.
+ * Its contents depends on the AOA type and antenna array mask:
+ * %QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE: array of U16 values,
+ * phase of the strongest CIR path for each antenna in the measured
+ * array(s).
+ * %QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP: array of 2 U16
+ * values, phase and amplitude of the strongest CIR path for each
+ * antenna in the measured array(s)
+ */
+enum qca_wlan_vendor_attr_loc {
+ /* we reuse these attributes */
+ QCA_WLAN_VENDOR_ATTR_MAC_ADDR = 6,
+ QCA_WLAN_VENDOR_ATTR_PAD = 13,
+ QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE = 14,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA = 15,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS = 16,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEER_RESULTS = 17,
+ QCA_WLAN_VENDOR_ATTR_FTM_RESPONDER_ENABLE = 18,
+ QCA_WLAN_VENDOR_ATTR_FTM_LCI = 19,
+ QCA_WLAN_VENDOR_ATTR_FTM_LCR = 20,
+ QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS = 21,
+ QCA_WLAN_VENDOR_ATTR_FTM_INITIAL_TOKEN = 22,
+ QCA_WLAN_VENDOR_ATTR_AOA_TYPE = 23,
+ QCA_WLAN_VENDOR_ATTR_LOC_ANTENNA_ARRAY_MASK = 24,
+ QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT = 25,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_LOC_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_LOC_MAX = QCA_WLAN_VENDOR_ATTR_LOC_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_loc_capa - indoor location capabilities
+ *
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS: various flags. See
+ * %enum qca_wlan_vendor_attr_loc_capa_flags
+ * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS: Maximum number
+ * of measurement sessions that can run concurrently.
+ * Default is one session (no session concurrency)
+ * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS: The total number of unique
+ * peers that are supported in running sessions. For example,
+ * if the value is 8 and maximum number of sessions is 2, you can
+ * have one session with 8 unique peers, or 2 sessions with 4 unique
+ * peers each, and so on.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP: Maximum number
+ * of bursts per peer, as an exponent (2^value). Default is 0,
+ * meaning no multi-burst support.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST: Maximum number
+ * of measurement exchanges allowed in a single burst
+ * @QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES: Supported AOA measurement
+ * types. A bit mask (unsigned 32 bit value), each bit corresponds
+ * to an AOA type as defined by %enum qca_wlan_vendor_attr_aoa_type.
+ */
+enum qca_wlan_vendor_attr_loc_capa {
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_INVALID,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAGS,
+ QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_SESSIONS,
+ QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_PEERS,
+ QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_NUM_BURSTS_EXP,
+ QCA_WLAN_VENDOR_ATTR_FTM_CAPA_MAX_MEAS_PER_BURST,
+ QCA_WLAN_VENDOR_ATTR_AOA_CAPA_SUPPORTED_TYPES,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_MAX =
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_loc_capa_flags: Indoor location capability flags
+ *
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER: Set if driver
+ * can be configured as an FTM responder (for example, an AP that
+ * services FTM requests). %QCA_NL80211_VENDOR_SUBCMD_FTM_CFG_RESPONDER
+ * will be supported if set.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR: Set if driver
+ * can run FTM sessions. %QCA_NL80211_VENDOR_SUBCMD_FTM_START_SESSION
+ * will be supported if set.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP: Set if FTM responder
+ * supports immediate (ASAP) response.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA: Set if driver supports standalone
+ * AOA measurement using %QCA_NL80211_VENDOR_SUBCMD_AOA_MEAS
+ * @QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA_IN_FTM: Set if driver supports
+ * requesting AOA measurements as part of an FTM session.
+ */
+enum qca_wlan_vendor_attr_loc_capa_flags {
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_RESPONDER = 1 << 0,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_FTM_INITIATOR = 1 << 1,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_ASAP = 1 << 2,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA = 1 << 3,
+ QCA_WLAN_VENDOR_ATTR_LOC_CAPA_FLAG_AOA_IN_FTM = 1 << 4,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_peer_info: information about
+ * a single peer in a measurement session.
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR: The MAC address of the peer.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS: Various flags related
+ * to measurement. See %enum qca_wlan_vendor_attr_ftm_peer_meas_flags.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS: Nested attribute of
+ * FTM measurement parameters, as specified by IEEE P802.11-REVmc/D7.0,
+ * 9.4.2.167. See %enum qca_wlan_vendor_attr_ftm_meas_param for
+ * list of supported attributes.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID: Initial token ID for
+ * secure measurement
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD: Request AOA
+ * measurement every _value_ bursts. If 0 or not specified,
+ * AOA measurements will be disabled for this peer.
+ */
+enum qca_wlan_vendor_attr_ftm_peer_info {
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_INVALID,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAC_ADDR,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX =
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_peer_meas_flags: Measurement request flags,
+ * per-peer
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP: If set, request
+ * immediate (ASAP) response from peer
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI: If set, request
+ * LCI report from peer. The LCI report includes the absolute
+ * location of the peer in "official" coordinates (similar to GPS).
+ * See IEEE P802.11-REVmc/D7.0, 11.24.6.7 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR: If set, request
+ * Location civic report from peer. The LCR includes the location
+ * of the peer in free-form format. See IEEE P802.11-REVmc/D7.0,
+ * 11.24.6.7 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE: If set,
+ * request a secure measurement.
+ * %QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID must also be provided.
+ */
+enum qca_wlan_vendor_attr_ftm_peer_meas_flags {
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_ASAP = 1 << 0,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCI = 1 << 1,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_LCR = 1 << 2,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAG_SECURE = 1 << 3,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_meas_param: Measurement parameters
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST: Number of measurements
+ * to perform in a single burst.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP: Number of bursts to
+ * perform, specified as an exponent (2^value)
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION: Duration of burst
+ * instance, as specified in IEEE P802.11-REVmc/D7.0, 9.4.2.167
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD: Time between bursts,
+ * as specified in IEEE P802.11-REVmc/D7.0, 9.4.2.167. Must
+ * be larger than %QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION
+ */
+enum qca_wlan_vendor_attr_ftm_meas_param {
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_INVALID,
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MEAS_PER_BURST,
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_NUM_BURSTS_EXP,
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_DURATION,
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_BURST_PERIOD,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_MAX =
+ QCA_WLAN_VENDOR_ATTR_FTM_PARAM_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_peer_result: Per-peer results
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR: MAC address of the reported
+ * peer
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS: Status of measurement
+ * request for this peer.
+ * See %enum qca_wlan_vendor_attr_ftm_peer_result_status
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS: Various flags related
+ * to measurement results for this peer.
+ * See %enum qca_wlan_vendor_attr_ftm_peer_result_flags
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS: Specified when
+ * request failed and peer requested not to send an additional request
+ * for this number of seconds.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCI: LCI report when received
+ * from peer. In the format specified by IEEE P802.11-REVmc/D7.0,
+ * 9.4.2.22.10
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCR: Location civic report when
+ * received from peer.In the format specified by IEEE P802.11-REVmc/D7.0,
+ * 9.4.2.22.13
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS: Reported when peer
+ * overridden some measurement request parameters. See
+ * enum qca_wlan_vendor_attr_ftm_meas_param.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AOA_MEAS: AOA measurement
+ * for this peer. Same contents as %QCA_WLAN_VENDOR_ATTR_AOA_MEAS_RESULT
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS: Array of measurement
+ * results. Each entry is a nested attribute defined
+ * by enum qca_wlan_vendor_attr_ftm_meas.
+ */
+enum qca_wlan_vendor_attr_ftm_peer_result {
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_INVALID,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAC_ADDR,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAGS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCI,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_LCR,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AOA_MEAS,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MAX =
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_peer_result_status
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK: Request sent ok and results
+ * will be provided. Peer may have overridden some measurement parameters,
+ * in which case overridden parameters will be report by
+ * %QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_MEAS_PARAMS attribute
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INCAPABLE: Peer is incapable
+ * of performing the measurement request. No more results will be sent
+ * for this peer in this session.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED: Peer reported request
+ * failed, and requested not to send an additional request for number
+ * of seconds specified by %QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_VALUE_SECONDS
+ * attribute.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID: Request validation
+ * failed. Request was not sent over the air.
+ */
+enum qca_wlan_vendor_attr_ftm_peer_result_status {
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_OK,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INCAPABLE,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_FAILED,
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_STATUS_INVALID,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_peer_result_flags : Various flags
+ * for measurement result, per-peer
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAG_DONE: If set,
+ * measurement completed for this peer. No more results will be reported
+ * for this peer in this session.
+ */
+enum qca_wlan_vendor_attr_ftm_peer_result_flags {
+ QCA_WLAN_VENDOR_ATTR_FTM_PEER_RES_FLAG_DONE = 1 << 0,
+};
+
+/**
+ * enum qca_vendor_attr_loc_session_status: Session completion status code
+ *
+ * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK: Session completed
+ * successfully.
+ * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED: Session aborted
+ * by request
+ * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID: Session request
+ * was invalid and was not started
+ * @QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED: Session had an error
+ * and did not complete normally (for example out of resources)
+ *
+ */
+enum qca_vendor_attr_loc_session_status {
+ QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_OK,
+ QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_ABORTED,
+ QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_INVALID,
+ QCA_WLAN_VENDOR_ATTR_LOC_SESSION_STATUS_FAILED,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_ftm_meas: Single measurement data
+ *
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1: Time of departure(TOD) of FTM packet as
+ * recorded by responder, in picoseconds.
+ * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2: Time of arrival(TOA) of FTM packet at
+ * initiator, in picoseconds.
+ * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3: TOD of ACK packet as recorded by
+ * initiator, in picoseconds.
+ * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4: TOA of ACK packet at
+ * responder, in picoseconds.
+ * See IEEE P802.11-REVmc/D7.0, 11.24.6.4 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_RSSI: RSSI (signal level) as recorded
+ * during this measurement exchange. Optional and will be provided if
+ * the hardware can measure it.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOD_ERR: TOD error reported by
+ * responder. Not always provided.
+ * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOA_ERR: TOA error reported by
+ * responder. Not always provided.
+ * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOD_ERR: TOD error measured by
+ * initiator. Not always provided.
+ * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOA_ERR: TOA error measured by
+ * initiator. Not always provided.
+ * See IEEE P802.11-REVmc/D7.0, 9.6.8.33 for more information.
+ * @QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PAD: Dummy attribute for padding.
+ */
+enum qca_wlan_vendor_attr_ftm_meas {
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INVALID,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T1,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T2,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T3,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_T4,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_RSSI,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOD_ERR,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_TOA_ERR,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOD_ERR,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_INITIATOR_TOA_ERR,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PAD,
+ /* keep last */
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_AFTER_LAST,
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_MAX =
+ QCA_WLAN_VENDOR_ATTR_FTM_MEAS_AFTER_LAST - 1,
+};
+
+/**
+ * enum qca_wlan_vendor_attr_aoa_type: AOA measurement type
+ *
+ * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE: Phase of the strongest
+ * CIR (channel impulse response) path for each antenna.
+ * @QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP: Phase and amplitude
+ * of the strongest CIR path for each antenna.
+ */
+enum qca_wlan_vendor_attr_aoa_type {
+ QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE,
+ QCA_WLAN_VENDOR_ATTR_AOA_TYPE_TOP_CIR_PHASE_AMP,
+ QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX,
+};
+
+/* vendor event indices, used from both cfg80211.c and ftm.c */
+enum qca_nl80211_vendor_events_index {
+ QCA_NL80211_VENDOR_EVENT_FTM_MEAS_RESULT_INDEX,
+ QCA_NL80211_VENDOR_EVENT_FTM_SESSION_DONE_INDEX,
+ QCA_NL80211_VENDOR_EVENT_AOA_MEAS_RESULT_INDEX,
+};
+
+/* measurement parameters. Specified for each peer as part
+ * of measurement request, or provided with measurement
+ * results for peer in case peer overridden parameters
+ */
+struct wil_ftm_meas_params {
+ u8 meas_per_burst;
+ u8 num_of_bursts_exp;
+ u8 burst_duration;
+ u16 burst_period;
+};
+
+/* measurement request for a single peer */
+struct wil_ftm_meas_peer_info {
+ u8 mac_addr[ETH_ALEN];
+ u8 channel;
+ u32 flags; /* enum qca_wlan_vendor_attr_ftm_peer_meas_flags */
+ struct wil_ftm_meas_params params;
+ u8 secure_token_id;
+};
+
+/* session request, passed to wil_ftm_cfg80211_start_session */
+struct wil_ftm_session_request {
+ u64 session_cookie;
+ u32 n_peers;
+ /* keep last, variable size according to n_peers */
+ struct wil_ftm_meas_peer_info peers[0];
+};
+
+/* single measurement for a peer */
+struct wil_ftm_peer_meas {
+ u64 t1, t2, t3, t4;
+};
+
+/* measurement results for a single peer */
+struct wil_ftm_peer_meas_res {
+ u8 mac_addr[ETH_ALEN];
+ u32 flags; /* enum qca_wlan_vendor_attr_ftm_peer_result_flags */
+ u8 status; /* enum qca_wlan_vendor_attr_ftm_peer_result_status */
+ u8 value_seconds;
+ bool has_params; /* true if params is valid */
+ struct wil_ftm_meas_params params; /* peer overridden params */
+ u8 *lci;
+ u8 lci_length;
+ u8 *lcr;
+ u8 lcr_length;
+ u32 n_meas;
+ /* keep last, variable size according to n_meas */
+ struct wil_ftm_peer_meas meas[0];
+};
+
+/* standalone AOA measurement request */
+struct wil_aoa_meas_request {
+ u8 mac_addr[ETH_ALEN];
+ u32 type;
+};
+
+/* AOA measurement result */
+struct wil_aoa_meas_result {
+ u8 mac_addr[ETH_ALEN];
+ u32 type;
+ u32 antenna_array_mask;
+ u32 status;
+ u32 length;
+ /* keep last, variable size according to length */
+ u8 data[0];
+};
+
+/* private data related to FTM. Part of the wil6210_priv structure */
+struct wil_ftm_priv {
+ struct mutex lock; /* protects the FTM data */
+ u8 session_started;
+ u64 session_cookie;
+ struct wil_ftm_peer_meas_res *ftm_res;
+ u8 has_ftm_res;
+ u32 max_ftm_meas;
+
+ /* standalone AOA measurement */
+ u8 aoa_started;
+ u8 aoa_peer_mac_addr[ETH_ALEN];
+ u32 aoa_type;
+ struct timer_list aoa_timer;
+ struct work_struct aoa_timeout_work;
+};
+
+int wil_ftm_get_capabilities(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+int wil_ftm_abort_session(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+int wil_ftm_configure_responder(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+int wil_aoa_start_measurement(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+int wil_aoa_abort_measurement(struct wiphy *wiphy, struct wireless_dev *wdev,
+ const void *data, int data_len);
+
+#endif /* __WIL6210_FTM_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index a509841c3187..5285ebc8b9af 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -518,6 +518,8 @@ int wil_priv_init(struct wil6210_priv *wil)
spin_lock_init(&wil->wmi_ev_lock);
init_waitqueue_head(&wil->wq);
+ wil_ftm_init(wil);
+
wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi");
if (!wil->wmi_wq)
return -EAGAIN;
@@ -565,6 +567,7 @@ void wil_priv_deinit(struct wil6210_priv *wil)
{
wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_ftm_deinit(wil);
wil_set_recovery_state(wil, fw_recovery_idle);
del_timer_sync(&wil->scan_timer);
del_timer_sync(&wil->p2p.discovery_timer);
@@ -1056,6 +1059,7 @@ int __wil_down(struct wil6210_priv *wil)
wil_enable_irq(wil);
wil_p2p_stop_radio_operations(wil);
+ wil_ftm_stop_operations(wil);
mutex_lock(&wil->p2p_wdev_mutex);
if (wil->scan_request) {
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index ce33e919d321..a19dba5b9e5f 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -25,6 +25,7 @@
#include <linux/types.h>
#include "wmi.h"
#include "wil_platform.h"
+#include "ftm.h"
extern bool no_fw_recovery;
extern unsigned int mtu_max;
@@ -668,6 +669,8 @@ struct wil6210_priv {
/* High Access Latency Policy voting */
struct wil_halp halp;
+ struct wil_ftm_priv ftm;
+
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
struct notifier_block pm_notify;
@@ -872,6 +875,8 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
u8 chan, u8 hidden_ssid, u8 is_go);
int wmi_pcp_stop(struct wil6210_priv *wil);
int wmi_led_cfg(struct wil6210_priv *wil, bool enable);
+int wmi_aoa_meas(struct wil6210_priv *wil, const void *mac_addr, u8 chan,
+ u8 type);
void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event);
void wil_probe_client_flush(struct wil6210_priv *wil);
@@ -915,4 +920,18 @@ void wil_halp_unvote(struct wil6210_priv *wil);
void wil6210_set_halp(struct wil6210_priv *wil);
void wil6210_clear_halp(struct wil6210_priv *wil);
+void wil_ftm_init(struct wil6210_priv *wil);
+void wil_ftm_deinit(struct wil6210_priv *wil);
+void wil_ftm_stop_operations(struct wil6210_priv *wil);
+void wil_aoa_cfg80211_meas_result(struct wil6210_priv *wil,
+ struct wil_aoa_meas_result *result);
+
+void wil_ftm_evt_session_ended(struct wil6210_priv *wil,
+ struct wmi_tof_session_end_event *evt);
+void wil_ftm_evt_per_dest_res(struct wil6210_priv *wil,
+ struct wmi_tof_ftm_per_dest_res_event *evt);
+void wil_aoa_evt_meas(struct wil6210_priv *wil,
+ struct wmi_aoa_meas_event *evt,
+ int len);
+
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 6ec3ddc5b6f1..daa7a33d12d8 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -22,6 +22,7 @@
#include "txrx.h"
#include "wmi.h"
#include "trace.h"
+#include "ftm.h"
static uint max_assoc_sta = WIL6210_MAX_CID;
module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR);
@@ -772,6 +773,30 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
spin_unlock_bh(&sta->tid_rx_lock);
}
+/* WMI event thunk: forward WMI_AOA_MEAS_EVENTID payload to ftm.c.
+ * Length is passed through so the handler can validate variable data.
+ */
+static void wmi_evt_aoa_meas(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct wmi_aoa_meas_event *evt = d;
+
+ wil_aoa_evt_meas(wil, evt, len);
+}
+
+/* WMI event thunk: forward WMI_TOF_SESSION_END_EVENTID to ftm.c. */
+static void wmi_evt_ftm_session_ended(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct wmi_tof_session_end_event *evt = d;
+
+ wil_ftm_evt_session_ended(wil, evt);
+}
+
+/* WMI event thunk: forward WMI_TOF_FTM_PER_DEST_RES_EVENTID
+ * (per-peer FTM measurement results) to ftm.c.
+ */
+static void wmi_evt_per_dest_res(struct wil6210_priv *wil, int id,
+ void *d, int len)
+{
+ struct wmi_tof_ftm_per_dest_res_event *evt = d;
+
+ wil_ftm_evt_per_dest_res(wil, evt);
+}
+
/**
* Some events are ignored for purpose; and need not be interpreted as
* "unhandled events"
@@ -799,6 +824,13 @@ static const struct {
{WMI_DELBA_EVENTID, wmi_evt_delba},
{WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
+ {WMI_AOA_MEAS_EVENTID, wmi_evt_aoa_meas},
+ {WMI_TOF_SESSION_END_EVENTID, wmi_evt_ftm_session_ended},
+ {WMI_TOF_GET_CAPABILITIES_EVENTID, wmi_evt_ignore},
+ {WMI_TOF_SET_LCR_EVENTID, wmi_evt_ignore},
+ {WMI_TOF_SET_LCI_EVENTID, wmi_evt_ignore},
+ {WMI_TOF_FTM_PER_DEST_RES_EVENTID, wmi_evt_per_dest_res},
+ {WMI_TOF_CHANNEL_INFO_EVENTID, wmi_evt_ignore},
};
/*
diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c
index ec6955452391..1e56d445c6e1 100644
--- a/drivers/net/wireless/cnss/cnss_pci.c
+++ b/drivers/net/wireless/cnss/cnss_pci.c
@@ -1404,7 +1404,6 @@ static int cnss_wlan_is_codeswap_supported(u16 revision)
static int cnss_smmu_init(struct device *dev)
{
struct dma_iommu_mapping *mapping;
- int disable_htw = 1;
int atomic_ctx = 1;
int ret;
@@ -1418,15 +1417,6 @@ static int cnss_smmu_init(struct device *dev)
}
ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret) {
- pr_err("%s: set disable_htw attribute failed, err = %d\n",
- __func__, ret);
- goto set_attr_fail;
- }
-
- ret = iommu_domain_set_attr(mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
if (ret) {
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index af1e5a70d585..df3901093006 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/msm_gsi.h>
#include <linux/platform_device.h>
+#include <linux/delay.h>
#include "gsi.h"
#include "gsi_reg.h"
@@ -26,6 +27,8 @@
#define GSI_MHI_ER_START 10
#define GSI_MHI_ER_END 16
+#define GSI_RESET_WA_MIN_SLEEP 1000
+#define GSI_RESET_WA_MAX_SLEEP 2000
static const struct of_device_id msm_gsi_match[] = {
{ .compatible = "qcom,msm_gsi", },
{ },
@@ -105,6 +108,11 @@ static void gsi_handle_ch_ctrl(int ee)
GSIDBG("ch %x\n", ch);
for (i = 0; i < 32; i++) {
if ((1 << i) & ch) {
+ if (i >= gsi_ctx->max_ch || i >= GSI_CHAN_MAX) {
+ GSIERR("invalid channel %d\n", i);
+ break;
+ }
+
ctx = &gsi_ctx->chan[i];
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(i, ee));
@@ -113,6 +121,7 @@ static void gsi_handle_ch_ctrl(int ee)
GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
GSIDBG("ch %u state updated to %u\n", i, ctx->state);
complete(&ctx->compl);
+ gsi_ctx->ch_dbg[i].cmd_completed++;
}
}
@@ -132,6 +141,11 @@ static void gsi_handle_ev_ctrl(int ee)
GSIDBG("ev %x\n", ch);
for (i = 0; i < 32; i++) {
if ((1 << i) & ch) {
+ if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
+ GSIERR("invalid event %d\n", i);
+ break;
+ }
+
ctx = &gsi_ctx->evtr[i];
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(i, ee));
@@ -170,7 +184,12 @@ static void gsi_handle_glob_err(uint32_t err)
gsi_ctx->per.notify_cb(&per_notify);
break;
case GSI_ERR_TYPE_CHAN:
- BUG_ON(log->virt_idx >= GSI_MAX_CHAN);
+ if (log->virt_idx >= gsi_ctx->max_ch) {
+ GSIERR("Unexpected ch %d\n", log->virt_idx);
+ WARN_ON(1);
+ return;
+ }
+
ch = &gsi_ctx->chan[log->virt_idx];
chan_notify.chan_user_data = ch->props.chan_user_data;
chan_notify.err_desc = err & 0xFFFF;
@@ -213,7 +232,12 @@ static void gsi_handle_glob_err(uint32_t err)
WARN_ON(1);
break;
case GSI_ERR_TYPE_EVT:
- BUG_ON(log->virt_idx >= GSI_MAX_EVT_RING);
+ if (log->virt_idx >= gsi_ctx->max_ev) {
+ GSIERR("Unexpected ev %d\n", log->virt_idx);
+ WARN_ON(1);
+ return;
+ }
+
ev = &gsi_ctx->evtr[log->virt_idx];
evt_notify.user_data = ev->props.user_data;
evt_notify.err_desc = err & 0xFFFF;
@@ -257,6 +281,9 @@ static void gsi_handle_glob_ee(int ee)
if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) {
err = gsi_readl(gsi_ctx->base +
GSI_EE_n_ERROR_LOG_OFFS(ee));
+ if (gsi_ctx->per.ver >= GSI_VER_1_2)
+ gsi_writel(0, gsi_ctx->base +
+ GSI_EE_n_ERROR_LOG_OFFS(ee));
gsi_writel(clr, gsi_ctx->base +
GSI_EE_n_ERROR_LOG_CLR_OFFS(ee));
gsi_handle_glob_err(err);
@@ -311,7 +338,12 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
uint64_t rp;
ch_id = evt->chid;
- BUG_ON(ch_id >= GSI_MAX_CHAN);
+ if (ch_id >= gsi_ctx->max_ch) {
+ GSIERR("Unexpected ch %d\n", ch_id);
+ WARN_ON(1);
+ return;
+ }
+
ch_ctx = &gsi_ctx->chan[ch_id];
BUG_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI);
rp = evt->xfer_ptr;
@@ -567,6 +599,75 @@ static irqreturn_t gsi_isr(int irq, void *ctxt)
return IRQ_HANDLED;
}
+/* Read the number of GSI channels supported by the hardware from the
+ * version-specific HW_PARAM register of the current EE.
+ *
+ * Returns the channel count, or 0 for an unknown GSI version (WARNs);
+ * the caller (gsi_register_device) treats 0 as a registration failure.
+ */
+static uint32_t gsi_get_max_channels(enum gsi_ver ver)
+{
+ uint32_t reg;
+
+ switch (ver) {
+ case GSI_VER_1_0:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >>
+ GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_2:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >>
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_3:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+ reg = (reg &
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
+ break;
+ default:
+ GSIERR("bad gsi version %d\n", ver);
+ WARN_ON(1);
+ reg = 0;
+ }
+
+ GSIDBG("max channels %d\n", reg);
+
+ return reg;
+}
+
+/* Read the number of GSI event rings supported by the hardware from
+ * the version-specific HW_PARAM register of the current EE.
+ *
+ * Returns the event-ring count, or 0 for an unknown GSI version
+ * (WARNs); the caller treats 0 as a registration failure. The value
+ * is also used to build evt_bmap — NOTE(review): a count of 32 would
+ * make the caller's "1 << max_ev" shift undefined; confirm HW bounds.
+ */
+static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
+{
+ uint32_t reg;
+
+ switch (ver) {
+ case GSI_VER_1_0:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >>
+ GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_2:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >>
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_3:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+ reg = (reg &
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
+ break;
+ default:
+ GSIERR("bad gsi version %d\n", ver);
+ WARN_ON(1);
+ reg = 0;
+ }
+
+ GSIDBG("max event rings %d\n", reg);
+
+ return reg;
+}
int gsi_complete_clk_grant(unsigned long dev_hdl)
{
unsigned long flags;
@@ -611,6 +712,11 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
return -GSI_STATUS_INVALID_PARAMS;
}
+ if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
+ GSIERR("bad params gsi_ver=%d\n", props->ver);
+ return -GSI_STATUS_INVALID_PARAMS;
+ }
+
if (!props->notify_cb) {
GSIERR("notify callback must be provided\n");
return -GSI_STATUS_INVALID_PARAMS;
@@ -668,8 +774,25 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
mutex_init(&gsi_ctx->mlock);
atomic_set(&gsi_ctx->num_chan, 0);
atomic_set(&gsi_ctx->num_evt_ring, 0);
- /* only support 16 un-reserved + 7 reserved event virtual IDs */
- gsi_ctx->evt_bmap = ~0x7E03FF;
+ gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
+ if (gsi_ctx->max_ch == 0) {
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+ devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+ GSIERR("failed to get max channels\n");
+ return -GSI_STATUS_ERROR;
+ }
+ gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
+ if (gsi_ctx->max_ev == 0) {
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+ devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+ GSIERR("failed to get max event rings\n");
+ return -GSI_STATUS_ERROR;
+ }
+
+ /* bitmap is max events excludes reserved events */
+ gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
+ gsi_ctx->evt_bmap |= ((1 << (GSI_MHI_ER_END + 1)) - 1) ^
+ ((1 << GSI_MHI_ER_START) - 1);
/*
* enable all interrupts but GSI_BREAK_POINT.
@@ -693,6 +816,10 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
else
GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");
+ if (gsi_ctx->per.ver >= GSI_VER_1_2)
+ gsi_writel(0, gsi_ctx->base +
+ GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));
+
*dev_hdl = (uintptr_t)gsi_ctx;
return GSI_STATUS_SUCCESS;
@@ -1059,7 +1186,7 @@ int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
return -GSI_STATUS_NODEV;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1093,7 +1220,7 @@ int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
return -GSI_STATUS_NODEV;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1160,7 +1287,7 @@ int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1194,7 +1321,7 @@ int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
return -GSI_STATUS_NODEV;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1255,7 +1382,7 @@ int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1291,7 +1418,7 @@ int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1382,7 +1509,7 @@ static int gsi_validate_channel_props(struct gsi_chan_props *props)
{
uint64_t ra;
- if (props->ch_id >= GSI_MAX_CHAN) {
+ if (props->ch_id >= gsi_ctx->max_ch) {
GSIERR("ch_id %u invalid\n", props->ch_id);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1489,6 +1616,7 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
ctx->props = *props;
mutex_lock(&gsi_ctx->mlock);
+ gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
val = (((props->ch_id << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
@@ -1573,7 +1701,7 @@ int gsi_write_channel_scratch(unsigned long chan_hdl,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1610,7 +1738,7 @@ int gsi_query_channel_db_addr(unsigned long chan_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1642,7 +1770,7 @@ int gsi_start_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1659,6 +1787,7 @@ int gsi_start_channel(unsigned long chan_hdl)
mutex_lock(&gsi_ctx->mlock);
init_completion(&ctx->compl);
+ gsi_ctx->ch_dbg[chan_hdl].ch_start++;
val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
@@ -1694,7 +1823,7 @@ int gsi_stop_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1716,6 +1845,7 @@ int gsi_stop_channel(unsigned long chan_hdl)
mutex_lock(&gsi_ctx->mlock);
init_completion(&ctx->compl);
+ gsi_ctx->ch_dbg[chan_hdl].ch_stop++;
val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
@@ -1763,7 +1893,7 @@ int gsi_stop_db_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1784,6 +1914,7 @@ int gsi_stop_db_channel(unsigned long chan_hdl)
mutex_lock(&gsi_ctx->mlock);
init_completion(&ctx->compl);
+ gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++;
val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
@@ -1832,7 +1963,7 @@ int gsi_reset_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1848,6 +1979,7 @@ int gsi_reset_channel(unsigned long chan_hdl)
reset:
init_completion(&ctx->compl);
+ gsi_ctx->ch_dbg[chan_hdl].ch_reset++;
val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
@@ -1869,6 +2001,7 @@ reset:
/* workaround: reset GSI producers again */
if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
+ usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
reset_done = true;
goto reset;
}
@@ -1898,7 +2031,7 @@ int gsi_dealloc_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1913,6 +2046,7 @@ int gsi_dealloc_channel(unsigned long chan_hdl)
mutex_lock(&gsi_ctx->mlock);
init_completion(&ctx->compl);
+ gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
@@ -2021,7 +2155,7 @@ int gsi_query_channel_info(unsigned long chan_hdl,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !info) {
+ if (chan_hdl >= gsi_ctx->max_ch || !info) {
GSIERR("bad params chan_hdl=%lu info=%p\n", chan_hdl, info);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2091,7 +2225,7 @@ int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !is_empty) {
+ if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
GSIERR("bad params chan_hdl=%lu is_empty=%p\n",
chan_hdl, is_empty);
return -GSI_STATUS_INVALID_PARAMS;
@@ -2155,7 +2289,7 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !num_xfers || !xfer) {
+ if (chan_hdl >= gsi_ctx->max_ch || !num_xfers || !xfer) {
GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%p\n",
chan_hdl, num_xfers, xfer);
return -GSI_STATUS_INVALID_PARAMS;
@@ -2242,7 +2376,7 @@ int gsi_start_xfer(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2278,7 +2412,7 @@ int gsi_poll_channel(unsigned long chan_hdl,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !notify) {
+ if (chan_hdl >= gsi_ctx->max_ch || !notify) {
GSIERR("bad params chan_hdl=%lu notify=%p\n", chan_hdl, notify);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2327,7 +2461,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2390,7 +2524,7 @@ int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2426,7 +2560,7 @@ int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2471,9 +2605,9 @@ static void gsi_configure_ieps(void *base)
gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS);
- gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_OFFS);
- gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_OFFS);
- gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_OFFS);
+ gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
+ gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
+ gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
@@ -2502,9 +2636,9 @@ static void gsi_configure_bck_prs_matrix(void *base)
gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS);
gsi_writel(0x00000000,
gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS);
- gsi_writel(0x00ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
+ gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS);
- gsi_writel(0xfdffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
+ gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS);
gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS);
gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS);
@@ -2551,15 +2685,35 @@ int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
}
/* Enable the MCS and set to x2 clocks */
- value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
- GSI_GSI_CFG_GSI_ENABLE_BMSK) |
- ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
- GSI_GSI_CFG_MCS_ENABLE_BMSK) |
- ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
- GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
- ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
- GSI_GSI_CFG_UC_IS_MCS_BMSK));
- gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+ if (gsi_ctx->per.ver >= GSI_VER_1_2) {
+ value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) &
+ GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK);
+ gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS);
+
+ value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
+ GSI_GSI_CFG_GSI_ENABLE_BMSK) |
+ ((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
+ GSI_GSI_CFG_MCS_ENABLE_BMSK) |
+ ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
+ GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
+ ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
+ GSI_GSI_CFG_UC_IS_MCS_BMSK) |
+ ((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) &
+ GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) |
+ ((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) &
+ GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK));
+ gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+ } else {
+ value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
+ GSI_GSI_CFG_GSI_ENABLE_BMSK) |
+ ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
+ GSI_GSI_CFG_MCS_ENABLE_BMSK) |
+ ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
+ GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
+ ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
+ GSI_GSI_CFG_UC_IS_MCS_BMSK));
+ gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+ }
iounmap(gsi_base);
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index 1d438ffb8b76..750ae2b329d3 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -19,8 +19,8 @@
#include <linux/spinlock.h>
#include <linux/msm_gsi.h>
-#define GSI_MAX_CHAN 31
-#define GSI_MAX_EVT_RING 23
+#define GSI_CHAN_MAX 31
+#define GSI_EVT_RING_MAX 23
#define GSI_NO_EVT_ERINDEX 31
#define gsi_readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
@@ -125,13 +125,24 @@ struct gsi_ee_scratch {
uint32_t word1;
};
+struct ch_debug_stats {
+ unsigned long ch_allocate;
+ unsigned long ch_start;
+ unsigned long ch_stop;
+ unsigned long ch_reset;
+ unsigned long ch_de_alloc;
+ unsigned long ch_db_stop;
+ unsigned long cmd_completed;
+};
+
struct gsi_ctx {
void __iomem *base;
struct device *dev;
struct gsi_per_props per;
bool per_registered;
- struct gsi_chan_ctx chan[GSI_MAX_CHAN];
- struct gsi_evt_ctx evtr[GSI_MAX_EVT_RING];
+ struct gsi_chan_ctx chan[GSI_CHAN_MAX];
+ struct ch_debug_stats ch_dbg[GSI_CHAN_MAX];
+ struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
struct mutex mlock;
spinlock_t slock;
unsigned long evt_bmap;
@@ -141,6 +152,8 @@ struct gsi_ctx {
struct gsi_ee_scratch scratch;
int num_ch_dp_stats;
struct workqueue_struct *dp_stat_wq;
+ u32 max_ch;
+ u32 max_ev;
};
enum gsi_re_type {
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index 2ab8b79acc6d..5eb9084292a4 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -71,7 +71,7 @@ static ssize_t gsi_dump_evt(struct file *file,
TDBG("arg1=%u arg2=%u\n", arg1, arg2);
- if (arg1 >= GSI_MAX_EVT_RING) {
+ if (arg1 >= gsi_ctx->max_ev) {
TERR("invalid evt ring id %u\n", arg1);
return -EFAULT;
}
@@ -184,7 +184,7 @@ static ssize_t gsi_dump_ch(struct file *file,
TDBG("arg1=%u arg2=%u\n", arg1, arg2);
- if (arg1 >= GSI_MAX_CHAN) {
+ if (arg1 >= gsi_ctx->max_ch) {
TERR("invalid chan id %u\n", arg1);
return -EFAULT;
}
@@ -271,9 +271,30 @@ static ssize_t gsi_dump_ee(struct file *file,
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
TERR("EE%2d STATUS 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val);
+ if (gsi_ctx->per.ver == GSI_VER_1_0) {
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val);
+ } else if (gsi_ctx->per.ver == GSI_VER_1_2) {
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
+ } else if (gsi_ctx->per.ver == GSI_VER_1_3) {
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_2 0x%x\n", gsi_ctx->per.ee, val);
+ } else {
+ WARN_ON(1);
+ }
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_SW_VERSION_OFFS(gsi_ctx->per.ee));
TERR("EE%2d SW_VERSION 0x%x\n", gsi_ctx->per.ee, val);
@@ -329,7 +350,7 @@ static ssize_t gsi_dump_map(struct file *file,
int i;
TERR("EVT bitmap 0x%lx\n", gsi_ctx->evt_bmap);
- for (i = 0; i < GSI_MAX_CHAN; i++) {
+ for (i = 0; i < gsi_ctx->max_ch; i++) {
ctx = &gsi_ctx->chan[i];
if (ctx->allocated) {
@@ -402,8 +423,8 @@ static ssize_t gsi_dump_stats(struct file *file,
if (ch_id == -1) {
min = 0;
- max = GSI_MAX_CHAN;
- } else if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ max = gsi_ctx->max_ch;
+ } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
} else {
@@ -464,7 +485,7 @@ static ssize_t gsi_enable_dp_stats(struct file *file,
if (kstrtos32(dbg_buff + 1, 0, &ch_id))
goto error;
- if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
}
@@ -540,7 +561,7 @@ static ssize_t gsi_set_max_elem_dp_stats(struct file *file,
/* get */
if (kstrtou32(dbg_buff, 0, &ch_id))
goto error;
- if (ch_id >= GSI_MAX_CHAN)
+ if (ch_id >= gsi_ctx->max_ch)
goto error;
PRT_STAT("ch %d: max_re_expected=%d\n", ch_id,
gsi_ctx->chan[ch_id].props.max_re_expected);
@@ -553,7 +574,7 @@ static ssize_t gsi_set_max_elem_dp_stats(struct file *file,
TDBG("ch_id=%u max_elem=%u\n", ch_id, max_elem);
- if (ch_id >= GSI_MAX_CHAN) {
+ if (ch_id >= gsi_ctx->max_ch) {
TERR("invalid chan id %u\n", ch_id);
goto error;
}
@@ -572,7 +593,7 @@ static void gsi_wq_print_dp_stats(struct work_struct *work)
{
int ch_id;
- for (ch_id = 0; ch_id < GSI_MAX_CHAN; ch_id++) {
+ for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
if (gsi_ctx->chan[ch_id].print_dp_stats)
gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]);
}
@@ -618,7 +639,7 @@ static void gsi_wq_update_dp_stats(struct work_struct *work)
{
int ch_id;
- for (ch_id = 0; ch_id < GSI_MAX_CHAN; ch_id++) {
+ for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
if (gsi_ctx->chan[ch_id].allocated &&
gsi_ctx->chan[ch_id].props.prot != GSI_CHAN_PROT_GPI &&
gsi_ctx->chan[ch_id].enable_dp_stats)
@@ -649,8 +670,8 @@ static ssize_t gsi_rst_stats(struct file *file,
if (ch_id == -1) {
min = 0;
- max = GSI_MAX_CHAN;
- } else if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ max = gsi_ctx->max_ch;
+ } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
} else {
@@ -691,7 +712,7 @@ static ssize_t gsi_print_dp_stats(struct file *file,
if (kstrtos32(dbg_buff + 1, 0, &ch_id))
goto error;
- if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
}
diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h
index 36a74105b490..fa1e84896f73 100644
--- a/drivers/platform/msm/gsi/gsi_reg.h
+++ b/drivers/platform/msm/gsi/gsi_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,10 @@
#define GSI_GSI_CFG_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000000)
#define GSI_GSI_CFG_RMSK 0xf
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5
+#define GSI_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10
+#define GSI_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4
#define GSI_GSI_CFG_UC_IS_MCS_BMSK 0x8
#define GSI_GSI_CFG_UC_IS_MCS_SHFT 0x3
#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4
@@ -26,6 +30,11 @@
#define GSI_GSI_CFG_GSI_ENABLE_BMSK 0x1
#define GSI_GSI_CFG_GSI_ENABLE_SHFT 0x0
+#define GSI_GSI_MCS_CFG_OFFS \
+ (GSI_GSI_REG_BASE_OFFS + 0x0000B000)
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK 0x1
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT 0x0
+
#define GSI_GSI_MANAGER_MCS_CODE_VER_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000008)
#define GSI_GSI_MANAGER_MCS_CODE_VER_RMSK 0xffffffff
@@ -99,8 +108,20 @@
#define GSI_GSI_CGC_CTRL_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000060)
#define GSI_GSI_CGC_CTRL_RMSK 0x3f
-#define GSI_GSI_CGC_CTRL_REGION_6_DEBUG_CNTRS_EN_BMSK 0x20
-#define GSI_GSI_CGC_CTRL_REGION_6_DEBUG_CNTRS_EN_SHFT 0x5
+#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_BMSK 0x800
+#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_SHFT 0xb
+#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_BMSK 0x400
+#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_SHFT 0xa
+#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_BMSK 0x200
+#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_SHFT 0x9
+#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_BMSK 0x100
+#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_SHFT 0x8
+#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_BMSK 0x80
+#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_SHFT 0x7
+#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_BMSK 0x40
+#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_SHFT 0x6
+#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_BMSK 0x20
+#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_SHFT 0x5
#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_BMSK 0x10
#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_SHFT 0x4
#define GSI_GSI_CGC_CTRL_REGION_4_HW_CGC_EN_BMSK 0x8
@@ -619,23 +640,23 @@
#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff
#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_OFFS \
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000430)
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_RMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_IRAM_PTR_BMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_IRAM_PTR_SHFT 0x0
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_SHFT 0x0
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_OFFS \
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000434)
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_RMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_IRAM_PTR_BMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_IRAM_PTR_SHFT 0x0
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_SHFT 0x0
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_OFFS \
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000438)
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_RMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_IRAM_PTR_BMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_IRAM_PTR_SHFT 0x0
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_SHFT 0x0
#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x0000043c)
@@ -701,7 +722,9 @@
#define GSI_GSI_DEBUG_BUSY_REG_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00001010)
-#define GSI_GSI_DEBUG_BUSY_REG_RMSK 0x7f
+#define GSI_GSI_DEBUG_BUSY_REG_RMSK 0xff
+#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_BMSK 0x80
+#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_SHFT 0x7
#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_BMSK 0x40
#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_SHFT 0x6
#define GSI_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_BMSK 0x20
@@ -1345,22 +1368,150 @@
#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0xffffffff
#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0
-#define GSI_EE_n_GSI_HW_PARAM_OFFS(n) \
+/* v1.0 */
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_RMSK 0x7fffffff
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_MAXn 3
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0
+
+/* v1.2 */
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n))
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_MAXn 2
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_MAXn 2
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \
+ 0x80000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \
+ 0x40000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0
+
+/* v1.3 */
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n))
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f03c + 0x4000 * (n))
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \
+ 0x80000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \
+ 0x40000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0
+
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(n) \
(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
-#define GSI_EE_n_GSI_HW_PARAM_RMSK 0x7fffffff
-#define GSI_EE_n_GSI_HW_PARAM_MAXn 3
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a
-#define GSI_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000
-#define GSI_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14
-#define GSI_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000
-#define GSI_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10
-#define GSI_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00
-#define GSI_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8
-#define GSI_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff
-#define GSI_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
#define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \
(GSI_GSI_REG_BASE_OFFS + 0x0001f044 + 0x4000 * (n))
@@ -1662,7 +1813,7 @@
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_RMSK 0xffffffff
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_MAXn 3
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \
- 0xffffffff
+ 0x00003fff
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) \
@@ -1670,7 +1821,7 @@
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_RMSK 0xffffffff
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_MAXn 3
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \
- 0xffffffff
+ 0x000003ff
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) \
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 05ce3969a5c7..75b193def36e 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -26,7 +26,8 @@
#define IPA_API_DISPATCH_RETURN(api, p...) \
do { \
if (!ipa_api_ctrl) { \
- pr_err("IPA HW is not supported on this target\n"); \
+ pr_err("%s:%d IPA HW is not supported\n", \
+ __func__, __LINE__); \
ret = -EPERM; \
} \
else { \
@@ -44,7 +45,8 @@
#define IPA_API_DISPATCH(api, p...) \
do { \
if (!ipa_api_ctrl) \
- pr_err("IPA HW is not supported on this target\n"); \
+ pr_err("%s:%d IPA HW is not supported\n", \
+ __func__, __LINE__); \
else { \
if (ipa_api_ctrl->api) { \
ipa_api_ctrl->api(p); \
@@ -59,7 +61,8 @@
#define IPA_API_DISPATCH_RETURN_PTR(api, p...) \
do { \
if (!ipa_api_ctrl) { \
- pr_err("IPA HW is not supported on this target\n"); \
+ pr_err("%s:%d IPA HW is not supported\n", \
+ __func__, __LINE__); \
ret = NULL; \
} \
else { \
@@ -77,7 +80,8 @@
#define IPA_API_DISPATCH_RETURN_BOOL(api, p...) \
do { \
if (!ipa_api_ctrl) { \
- pr_err("IPA HW is not supported on this target\n"); \
+ pr_err("%s:%d IPA HW is not supported\n", \
+ __func__, __LINE__); \
ret = false; \
} \
else { \
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 838b78c1934d..d18308344431 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -2034,7 +2034,7 @@ static void ipa_usb_debugfs_init(void)
ipa3_usb_ctx->dent = debugfs_create_dir("ipa_usb", 0);
if (IS_ERR(ipa3_usb_ctx->dent)) {
- IPA_USB_ERR("fail to create folder in debug_fs.\n");
+ pr_err("fail to create folder in debug_fs.\n");
return;
}
@@ -2043,7 +2043,7 @@ static void ipa_usb_debugfs_init(void)
&ipa3_ipa_usb_ops);
if (!ipa3_usb_ctx->dfile_state_info ||
IS_ERR(ipa3_usb_ctx->dfile_state_info)) {
- IPA_USB_ERR("failed to create file for state_info\n");
+ pr_err("failed to create file for state_info\n");
goto fail;
}
@@ -2644,11 +2644,11 @@ static int __init ipa3_usb_init(void)
unsigned long flags;
int res;
- IPA_USB_DBG("entry\n");
+ pr_debug("entry\n");
ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL);
if (ipa3_usb_ctx == NULL) {
- IPA_USB_ERR("failed to allocate memory\n");
- IPA_USB_ERR(":ipa_usb init failed\n");
+ pr_err("failed to allocate memory\n");
+ pr_err(":ipa_usb init failed\n");
return -EFAULT;
}
memset(ipa3_usb_ctx, 0, sizeof(struct ipa3_usb_context));
@@ -2680,19 +2680,19 @@ static int __init ipa3_usb_init(void)
ipa3_usb_ctx->wq = create_singlethread_workqueue("ipa_usb_wq");
if (!ipa3_usb_ctx->wq) {
- IPA_USB_ERR("failed to create workqueue\n");
+ pr_err("failed to create workqueue\n");
res = -EFAULT;
goto ipa_usb_workqueue_fail;
}
ipa_usb_debugfs_init();
- IPA_USB_INFO("exit: IPA_USB init success!\n");
+ pr_info("exit: IPA_USB init success!\n");
return 0;
ipa_usb_workqueue_fail:
- IPA_USB_ERR(":init failed (%d)\n", -res);
+ pr_err(":init failed (%d)\n", -res);
kfree(ipa3_usb_ctx);
return res;
}
diff --git a/drivers/platform/msm/ipa/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c
index 209264d69b26..bcdd99deae1f 100644
--- a/drivers/platform/msm/ipa/ipa_rm.c
+++ b/drivers/platform/msm/ipa/ipa_rm.c
@@ -820,7 +820,8 @@ static void ipa_rm_wq_resume_handler(struct work_struct *work)
}
ipa_rm_resource_consumer_request_work(
(struct ipa_rm_resource_cons *)resource,
- ipa_rm_work->prev_state, ipa_rm_work->needed_bw, true);
+ ipa_rm_work->prev_state, ipa_rm_work->needed_bw, true,
+ ipa_rm_work->inc_usage_count);
spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
bail:
kfree(ipa_rm_work);
@@ -916,7 +917,8 @@ int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name,
int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_state prev_state,
- u32 needed_bw)
+ u32 needed_bw,
+ bool inc_usage_count)
{
int result = -ENOMEM;
struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work),
@@ -926,6 +928,7 @@ int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
work->resource_name = resource_name;
work->prev_state = prev_state;
work->needed_bw = needed_bw;
+ work->inc_usage_count = inc_usage_count;
result = queue_work(ipa_rm_ctx->ipa_rm_wq,
(struct work_struct *)work);
} else {
diff --git a/drivers/platform/msm/ipa/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_rm_i.h
index eb86c54d7382..1610bb1e1ead 100644
--- a/drivers/platform/msm/ipa/ipa_rm_i.h
+++ b/drivers/platform/msm/ipa/ipa_rm_i.h
@@ -118,6 +118,7 @@ struct ipa_rm_wq_suspend_resume_work_type {
enum ipa_rm_resource_name resource_name;
enum ipa_rm_resource_state prev_state;
u32 needed_bw;
+ bool inc_usage_count;
};
@@ -128,7 +129,8 @@ int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd,
int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_state prev_state,
- u32 needed_bw);
+ u32 needed_bw,
+ bool inc_usage_count);
int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_state prev_state,
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c
index 0a3f66307eee..da4490ce0aa0 100644
--- a/drivers/platform/msm/ipa/ipa_rm_resource.c
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.c
@@ -116,7 +116,8 @@ bail:
int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer,
enum ipa_rm_resource_state prev_state,
u32 prod_needed_bw,
- bool notify_completion)
+ bool notify_completion,
+ bool dec_client_on_err)
{
int driver_result;
@@ -135,7 +136,8 @@ int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer,
} else if (driver_result != -EINPROGRESS) {
consumer->resource.state = prev_state;
consumer->resource.needed_bw -= prod_needed_bw;
- consumer->usage_count--;
+ if (dec_client_on_err)
+ consumer->usage_count--;
}
return driver_result;
@@ -170,19 +172,22 @@ int ipa_rm_resource_consumer_request(
ipa_rm_resource_str(consumer->resource.name));
ipa_rm_wq_send_resume_cmd(consumer->resource.name,
prev_state,
- prod_needed_bw);
+ prod_needed_bw,
+ inc_usage_count);
result = -EINPROGRESS;
break;
}
result = ipa_rm_resource_consumer_request_work(consumer,
prev_state,
prod_needed_bw,
- false);
+ false,
+ inc_usage_count);
break;
case IPA_RM_GRANTED:
if (wake_client) {
result = ipa_rm_resource_consumer_request_work(
- consumer, prev_state, prod_needed_bw, false);
+ consumer, prev_state, prod_needed_bw, false,
+ inc_usage_count);
break;
}
ipa_rm_perf_profile_change(consumer->resource.name);
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.h b/drivers/platform/msm/ipa/ipa_rm_resource.h
index 5c3a0190753f..da149c51c96c 100644
--- a/drivers/platform/msm/ipa/ipa_rm_resource.h
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.h
@@ -155,7 +155,8 @@ int ipa_rm_resource_producer_print_stat(
int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer,
enum ipa_rm_resource_state prev_state,
u32 needed_bw,
- bool notify_completion);
+ bool notify_completion,
+ bool dec_client_on_err);
int ipa_rm_resource_consumer_release_work(
struct ipa_rm_resource_cons *consumer,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 9cb0b1f3c379..804c89dc9533 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -207,7 +207,6 @@ struct platform_device *ipa_pdev;
static struct {
bool present;
bool arm_smmu;
- bool disable_htw;
bool fast_map;
bool s1_bypass;
u32 ipa_base;
@@ -4313,9 +4312,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
- smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
- "qcom,smmu-disable-htw");
-
/* Get IPA HW Version */
result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
&ipa_drv_res->ipa_hw_type);
@@ -4502,7 +4498,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -4519,17 +4514,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
}
cb->valid = true;
- if (smmu_info.disable_htw) {
- ret = iommu_domain_set_attr(cb->iommu,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret) {
- IPAERR("couldn't disable coherent HTW\n");
- cb->valid = false;
- return -EIO;
- }
- }
-
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->iommu,
DOMAIN_ATTR_S1_BYPASS,
@@ -4589,7 +4573,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
static int ipa_smmu_uc_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int ret;
int fast = 1;
@@ -4628,18 +4611,6 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- }
-
IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
@@ -4694,7 +4665,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
int result;
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -4731,18 +4701,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- IPADBG("SMMU disable HTW\n");
- }
-
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 0eab77d27760..50c387ec785d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -1420,6 +1420,7 @@ static ssize_t ipa_read_nat4(struct file *file,
u16 enable, tbl_entry, flag;
u32 no_entrys = 0;
+ mutex_lock(&ipa_ctx->nat_mem.lock);
value = ipa_ctx->nat_mem.public_ip_addr;
pr_err(
"Table IP Address:%d.%d.%d.%d\n",
@@ -1573,6 +1574,7 @@ static ssize_t ipa_read_nat4(struct file *file,
}
}
pr_err("Current No. Nat Entries: %d\n", no_entrys);
+ mutex_unlock(&ipa_ctx->nat_mem.lock);
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 16f50030b960..3c2a6d4620ba 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -3152,23 +3152,23 @@ static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
} else if (in->client ==
IPA_CLIENT_APPS_WAN_CONS) {
sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
- if (in->recycle_enabled) {
+ sys->rx_pool_sz = ipa_ctx->wan_rx_ring_size;
+ if (nr_cpu_ids > 1) {
sys->repl_hdlr =
- ipa_replenish_rx_cache_recycle;
- sys->rx_pool_sz =
- IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ ipa_fast_replenish_rx_cache;
+ sys->repl_trig_thresh =
+ sys->rx_pool_sz / 8;
} else {
- if (nr_cpu_ids > 1) {
- sys->repl_hdlr =
- ipa_fast_replenish_rx_cache;
- sys->repl_trig_thresh =
- sys->rx_pool_sz / 8;
- } else {
+ sys->repl_hdlr =
+ ipa_replenish_rx_cache;
+ }
+ if (in->napi_enabled) {
+ sys->rx_pool_sz =
+ IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ if (in->recycle_enabled) {
sys->repl_hdlr =
- ipa_replenish_rx_cache;
+ ipa_replenish_rx_cache_recycle;
}
- sys->rx_pool_sz =
- ipa_ctx->wan_rx_ring_size;
}
sys->ep->wakelock_client =
IPA_WAKELOCK_REF_CLIENT_WAN_RX;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
index 249de808ec5c..f5afb4b0141c 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
@@ -523,10 +523,9 @@ ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
start = buf;
while (1) {
- prepare_to_wait(&ipa_ctx->msg_waitq, &wait, TASK_INTERRUPTIBLE);
-
mutex_lock(&ipa_ctx->msg_lock);
locked = 1;
+ prepare_to_wait(&ipa_ctx->msg_waitq, &wait, TASK_INTERRUPTIBLE);
if (!list_empty(&ipa_ctx->msg_list)) {
msg = list_first_entry(&ipa_ctx->msg_list,
struct ipa_push_msg, link);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index 137a43a1217b..3f20941155a5 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -493,6 +493,8 @@ static int qmi_init_modem_send_sync_msg(void)
resp_desc.ei_array = ipa_init_modem_driver_resp_msg_data_v01_ei;
pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp),
QMI_SEND_REQ_TIMEOUT_MS);
@@ -538,7 +540,8 @@ int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01;
resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
resp_desc.ei_array = ipa_install_fltr_rule_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
req,
sizeof(struct ipa_install_fltr_rule_req_msg_v01),
@@ -574,7 +577,8 @@ int qmi_enable_force_clear_datapath_send(
resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
resp_desc.ei_array =
ipa_enable_force_clear_datapath_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -618,7 +622,8 @@ int qmi_disable_force_clear_datapath_send(
resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
resp_desc.ei_array =
ipa_disable_force_clear_datapath_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -688,7 +693,8 @@ int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req)
QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01;
resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
resp_desc.ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -1089,7 +1095,8 @@ int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
resp_desc.ei_array = ipa_get_data_stats_resp_msg_data_v01_ei;
IPAWANDBG("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_get_data_stats_req_msg_v01),
&resp_desc, resp,
@@ -1118,7 +1125,8 @@ int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
resp_desc.ei_array = ipa_get_apn_data_stats_resp_msg_data_v01_ei;
IPAWANDBG("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_get_apn_data_stats_req_msg_v01),
&resp_desc, resp,
@@ -1150,7 +1158,8 @@ int ipa_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req)
resp_desc.ei_array = ipa_set_data_usage_quota_resp_msg_data_v01_ei;
IPAWANDBG("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_set_data_usage_quota_req_msg_v01),
&resp_desc, &resp, sizeof(resp),
@@ -1184,7 +1193,8 @@ int ipa_qmi_stop_data_qouta(void)
resp_desc.ei_array = ipa_stop_data_usage_quota_resp_msg_data_v01_ei;
IPAWANDBG("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp),
QMI_SEND_STATS_REQ_TIMEOUT_MS);
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index ebb93e246048..96003d7a16a0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1259,11 +1259,13 @@ static int handle_ingress_format(struct net_device *dev,
ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit =
in->u.ingress_format.agg_count;
- ipa_to_apps_ep_cfg.recycle_enabled = true;
- ep_cfg = (struct rmnet_phys_ep_conf_s *)
- rcu_dereference(dev->rx_handler_data);
- ep_cfg->recycle = ipa_recycle_wan_skb;
- pr_info("Wan Recycle Enabled\n");
+ if (ipa_rmnet_res.ipa_napi_enable) {
+ ipa_to_apps_ep_cfg.recycle_enabled = true;
+ ep_cfg = (struct rmnet_phys_ep_conf_s *)
+ rcu_dereference(dev->rx_handler_data);
+ ep_cfg->recycle = ipa_recycle_wan_skb;
+ pr_info("Wan Recycle Enabled\n");
+ }
}
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 24fbc5c738d8..ab62dbcddd22 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -251,7 +251,6 @@ struct platform_device *ipa3_pdev;
static struct {
bool present;
bool arm_smmu;
- bool disable_htw;
bool fast_map;
bool s1_bypass;
bool use_64_bit_dma_mask;
@@ -3791,6 +3790,32 @@ static int ipa3_gsi_pre_fw_load_init(void)
return 0;
}
+static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
+{
+ enum gsi_ver gsi_ver;
+
+ switch (ipa_hw_type) {
+ case IPA_HW_v3_0:
+ case IPA_HW_v3_1:
+ gsi_ver = GSI_VER_1_0;
+ break;
+ case IPA_HW_v3_5:
+ gsi_ver = GSI_VER_1_2;
+ break;
+ case IPA_HW_v3_5_1:
+ gsi_ver = GSI_VER_1_3;
+ break;
+ default:
+ IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
+ WARN_ON(1);
+ gsi_ver = GSI_VER_ERR;
+ }
+
+ IPADBG("GSI version %d\n", gsi_ver);
+
+ return gsi_ver;
+}
+
/**
* ipa3_post_init() - Initialize the IPA Driver (Part II).
* This part contains all initialization which requires interaction with
@@ -3820,6 +3845,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
memset(&gsi_props, 0, sizeof(gsi_props));
+ gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
gsi_props.ee = resource_p->ee;
gsi_props.intr = GSI_INTR_IRQ;
gsi_props.irq = resource_p->transport_irq;
@@ -4695,9 +4721,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->ipa_tz_unlock_reg_num = 0;
ipa_drv_res->ipa_tz_unlock_reg = NULL;
- smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
- "qcom,smmu-disable-htw");
-
/* Get IPA HW Version */
result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
&ipa_drv_res->ipa_hw_type);
@@ -4953,7 +4976,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -4973,17 +4995,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
}
cb->valid = true;
- if (smmu_info.disable_htw) {
- ret = iommu_domain_set_attr(cb->iommu,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret) {
- IPAERR("couldn't disable coherent HTW\n");
- cb->valid = false;
- return -EIO;
- }
- }
-
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->iommu,
DOMAIN_ATTR_S1_BYPASS,
@@ -5056,7 +5067,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
static int ipa_smmu_uc_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int bypass = 1;
int fast = 1;
@@ -5102,18 +5112,6 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- }
-
IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
@@ -5168,7 +5166,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
int result;
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -5216,17 +5213,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- IPADBG("SMMU disable HTW\n");
- }
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 3915f652d87b..25e5e3b74f26 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -1478,6 +1478,7 @@ static ssize_t ipa3_read_nat4(struct file *file,
u16 enable, tbl_entry, flag;
u32 no_entrys = 0;
+ mutex_lock(&ipa3_ctx->nat_mem.lock);
value = ipa3_ctx->nat_mem.public_ip_addr;
pr_err(
"Table IP Address:%d.%d.%d.%d\n",
@@ -1631,6 +1632,7 @@ static ssize_t ipa3_read_nat4(struct file *file,
}
}
pr_err("Current No. Nat Entries: %d\n", no_entrys);
+ mutex_unlock(&ipa3_ctx->nat_mem.lock);
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 643e40402499..09c7c1b0fd05 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -778,10 +778,28 @@ static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
*/
int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
{
+ return ipa3_send_cmd_timeout(num_desc, descr, 0);
+}
+
+/**
+ * ipa3_send_cmd_timeout - send immediate commands with limited time
+ * waiting for ACK from IPA HW
+ * @num_desc: number of descriptors within the desc struct
+ * @descr: descriptor structure
+ * @timeout: millisecond to wait till get ACK from IPA HW
+ *
+ * Function will block till command gets ACK from IPA HW or timeout.
+ * Caller needs to free any resources it allocated after function returns
+ * The callback in ipa3_desc should not be set by the caller
+ * for this function.
+ */
+int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
+{
struct ipa3_desc *desc;
int i, result = 0;
struct ipa3_sys_context *sys;
int ep_idx;
+ int completed;
for (i = 0; i < num_desc; i++)
IPADBG("sending imm cmd %d\n", descr[i].opcode);
@@ -808,7 +826,14 @@ int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
result = -EFAULT;
goto bail;
}
- wait_for_completion(&descr->xfer_done);
+ if (timeout) {
+ completed = wait_for_completion_timeout(
+ &descr->xfer_done, msecs_to_jiffies(timeout));
+ if (!completed)
+ IPADBG("timeout waiting for imm-cmd ACK\n");
+ } else {
+ wait_for_completion(&descr->xfer_done);
+ }
} else {
desc = &descr[num_desc - 1];
init_completion(&desc->xfer_done);
@@ -823,7 +848,15 @@ int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
result = -EFAULT;
goto bail;
}
- wait_for_completion(&desc->xfer_done);
+ if (timeout) {
+ completed = wait_for_completion_timeout(
+ &desc->xfer_done, msecs_to_jiffies(timeout));
+ if (!completed)
+ IPADBG("timeout waiting for imm-cmd ACK\n");
+ } else {
+ wait_for_completion(&desc->xfer_done);
+ }
+
}
bail:
@@ -3180,22 +3213,20 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
IPA_CLIENT_APPS_WAN_CONS) {
sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
sys->free_rx_wrapper = ipa3_free_rx_wrapper;
- if (in->recycle_enabled) {
+ sys->rx_pool_sz = ipa3_ctx->wan_rx_ring_size;
+ if (nr_cpu_ids > 1) {
sys->repl_hdlr =
- ipa3_replenish_rx_cache_recycle;
- sys->rx_pool_sz =
- IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ ipa3_fast_replenish_rx_cache;
} else {
- if (nr_cpu_ids > 1) {
- sys->repl_hdlr =
- ipa3_fast_replenish_rx_cache;
- } else {
- sys->repl_hdlr =
- ipa3_replenish_rx_cache;
- }
- sys->rx_pool_sz =
- ipa3_ctx->wan_rx_ring_size;
+ sys->repl_hdlr =
+ ipa3_replenish_rx_cache;
}
+ if (in->napi_enabled)
+ sys->rx_pool_sz =
+ IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ if (in->napi_enabled && in->recycle_enabled)
+ sys->repl_hdlr =
+ ipa3_replenish_rx_cache_recycle;
in->ipa_ep_cfg.aggr.aggr_sw_eof_active
= true;
if (ipa3_ctx->
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 11da023c9d6a..93fa1492cfd5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -326,7 +326,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
int needed_len;
int mem_size;
- IPADBG_LOW("processing type %d hdr_hdl %d\n",
+ IPADBG_LOW("Add processing type %d hdr_hdl %d\n",
proc_ctx->type, proc_ctx->hdr_hdl);
if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
@@ -335,10 +335,17 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
}
hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
- if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) {
+ if (!hdr_entry) {
IPAERR("hdr_hdl is invalid\n");
return -EINVAL;
}
+ if (hdr_entry->cookie != IPA_COOKIE) {
+ IPAERR("Invalid header cookie %u\n", hdr_entry->cookie);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n",
+ hdr_entry->name, hdr_entry->is_hdr_proc_ctx);
entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
if (!entry) {
@@ -403,7 +410,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
entry->offset_entry = offset;
list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
htbl->proc_ctx_cnt++;
- IPADBG_LOW("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
+ IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
htbl->proc_ctx_cnt, offset->offset);
id = ipa3_id_alloc(entry);
@@ -520,12 +527,12 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
list_add(&entry->link, &htbl->head_hdr_entry_list);
htbl->hdr_cnt++;
if (entry->is_hdr_proc_ctx)
- IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+ IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
hdr->hdr_len,
htbl->hdr_cnt,
&entry->phys_base);
else
- IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
+ IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
hdr->hdr_len,
htbl->hdr_cnt,
entry->offset_entry->offset);
@@ -580,7 +587,7 @@ static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl, bool release_hdr)
return -EINVAL;
}
- IPADBG("del ctx proc cnt=%d ofst=%d\n",
+ IPADBG("del proc ctx cnt=%d ofst=%d\n",
htbl->proc_ctx_cnt, entry->offset_entry->offset);
if (--entry->ref_cnt) {
@@ -624,11 +631,12 @@ int __ipa3_del_hdr(u32 hdr_hdl)
}
if (entry->is_hdr_proc_ctx)
- IPADBG("del hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+ IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
else
- IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len,
- htbl->hdr_cnt, entry->offset_entry->offset);
+ IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n",
+ entry->hdr_len, htbl->hdr_cnt,
+ entry->offset_entry->offset);
if (--entry->ref_cnt) {
IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 6f86448319db..33be22f98b9d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -481,7 +481,7 @@ struct ipa_gsi_ep_mem_info {
struct ipa3_status_stats {
struct ipahal_pkt_status status[IPA_MAX_STATUS_STAT_NUM];
- int curr;
+ unsigned int curr;
};
/**
@@ -1835,6 +1835,7 @@ int ipa3_init_mem_partition(struct device_node *dev_node);
int ipa3_controller_static_bind(struct ipa3_controller *controller,
enum ipa_hw_type ipa_hw_type);
int ipa3_cfg_route(struct ipahal_reg_route *route);
+int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout);
int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr);
int ipa3_cfg_filter(u32 disable);
int ipa3_pipe_mem_init(u32 start_ofst, u32 size);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index 22756c1fb168..b9f57552533e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -528,12 +528,12 @@ ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
start = buf;
while (1) {
+ mutex_lock(&ipa3_ctx->msg_lock);
+ locked = 1;
prepare_to_wait(&ipa3_ctx->msg_waitq,
&wait,
TASK_INTERRUPTIBLE);
- mutex_lock(&ipa3_ctx->msg_lock);
- locked = 1;
if (!list_empty(&ipa3_ctx->msg_list)) {
msg = list_first_entry(&ipa3_ctx->msg_list,
struct ipa3_push_msg, link);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index bf8a5ade04bd..a6b075583162 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -582,6 +582,8 @@ static int ipa3_qmi_init_modem_send_sync_msg(void)
resp_desc.ei_array = ipa3_init_modem_driver_resp_msg_data_v01_ei;
pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp),
QMI_SEND_REQ_TIMEOUT_MS);
@@ -623,6 +625,8 @@ int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
resp_desc.ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei;
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
req,
sizeof(struct ipa_install_fltr_rule_req_msg_v01),
@@ -703,6 +707,8 @@ int ipa3_qmi_enable_force_clear_datapath_send(
resp_desc.ei_array =
ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei;
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -746,7 +752,8 @@ int ipa3_qmi_disable_force_clear_datapath_send(
resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
resp_desc.ei_array =
ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -803,6 +810,8 @@ int ipa3_qmi_filter_notify_send(
resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
resp_desc.ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei;
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -1213,6 +1222,8 @@ int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
IPAWANDBG_LOW("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_get_data_stats_req_msg_v01),
&resp_desc, resp,
@@ -1242,6 +1253,8 @@ int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
IPAWANDBG_LOW("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_get_apn_data_stats_req_msg_v01),
&resp_desc, resp,
@@ -1273,7 +1286,8 @@ int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req)
resp_desc.ei_array = ipa3_set_data_usage_quota_resp_msg_data_v01_ei;
IPAWANDBG_LOW("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_set_data_usage_quota_req_msg_v01),
&resp_desc, &resp, sizeof(resp),
@@ -1307,7 +1321,8 @@ int ipa3_qmi_stop_data_qouta(void)
resp_desc.ei_array = ipa3_stop_data_usage_quota_resp_msg_data_v01_ei;
IPAWANDBG_LOW("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp),
QMI_SEND_STATS_REQ_TIMEOUT_MS);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 6c7bf500e760..ac7e57f10062 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1210,8 +1210,9 @@ int __ipa3_del_rt_rule(u32 rule_hdl)
__ipa3_release_hdr_proc_ctx(entry->proc_ctx->id);
list_del(&entry->link);
entry->tbl->rule_cnt--;
- IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n",
- entry->tbl->idx, entry->tbl->rule_cnt, entry->rule_id);
+ IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n ref_cnt=%u",
+ entry->tbl->idx, entry->tbl->rule_cnt,
+ entry->rule_id, entry->tbl->ref_cnt);
idr_remove(&entry->tbl->rule_ids, entry->rule_id);
if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
if (__ipa_del_rt_tbl(entry->tbl))
@@ -1488,6 +1489,8 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
entry->ref_cnt--;
if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
+ IPADBG("zero ref_cnt, delete rt tbl (idx=%u)\n",
+ entry->idx);
if (__ipa_del_rt_tbl(entry))
IPAERR("fail to del RT tbl\n");
/* commit for put */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index a21eb9c1530b..c0a6e8b00d71 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -47,6 +47,8 @@
#define IPA_EOT_COAL_GRAN_MIN (1)
#define IPA_EOT_COAL_GRAN_MAX (16)
+#define IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC (15)
+
#define IPA_AGGR_BYTE_LIMIT (\
IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
@@ -101,7 +103,7 @@
#define IPA_GROUP_DPL IPA_GROUP_DL
#define IPA_GROUP_DIAG (2)
#define IPA_GROUP_DMA (3)
-#define IPA_GROUP_IMM_CMD IPA_GROUP_DMA
+#define IPA_GROUP_IMM_CMD IPA_GROUP_UL
#define IPA_GROUP_Q6ZIP (4)
#define IPA_GROUP_Q6ZIP_GENERAL IPA_GROUP_Q6ZIP
#define IPA_GROUP_UC_RX_Q (5)
@@ -3470,7 +3472,8 @@ int ipa3_inject_dma_task_for_gsi(void)
desc.type = IPA_IMM_CMD_DESC;
IPADBG("sending 1B packet to IPA\n");
- if (ipa3_send_cmd(1, &desc)) {
+ if (ipa3_send_cmd_timeout(1, &desc,
+ IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC)) {
IPAERR("ipa3_send_cmd failed\n");
return -EFAULT;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index bcd2cb3bfd7a..2bc179d5a33c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -1222,9 +1222,9 @@ int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
if (!base ||
!hdr_len ||
- (!phys_base && !hdr_base_addr) ||
- !hdr_base_addr ||
- ((is_hdr_proc_ctx == false) && !offset_entry)) {
+ (is_hdr_proc_ctx && !phys_base) ||
+ (!is_hdr_proc_ctx && !offset_entry) ||
+ (!is_hdr_proc_ctx && !hdr_base_addr)) {
IPAHAL_ERR(
"invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n"
, hdr_len, &phys_base, hdr_base_addr
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index a2fef45cc55f..f134852e046e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1271,11 +1271,13 @@ static int handle3_ingress_format(struct net_device *dev,
ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
in->u.ingress_format.agg_count;
- ipa_wan_ep_cfg->recycle_enabled = true;
- ep_cfg = (struct rmnet_phys_ep_conf_s *)
- rcu_dereference(dev->rx_handler_data);
- ep_cfg->recycle = ipa_recycle_wan_skb;
- pr_info("Wan Recycle Enabled\n");
+ if (ipa_wan_ep_cfg->napi_enabled) {
+ ipa_wan_ep_cfg->recycle_enabled = true;
+ ep_cfg = (struct rmnet_phys_ep_conf_s *)
+ rcu_dereference(dev->rx_handler_data);
+ ep_cfg->recycle = ipa_recycle_wan_skb;
+ pr_info("Wan Recycle Enabled\n");
+ }
}
}
@@ -1969,9 +1971,9 @@ static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
ipa_rmnet_drv_res->ipa_napi_enable =
- of_property_read_bool(pdev->dev.of_node,
- "qcom,napi");
- pr_info("IPA napi = %s\n",
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-napi-enable");
+ pr_info("IPA Napi Enable = %s\n",
ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
return 0;
}
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 6d826590cabc..45fedfa72bda 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -569,7 +569,6 @@ err_disable_vregs:
static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
{
- int disable_htw = 1;
int atomic_ctx = 1;
int rc;
int bypass_enable = 1;
@@ -587,17 +586,6 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
dev_info(ctx->dev, "IOMMU mapping created: %p\n", ctx->mapping);
rc = iommu_domain_set_attr(ctx->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (rc) {
- /* This error can be ignored and not considered fatal,
- * but let the users know this happened
- */
- dev_err(ctx->dev, "Warning: disable coherent HTW failed (%d)\n",
- rc);
- }
-
- rc = iommu_domain_set_attr(ctx->mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
if (rc) {
diff --git a/drivers/platform/msm/qpnp-revid.c b/drivers/platform/msm/qpnp-revid.c
index 0bbda4eb4116..78e685f789cd 100644
--- a/drivers/platform/msm/qpnp-revid.c
+++ b/drivers/platform/msm/qpnp-revid.c
@@ -27,6 +27,7 @@
#define REVID_SUBTYPE 0x5
#define REVID_STATUS1 0x8
#define REVID_SPARE_0 0x60
+#define REVID_FAB_ID 0xf2
#define QPNP_REVID_DEV_NAME "qcom,qpnp-revid"
@@ -154,7 +155,7 @@ static size_t build_pmic_string(char *buf, size_t n, int sid,
static int qpnp_revid_probe(struct platform_device *pdev)
{
u8 rev1, rev2, rev3, rev4, pmic_type, pmic_subtype, pmic_status;
- u8 option1, option2, option3, option4, spare0;
+ u8 option1, option2, option3, option4, spare0, fab_id;
unsigned int base;
int rc;
char pmic_string[PMIC_STRING_MAXLENGTH] = {'\0'};
@@ -199,6 +200,11 @@ static int qpnp_revid_probe(struct platform_device *pdev)
pmic_subtype = PMI8937_PERIPHERAL_SUBTYPE;
}
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,fab-id-valid"))
+ fab_id = qpnp_read_byte(regmap, base + REVID_FAB_ID);
+ else
+ fab_id = -EINVAL;
+
revid_chip = devm_kzalloc(&pdev->dev, sizeof(struct revid_chip),
GFP_KERNEL);
if (!revid_chip)
@@ -211,6 +217,7 @@ static int qpnp_revid_probe(struct platform_device *pdev)
revid_chip->data.rev4 = rev4;
revid_chip->data.pmic_subtype = pmic_subtype;
revid_chip->data.pmic_type = pmic_type;
+ revid_chip->data.fab_id = fab_id;
if (pmic_subtype < ARRAY_SIZE(pmic_names))
revid_chip->data.pmic_name = pmic_names[pmic_subtype];
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 545a1e684b25..2718ea93bd45 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -249,6 +249,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(flash_current_max),
POWER_SUPPLY_ATTR(update_now),
POWER_SUPPLY_ATTR(esr_count),
+ POWER_SUPPLY_ATTR(buck_freq),
POWER_SUPPLY_ATTR(safety_timer_enabled),
POWER_SUPPLY_ATTR(charge_done),
POWER_SUPPLY_ATTR(flash_active),
@@ -267,8 +268,14 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(typec_power_role),
POWER_SUPPLY_ATTR(pd_allowed),
POWER_SUPPLY_ATTR(pd_active),
+ POWER_SUPPLY_ATTR(pd_in_hard_reset),
+ POWER_SUPPLY_ATTR(pd_current_max),
+ POWER_SUPPLY_ATTR(pd_usb_suspend_supported),
POWER_SUPPLY_ATTR(charger_temp),
POWER_SUPPLY_ATTR(charger_temp_max),
+ POWER_SUPPLY_ATTR(parallel_disable),
+ POWER_SUPPLY_ATTR(parallel_percent),
+ POWER_SUPPLY_ATTR(pe_start),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/qcom-charger/bcl_peripheral.c b/drivers/power/qcom-charger/bcl_peripheral.c
index fc958b160f86..8a7012ac2bef 100644
--- a/drivers/power/qcom-charger/bcl_peripheral.c
+++ b/drivers/power/qcom-charger/bcl_peripheral.c
@@ -459,7 +459,7 @@ static void bcl_lmh_dcvs_enable(void)
desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL,
SCM_VAL, SCM_VAL);
- dmac_flush_range(payload, payload + 5 * (sizeof(uint32_t)));
+ dmac_flush_range(payload, (void *)payload + 5 * (sizeof(uint32_t)));
if (scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LMH_DCVSH),
&desc_arg))
pr_err("Error enabling LMH BCL monitoringfor cluster0\n");
diff --git a/drivers/power/qcom-charger/fg-core.h b/drivers/power/qcom-charger/fg-core.h
index 515f31a44ce7..a703b208f6e4 100644
--- a/drivers/power/qcom-charger/fg-core.h
+++ b/drivers/power/qcom-charger/fg-core.h
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
@@ -38,6 +39,7 @@
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
+/* Awake votable reasons */
#define SRAM_READ "fg_sram_read"
#define SRAM_WRITE "fg_sram_write"
#define PROFILE_LOAD "fg_profile_load"
@@ -54,9 +56,14 @@
CHARS_PER_ITEM) + 1) \
#define FG_SRAM_ADDRESS_MAX 255
+#define PROFILE_LEN 224
+#define PROFILE_COMP_LEN 148
#define BUCKET_COUNT 8
#define BUCKET_SOC_PCT (256 / BUCKET_COUNT)
+#define KI_COEFF_MAX 62200
+#define KI_COEFF_SOC_LEVELS 3
+
/* Debug flag definitions */
enum fg_debug_flag {
FG_IRQ = BIT(0), /* Show interrupts */
@@ -66,6 +73,7 @@ enum fg_debug_flag {
FG_SRAM_READ = BIT(4), /* Show SRAM reads */
FG_BUS_WRITE = BIT(5), /* Show REGMAP writes */
FG_BUS_READ = BIT(6), /* Show REGMAP reads */
+ FG_CAP_LEARN = BIT(7), /* Show capacity learning */
};
/* SRAM access */
@@ -110,17 +118,29 @@ enum {
DELTA_SOC_IRQ_WA = BIT(0),
};
-/* SRAM parameters */
+/*
+ * List of FG_SRAM parameters. Please add a parameter only if it is an entry
+ * that will be used either to configure an entity (e.g. termination current)
+ * which might need some encoding (or) it is an entry that will be read from
+ * SRAM and decoded (e.g. CC_SOC_SW) for SW to use at various places. For
+ * generic read/writes to SRAM registers, please use fg_sram_read/write APIs
+ * directly without adding an entry here.
+ */
enum fg_sram_param_id {
FG_SRAM_BATT_SOC = 0,
FG_SRAM_VOLTAGE_PRED,
FG_SRAM_OCV,
FG_SRAM_RSLOW,
FG_SRAM_ALG_FLAGS,
+ FG_SRAM_CC_SOC,
+ FG_SRAM_CC_SOC_SW,
+ FG_SRAM_ACT_BATT_CAP,
/* Entries below here are configurable during initialization */
FG_SRAM_CUTOFF_VOLT,
FG_SRAM_EMPTY_VOLT,
FG_SRAM_VBATT_LOW,
+ FG_SRAM_FLOAT_VOLT,
+ FG_SRAM_VBATT_FULL,
FG_SRAM_ESR_TIMER_DISCHG_MAX,
FG_SRAM_ESR_TIMER_DISCHG_INIT,
FG_SRAM_ESR_TIMER_CHG_MAX,
@@ -129,6 +149,8 @@ enum fg_sram_param_id {
FG_SRAM_CHG_TERM_CURR,
FG_SRAM_DELTA_SOC_THR,
FG_SRAM_RECHARGE_SOC_THR,
+ FG_SRAM_KI_COEFF_MED_DISCHG,
+ FG_SRAM_KI_COEFF_HI_DISCHG,
FG_SRAM_MAX,
};
@@ -165,6 +187,8 @@ struct fg_alg_flag {
/* DT parameters for FG device */
struct fg_dt_props {
+ bool force_load_profile;
+ bool hold_soc_while_full;
int cutoff_volt_mv;
int empty_volt_mv;
int vbatt_low_thr_mv;
@@ -177,6 +201,18 @@ struct fg_dt_props {
int esr_timer_charging;
int esr_timer_awake;
int esr_timer_asleep;
+ int cl_start_soc;
+ int cl_max_temp;
+ int cl_min_temp;
+ int cl_max_cap_inc;
+ int cl_max_cap_dec;
+ int cl_max_cap_limit;
+ int cl_min_cap_limit;
+ int jeita_hyst_temp;
+ int batt_temp_delta;
+ int ki_coeff_soc[KI_COEFF_SOC_LEVELS];
+ int ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
+ int ki_coeff_hi_dischg[KI_COEFF_SOC_LEVELS];
};
/* parameters from battery profile */
@@ -184,6 +220,7 @@ struct fg_batt_props {
const char *batt_type_str;
char *batt_profile;
int float_volt_uv;
+ int vbatt_full_mv;
int fastchg_curr_ma;
int batt_id_kohm;
};
@@ -197,11 +234,21 @@ struct fg_cyc_ctr_data {
struct mutex lock;
};
+struct fg_cap_learning {
+ bool active;
+ int init_cc_soc_sw;
+ int64_t nom_cap_uah;
+ int64_t init_cc_uah;
+ int64_t final_cc_uah;
+ int64_t learned_cc_uah;
+ struct mutex lock;
+};
+
struct fg_irq_info {
const char *name;
const irq_handler_t handler;
- int irq;
bool wakeable;
+ int irq;
};
struct fg_chip {
@@ -211,34 +258,44 @@ struct fg_chip {
struct dentry *dfs_root;
struct power_supply *fg_psy;
struct power_supply *batt_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
struct iio_channel *batt_id_chan;
struct fg_memif *sram;
struct fg_irq_info *irqs;
struct votable *awake_votable;
struct fg_sram_param *sp;
+ struct fg_alg_flag *alg_flags;
int *debug_mask;
- char *batt_profile;
+ char batt_profile[PROFILE_LEN];
struct fg_dt_props dt;
struct fg_batt_props bp;
struct fg_cyc_ctr_data cyc_ctr;
struct notifier_block nb;
+ struct fg_cap_learning cl;
struct mutex bus_lock;
struct mutex sram_rw_lock;
u32 batt_soc_base;
u32 batt_info_base;
u32 mem_if_base;
- int nom_cap_uah;
+ int batt_id;
int status;
- int prev_status;
- bool batt_id_avail;
+ int charge_done;
+ int last_soc;
+ int last_batt_temp;
+ int health;
+ bool profile_available;
bool profile_loaded;
bool battery_missing;
+ bool fg_restarting;
+ bool charge_full;
+ bool recharge_soc_adjusted;
+ bool ki_coeff_dischg_en;
struct completion soc_update;
struct completion soc_ready;
struct delayed_work profile_load_work;
struct work_struct status_change_work;
struct work_struct cycle_count_work;
- struct fg_alg_flag *alg_flags;
};
/* Debugfs data structures are below */
@@ -287,4 +344,5 @@ extern int fg_debugfs_create(struct fg_chip *chip);
extern void fill_string(char *str, size_t str_len, u8 *buf, int buf_len);
extern int64_t twos_compliment_extend(int64_t val, int s_bit_pos);
extern s64 fg_float_decode(u16 val);
+extern bool is_input_present(struct fg_chip *chip);
#endif
diff --git a/drivers/power/qcom-charger/fg-reg.h b/drivers/power/qcom-charger/fg-reg.h
index 9d5874340a8e..431e28a7eb1f 100644
--- a/drivers/power/qcom-charger/fg-reg.h
+++ b/drivers/power/qcom-charger/fg-reg.h
@@ -126,6 +126,7 @@
/* BATT_INFO_BATT_TEMP_CFG */
#define JEITA_TEMP_HYST_MASK GENMASK(5, 4)
+#define JEITA_TEMP_HYST_SHIFT 4
#define JEITA_TEMP_NO_HYST 0x0
#define JEITA_TEMP_HYST_1C 0x1
#define JEITA_TEMP_HYST_2C 0x2
diff --git a/drivers/power/qcom-charger/fg-util.c b/drivers/power/qcom-charger/fg-util.c
index bf5a446452a4..0e3c7dbb5731 100644
--- a/drivers/power/qcom-charger/fg-util.c
+++ b/drivers/power/qcom-charger/fg-util.c
@@ -29,6 +29,43 @@ static struct fg_dbgfs dbgfs_data = {
},
};
+static bool is_usb_present(struct fg_chip *chip)
+{
+ union power_supply_propval pval = {0, };
+
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (chip->usb_psy)
+ power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ else
+ return false;
+
+ return pval.intval != 0;
+}
+
+static bool is_dc_present(struct fg_chip *chip)
+{
+ union power_supply_propval pval = {0, };
+
+ if (!chip->dc_psy)
+ chip->dc_psy = power_supply_get_by_name("dc");
+
+ if (chip->dc_psy)
+ power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ else
+ return false;
+
+ return pval.intval != 0;
+}
+
+bool is_input_present(struct fg_chip *chip)
+{
+ return is_usb_present(chip) || is_dc_present(chip);
+}
+
#define EXPONENT_SHIFT 11
#define EXPONENT_OFFSET -9
#define MANTISSA_SIGN_BIT 10
@@ -83,6 +120,9 @@ int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
if (!chip)
return -ENXIO;
+ if (chip->battery_missing)
+ return -ENODATA;
+
if (!fg_sram_address_valid(address, len))
return -EFAULT;
@@ -95,6 +135,7 @@ int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
* This interrupt need to be enabled only when it is
* required. It will be kept disabled other times.
*/
+ reinit_completion(&chip->soc_update);
enable_irq(chip->irqs[SOC_UPDATE_IRQ].irq);
atomic_access = true;
} else {
@@ -147,6 +188,9 @@ int fg_sram_read(struct fg_chip *chip, u16 address, u8 offset,
if (!chip)
return -ENXIO;
+ if (chip->battery_missing)
+ return -ENODATA;
+
if (!fg_sram_address_valid(address, len))
return -EFAULT;
@@ -577,6 +621,17 @@ static ssize_t fg_sram_dfs_reg_write(struct file *file, const char __user *buf,
/* Parse the data in the buffer. It should be a string of numbers */
while ((pos < count) &&
sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) {
+ /*
+ * We shouldn't be receiving a string of characters that
+ * exceeds a size of 5 to keep this functionally correct.
+ * Also, we should make sure that pos never gets overflowed
+ * beyond the limit.
+ */
+ if (bytes_read > 5 || bytes_read > INT_MAX - pos) {
+ cnt = 0;
+ ret = -EINVAL;
+ break;
+ }
pos += bytes_read;
values[cnt++] = data & 0xff;
}
diff --git a/drivers/power/qcom-charger/qpnp-fg-gen3.c b/drivers/power/qcom-charger/qpnp-fg-gen3.c
index 7739952f3254..4ee94b990382 100644
--- a/drivers/power/qcom-charger/qpnp-fg-gen3.c
+++ b/drivers/power/qcom-charger/qpnp-fg-gen3.c
@@ -17,7 +17,6 @@
#include <linux/of_platform.h>
#include <linux/of_batterydata.h>
#include <linux/platform_device.h>
-#include <linux/power_supply.h>
#include <linux/iio/consumer.h>
#include <linux/qpnp/qpnp-revid.h>
#include "fg-core.h"
@@ -35,6 +34,14 @@
#define CUTOFF_VOLT_OFFSET 0
#define SYS_TERM_CURR_WORD 6
#define SYS_TERM_CURR_OFFSET 0
+#define VBATT_FULL_WORD 7
+#define VBATT_FULL_OFFSET 0
+#define KI_COEFF_MED_DISCHG_WORD 9
+#define KI_COEFF_MED_DISCHG_OFFSET 3
+#define KI_COEFF_HI_DISCHG_WORD 10
+#define KI_COEFF_HI_DISCHG_OFFSET 0
+#define KI_COEFF_LOW_DISCHG_WORD 10
+#define KI_COEFF_LOW_DISCHG_OFFSET 2
#define DELTA_SOC_THR_WORD 12
#define DELTA_SOC_THR_OFFSET 3
#define RECHARGE_SOC_THR_WORD 14
@@ -57,20 +64,30 @@
#define PROFILE_LOAD_OFFSET 0
#define NOM_CAP_WORD 58
#define NOM_CAP_OFFSET 0
+#define ACT_BATT_CAP_BKUP_WORD 74
+#define ACT_BATT_CAP_BKUP_OFFSET 0
#define CYCLE_COUNT_WORD 75
#define CYCLE_COUNT_OFFSET 0
#define PROFILE_INTEGRITY_WORD 79
#define PROFILE_INTEGRITY_OFFSET 3
#define BATT_SOC_WORD 91
#define BATT_SOC_OFFSET 0
+#define FULL_SOC_WORD 93
+#define FULL_SOC_OFFSET 2
#define MONOTONIC_SOC_WORD 94
#define MONOTONIC_SOC_OFFSET 2
+#define CC_SOC_WORD 95
+#define CC_SOC_OFFSET 0
+#define CC_SOC_SW_WORD 96
+#define CC_SOC_SW_OFFSET 0
#define VOLTAGE_PRED_WORD 97
#define VOLTAGE_PRED_OFFSET 0
#define OCV_WORD 97
#define OCV_OFFSET 2
#define RSLOW_WORD 101
#define RSLOW_OFFSET 0
+#define ACT_BATT_CAP_WORD 117
+#define ACT_BATT_CAP_OFFSET 0
#define LAST_BATT_SOC_WORD 119
#define LAST_BATT_SOC_OFFSET 0
#define LAST_MONOTONIC_SOC_WORD 119
@@ -79,6 +96,12 @@
#define ALG_FLAGS_OFFSET 1
/* v2 SRAM address and offset in ascending order */
+#define KI_COEFF_LOW_DISCHG_v2_WORD 9
+#define KI_COEFF_LOW_DISCHG_v2_OFFSET 3
+#define KI_COEFF_MED_DISCHG_v2_WORD 10
+#define KI_COEFF_MED_DISCHG_v2_OFFSET 0
+#define KI_COEFF_HI_DISCHG_v2_WORD 10
+#define KI_COEFF_HI_DISCHG_v2_OFFSET 1
#define DELTA_SOC_THR_v2_WORD 13
#define DELTA_SOC_THR_v2_OFFSET 0
#define RECHARGE_SOC_THR_v2_WORD 14
@@ -89,17 +112,21 @@
#define EMPTY_VOLT_v2_OFFSET 3
#define VBATT_LOW_v2_WORD 16
#define VBATT_LOW_v2_OFFSET 0
+#define FLOAT_VOLT_v2_WORD 16
+#define FLOAT_VOLT_v2_OFFSET 2
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int val);
static int fg_decode_value_16b(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val);
static int fg_decode_default(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val);
-static int fg_decode_batt_soc(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val);
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int value);
static void fg_encode_voltage(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf);
+ enum fg_sram_param_id id, int val_mv, u8 *buf);
static void fg_encode_current(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf);
+ enum fg_sram_param_id id, int val_ma, u8 *buf);
static void fg_encode_default(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val, u8 *buf);
@@ -118,15 +145,21 @@ static void fg_encode_default(struct fg_sram_param *sp,
static struct fg_sram_param pmicobalt_v1_sram_params[] = {
PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
- fg_decode_batt_soc),
+ fg_decode_default),
PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 244141,
- 1000, 0, NULL, fg_decode_value_16b),
+ 1000, 0, NULL, fg_decode_voltage_15b),
PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 244141, 1000, 0, NULL,
- fg_decode_value_16b),
+ fg_decode_voltage_15b),
PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 244141, 1000, 0, NULL,
fg_decode_value_16b),
PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
fg_decode_default),
+ PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(ACT_BATT_CAP, ACT_BATT_CAP_BKUP_WORD, ACT_BATT_CAP_BKUP_OFFSET, 2,
+ 1, 1, 0, NULL, fg_decode_default),
/* Entries below here are configurable during initialization */
PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
244141, 0, fg_encode_voltage, NULL),
@@ -134,11 +167,13 @@ static struct fg_sram_param pmicobalt_v1_sram_params[] = {
-2500, fg_encode_voltage, NULL),
PARAM(VBATT_LOW, VBATT_LOW_WORD, VBATT_LOW_OFFSET, 1, 100000, 390625,
-2500, fg_encode_voltage, NULL),
+ PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000000,
+ 244141, 0, fg_encode_voltage, NULL),
PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
1000000, 122070, 0, fg_encode_current, NULL),
PARAM(CHG_TERM_CURR, CHG_TERM_CURR_WORD, CHG_TERM_CURR_OFFSET, 1,
100000, 390625, 0, fg_encode_current, NULL),
- PARAM(DELTA_SOC_THR, DELTA_SOC_THR_WORD, DELTA_SOC_THR_OFFSET, 1, 256,
+ PARAM(DELTA_SOC_THR, DELTA_SOC_THR_WORD, DELTA_SOC_THR_OFFSET, 1, 2048,
100, 0, fg_encode_default, NULL),
PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_WORD, RECHARGE_SOC_THR_OFFSET,
1, 256, 100, 0, fg_encode_default, NULL),
@@ -152,19 +187,31 @@ static struct fg_sram_param pmicobalt_v1_sram_params[] = {
ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+ PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_WORD,
+ KI_COEFF_MED_DISCHG_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
+ PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_WORD,
+ KI_COEFF_HI_DISCHG_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
};
static struct fg_sram_param pmicobalt_v2_sram_params[] = {
PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
- fg_decode_batt_soc),
+ fg_decode_default),
PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 244141,
- 1000, 0, NULL, fg_decode_value_16b),
+ 1000, 0, NULL, fg_decode_voltage_15b),
PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 244141, 1000, 0, NULL,
- fg_decode_value_16b),
+ fg_decode_voltage_15b),
PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 244141, 1000, 0, NULL,
fg_decode_value_16b),
PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
fg_decode_default),
+ PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(ACT_BATT_CAP, ACT_BATT_CAP_BKUP_WORD, ACT_BATT_CAP_BKUP_OFFSET, 2,
+ 1, 1, 0, NULL, fg_decode_default),
/* Entries below here are configurable during initialization */
PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
244141, 0, fg_encode_voltage, NULL),
@@ -172,12 +219,16 @@ static struct fg_sram_param pmicobalt_v2_sram_params[] = {
15625, -2000, fg_encode_voltage, NULL),
PARAM(VBATT_LOW, VBATT_LOW_v2_WORD, VBATT_LOW_v2_OFFSET, 1, 1000,
15625, -2000, fg_encode_voltage, NULL),
+ PARAM(FLOAT_VOLT, FLOAT_VOLT_v2_WORD, FLOAT_VOLT_v2_OFFSET, 1, 1000,
+ 15625, -2000, fg_encode_voltage, NULL),
+ PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000000,
+ 244141, 0, fg_encode_voltage, NULL),
PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
1000000, 122070, 0, fg_encode_current, NULL),
PARAM(CHG_TERM_CURR, CHG_TERM_CURR_v2_WORD, CHG_TERM_CURR_v2_OFFSET, 1,
100000, 390625, 0, fg_encode_current, NULL),
PARAM(DELTA_SOC_THR, DELTA_SOC_THR_v2_WORD, DELTA_SOC_THR_v2_OFFSET, 1,
- 256, 100, 0, fg_encode_default, NULL),
+ 2048, 100, 0, fg_encode_default, NULL),
PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_v2_WORD,
RECHARGE_SOC_THR_v2_OFFSET, 1, 256, 100, 0, fg_encode_default,
NULL),
@@ -191,6 +242,12 @@ static struct fg_sram_param pmicobalt_v2_sram_params[] = {
ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+ PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_v2_WORD,
+ KI_COEFF_MED_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
+ PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_v2_WORD,
+ KI_COEFF_HI_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
};
static struct fg_alg_flag pmicobalt_v1_alg_flags[] = {
@@ -264,21 +321,40 @@ module_param_named(
sram_update_period_ms, fg_sram_update_period_ms, int, S_IRUSR | S_IWUSR
);
+static bool fg_sram_dump;
+module_param_named(
+ sram_dump, fg_sram_dump, bool, S_IRUSR | S_IWUSR
+);
+
+static int fg_restart;
+
/* All getters HERE */
-static int fg_decode_value_16b(struct fg_sram_param *sp,
+#define VOLTAGE_15BIT_MASK GENMASK(14, 0)
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
enum fg_sram_param_id id, int value)
{
- sp[id].value = div_u64((u64)(u16)value * sp[id].numrtr, sp[id].denmtr);
+ value &= VOLTAGE_15BIT_MASK;
+ sp[id].value = div_u64((u64)value * sp[id].numrtr, sp[id].denmtr);
+ pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+ sp[id].value);
+ return sp[id].value;
+}
+
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int value)
+{
+ sp[id].value = div_s64((s64)value * sp[id].numrtr, sp[id].denmtr);
+ sp[id].value = sign_extend32(sp[id].value, 31);
pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
sp[id].value);
return sp[id].value;
}
-static int fg_decode_batt_soc(struct fg_sram_param *sp,
+static int fg_decode_value_16b(struct fg_sram_param *sp,
enum fg_sram_param_id id, int value)
{
- sp[id].value = (u32)value >> 24;
+ sp[id].value = div_u64((u64)(u16)value * sp[id].numrtr, sp[id].denmtr);
pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
sp[id].value);
return sp[id].value;
@@ -287,7 +363,8 @@ static int fg_decode_batt_soc(struct fg_sram_param *sp,
static int fg_decode_default(struct fg_sram_param *sp, enum fg_sram_param_id id,
int value)
{
- return value;
+ sp[id].value = value;
+ return sp[id].value;
}
static int fg_decode(struct fg_sram_param *sp, enum fg_sram_param_id id,
@@ -302,14 +379,14 @@ static int fg_decode(struct fg_sram_param *sp, enum fg_sram_param_id id,
}
static void fg_encode_voltage(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf)
+ enum fg_sram_param_id id, int val_mv, u8 *buf)
{
int i, mask = 0xff;
int64_t temp;
- val += sp[id].offset;
- temp = (int64_t)div_u64((u64)val * sp[id].numrtr, sp[id].denmtr);
- pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
+ val_mv += sp[id].offset;
+ temp = (int64_t)div_u64((u64)val_mv * sp[id].numrtr, sp[id].denmtr);
+ pr_debug("temp: %llx id: %d, val_mv: %d, buf: [ ", temp, id, val_mv);
for (i = 0; i < sp[id].len; i++) {
buf[i] = temp & mask;
temp >>= 8;
@@ -319,15 +396,15 @@ static void fg_encode_voltage(struct fg_sram_param *sp,
}
static void fg_encode_current(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf)
+ enum fg_sram_param_id id, int val_ma, u8 *buf)
{
int i, mask = 0xff;
int64_t temp;
s64 current_ma;
- current_ma = val;
+ current_ma = val_ma;
temp = (int64_t)div_s64(current_ma * sp[id].numrtr, sp[id].denmtr);
- pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
+ pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val_ma);
for (i = 0; i < sp[id].len; i++) {
buf[i] = temp & mask;
temp >>= 8;
@@ -378,6 +455,9 @@ static int fg_get_sram_prop(struct fg_chip *chip, enum fg_sram_param_id id,
if (id < 0 || id > FG_SRAM_MAX || chip->sp[id].len > sizeof(buf))
return -EINVAL;
+ if (chip->battery_missing)
+ return -ENODATA;
+
rc = fg_sram_read(chip, chip->sp[id].addr_word, chip->sp[id].addr_byte,
buf, chip->sp[id].len, FG_IMA_DEFAULT);
if (rc < 0) {
@@ -393,12 +473,40 @@ static int fg_get_sram_prop(struct fg_chip *chip, enum fg_sram_param_id id,
return 0;
}
+#define CC_SOC_30BIT GENMASK(29, 0)
+static int fg_get_cc_soc(struct fg_chip *chip, int *val)
+{
+ int rc, cc_soc;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC, &cc_soc);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = div_s64(cc_soc * chip->cl.nom_cap_uah, CC_SOC_30BIT);
+ return 0;
+}
+
+static int fg_get_cc_soc_sw(struct fg_chip *chip, int *val)
+{
+ int rc, cc_soc;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = div_s64(cc_soc * chip->cl.learned_cc_uah, CC_SOC_30BIT);
+ return 0;
+}
+
#define BATT_TEMP_NUMR 1
#define BATT_TEMP_DENR 1
static int fg_get_battery_temp(struct fg_chip *chip, int *val)
{
- int rc = 0;
- u16 temp = 0;
+ int rc = 0, temp;
u8 buf[2];
rc = fg_read(chip, BATT_INFO_BATT_TEMP_LSB(chip), buf, 2);
@@ -543,7 +651,6 @@ static int fg_get_msoc_raw(struct fg_chip *chip, int *val)
}
fg_dbg(chip, FG_POWER_SUPPLY, "raw: 0x%02x\n", cap[0]);
-
*val = cap[0];
return 0;
}
@@ -554,6 +661,11 @@ static int fg_get_prop_capacity(struct fg_chip *chip, int *val)
{
int rc, msoc;
+ if (chip->charge_full) {
+ *val = FULL_CAPACITY;
+ return 0;
+ }
+
rc = fg_get_msoc_raw(chip, &msoc);
if (rc < 0)
return rc;
@@ -593,14 +705,12 @@ static int fg_get_batt_id(struct fg_chip *chip, int *val)
return rc;
}
- chip->batt_id_avail = true;
fg_dbg(chip, FG_STATUS, "batt_id: %d\n", batt_id);
*val = batt_id;
return 0;
}
-#define PROFILE_LEN 224
static int fg_get_batt_profile(struct fg_chip *chip)
{
struct device_node *node = chip->dev->of_node;
@@ -614,13 +724,14 @@ static int fg_get_batt_profile(struct fg_chip *chip)
return rc;
}
+ batt_id /= 1000;
+ chip->batt_id = batt_id;
batt_node = of_find_node_by_name(node, "qcom,battery-data");
if (!batt_node) {
pr_err("Batterydata not available\n");
return -ENXIO;
}
- batt_id /= 1000;
profile_node = of_batterydata_get_best_profile(batt_node, batt_id,
NULL);
if (IS_ERR(profile_node))
@@ -652,6 +763,13 @@ static int fg_get_batt_profile(struct fg_chip *chip)
chip->bp.fastchg_curr_ma = -EINVAL;
}
+ rc = of_property_read_u32(profile_node, "qcom,fg-cc-cv-threshold-mv",
+ &chip->bp.vbatt_full_mv);
+ if (rc < 0) {
+ pr_err("battery cc_cv threshold unavailable, rc:%d\n", rc);
+ chip->bp.vbatt_full_mv = -EINVAL;
+ }
+
data = of_get_property(profile_node, "qcom,fg-profile-data", &len);
if (!data) {
pr_err("No profile data available\n");
@@ -663,6 +781,7 @@ static int fg_get_batt_profile(struct fg_chip *chip)
return -EINVAL;
}
+ chip->profile_available = true;
memcpy(chip->batt_profile, data, len);
return 0;
}
@@ -673,6 +792,27 @@ static inline void get_temp_setpoint(int threshold, u8 *val)
*val = DIV_ROUND_CLOSEST((threshold + 30) * 10, 5);
}
+static inline void get_batt_temp_delta(int delta, u8 *val)
+{
+ switch (delta) {
+ case 2:
+ *val = BTEMP_DELTA_2K;
+ break;
+ case 4:
+ *val = BTEMP_DELTA_4K;
+ break;
+ case 6:
+ *val = BTEMP_DELTA_6K;
+ break;
+ case 10:
+ *val = BTEMP_DELTA_10K;
+ break;
+ default:
+ *val = BTEMP_DELTA_2K;
+ break;
+ };
+}
+
static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
int flags)
{
@@ -739,38 +879,527 @@ static bool is_charger_available(struct fg_chip *chip)
return true;
}
+static int fg_save_learned_cap_to_sram(struct fg_chip *chip)
+{
+ int16_t cc_mah;
+ int rc;
+
+ if (chip->battery_missing || !chip->cl.learned_cc_uah)
+ return -EPERM;
+
+ cc_mah = div64_s64(chip->cl.learned_cc_uah, 1000);
+ /* Write to a backup register to use across reboot */
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_ACT_BATT_CAP].addr_word,
+ chip->sp[FG_SRAM_ACT_BATT_CAP].addr_byte, (u8 *)&cc_mah,
+ chip->sp[FG_SRAM_ACT_BATT_CAP].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing act_batt_cap_bkup, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Write to actual capacity register for coulomb counter operation */
+ rc = fg_sram_write(chip, ACT_BATT_CAP_WORD, ACT_BATT_CAP_OFFSET,
+ (u8 *)&cc_mah, chip->sp[FG_SRAM_ACT_BATT_CAP].len,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing act_batt_cap, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_dbg(chip, FG_CAP_LEARN, "learned capacity %llduah/%dmah stored\n",
+ chip->cl.learned_cc_uah, cc_mah);
+ return 0;
+}
+
+#define CAPACITY_DELTA_DECIPCT 500
+static int fg_load_learned_cap_from_sram(struct fg_chip *chip)
+{
+ int rc, act_cap_mah;
+ int64_t delta_cc_uah, pct_nom_cap_uah;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah);
+ if (rc < 0) {
+ pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->cl.learned_cc_uah = act_cap_mah * 1000;
+
+ if (chip->cl.learned_cc_uah != chip->cl.nom_cap_uah) {
+ if (chip->cl.learned_cc_uah == 0)
+ chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+
+ delta_cc_uah = abs(chip->cl.learned_cc_uah -
+ chip->cl.nom_cap_uah);
+ pct_nom_cap_uah = div64_s64((int64_t)chip->cl.nom_cap_uah *
+ CAPACITY_DELTA_DECIPCT, 1000);
+ /*
+ * If the learned capacity is out of range by 50% from the
+ * nominal capacity, then overwrite the learned capacity with
+ * the nominal capacity.
+ */
+ if (chip->cl.nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) {
+ fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah: %lld is higher than expected, capping it to nominal: %lld\n",
+ chip->cl.learned_cc_uah, chip->cl.nom_cap_uah);
+ chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+ }
+
+ rc = fg_save_learned_cap_to_sram(chip);
+ if (rc < 0)
+ pr_err("Error in saving learned_cc_uah, rc=%d\n", rc);
+ }
+
+ fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah:%lld nom_cap_uah: %lld\n",
+ chip->cl.learned_cc_uah, chip->cl.nom_cap_uah);
+ return 0;
+}
+
+static bool is_temp_valid_cap_learning(struct fg_chip *chip)
+{
+ int rc, batt_temp;
+
+ rc = fg_get_battery_temp(chip, &batt_temp);
+ if (rc < 0) {
+ pr_err("Error in getting batt_temp\n");
+ return false;
+ }
+
+ if (batt_temp > chip->dt.cl_max_temp ||
+ batt_temp < chip->dt.cl_min_temp) {
+ fg_dbg(chip, FG_CAP_LEARN, "batt temp %d out of range [%d %d]\n",
+ batt_temp, chip->dt.cl_min_temp, chip->dt.cl_max_temp);
+ return false;
+ }
+
+ return true;
+}
+
+static void fg_cap_learning_post_process(struct fg_chip *chip)
+{
+ int64_t max_inc_val, min_dec_val, old_cap;
+ int rc;
+
+ max_inc_val = chip->cl.learned_cc_uah
+ * (1000 + chip->dt.cl_max_cap_inc);
+ do_div(max_inc_val, 1000);
+
+ min_dec_val = chip->cl.learned_cc_uah
+ * (1000 - chip->dt.cl_max_cap_dec);
+ do_div(min_dec_val, 1000);
+
+ old_cap = chip->cl.learned_cc_uah;
+ if (chip->cl.final_cc_uah > max_inc_val)
+ chip->cl.learned_cc_uah = max_inc_val;
+ else if (chip->cl.final_cc_uah < min_dec_val)
+ chip->cl.learned_cc_uah = min_dec_val;
+ else
+ chip->cl.learned_cc_uah =
+ chip->cl.final_cc_uah;
+
+ if (chip->dt.cl_max_cap_limit) {
+ max_inc_val = (int64_t)chip->cl.nom_cap_uah * (1000 +
+ chip->dt.cl_max_cap_limit);
+ do_div(max_inc_val, 1000);
+ if (chip->cl.final_cc_uah > max_inc_val) {
+ fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes above max limit %lld\n",
+ chip->cl.final_cc_uah, max_inc_val);
+ chip->cl.learned_cc_uah = max_inc_val;
+ }
+ }
+
+ if (chip->dt.cl_min_cap_limit) {
+ min_dec_val = (int64_t)chip->cl.nom_cap_uah * (1000 -
+ chip->dt.cl_min_cap_limit);
+ do_div(min_dec_val, 1000);
+ if (chip->cl.final_cc_uah < min_dec_val) {
+ fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes below min limit %lld\n",
+ chip->cl.final_cc_uah, min_dec_val);
+ chip->cl.learned_cc_uah = min_dec_val;
+ }
+ }
+
+ rc = fg_save_learned_cap_to_sram(chip);
+ if (rc < 0)
+ pr_err("Error in saving learned_cc_uah, rc=%d\n", rc);
+
+ fg_dbg(chip, FG_CAP_LEARN, "final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
+ chip->cl.final_cc_uah, old_cap, chip->cl.learned_cc_uah);
+}
+
+static int fg_cap_learning_process_full_data(struct fg_chip *chip)
+{
+ int rc, cc_soc_sw, cc_soc_delta_pct;
+ int64_t delta_cc_uah;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+ return rc;
+ }
+
+ cc_soc_delta_pct = DIV_ROUND_CLOSEST(
+ abs(cc_soc_sw - chip->cl.init_cc_soc_sw) * 100,
+ CC_SOC_30BIT);
+ delta_cc_uah = div64_s64(chip->cl.learned_cc_uah * cc_soc_delta_pct,
+ 100);
+ chip->cl.final_cc_uah = chip->cl.init_cc_uah + delta_cc_uah;
+ fg_dbg(chip, FG_CAP_LEARN, "Current cc_soc=%d cc_soc_delta_pct=%d total_cc_uah=%lld\n",
+ cc_soc_sw, cc_soc_delta_pct, chip->cl.final_cc_uah);
+ return 0;
+}
+
+static int fg_cap_learning_begin(struct fg_chip *chip, int batt_soc)
+{
+ int rc, cc_soc_sw;
+
+ if (DIV_ROUND_CLOSEST(batt_soc * 100, FULL_SOC_RAW) >
+ chip->dt.cl_start_soc) {
+ fg_dbg(chip, FG_CAP_LEARN, "Battery SOC %d is high!, not starting\n",
+ batt_soc);
+ return -EINVAL;
+ }
+
+ chip->cl.init_cc_uah = div64_s64(chip->cl.learned_cc_uah * batt_soc,
+ FULL_SOC_RAW);
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->cl.init_cc_soc_sw = cc_soc_sw;
+ chip->cl.active = true;
+ fg_dbg(chip, FG_CAP_LEARN, "Capacity learning started @ battery SOC %d init_cc_soc_sw:%d\n",
+ batt_soc, chip->cl.init_cc_soc_sw);
+ return 0;
+}
+
+static int fg_cap_learning_done(struct fg_chip *chip)
+{
+ int rc, cc_soc_sw;
+
+ rc = fg_cap_learning_process_full_data(chip);
+ if (rc < 0) {
+ pr_err("Error in processing cap learning full data, rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ /* Write a FULL value to cc_soc_sw */
+ cc_soc_sw = CC_SOC_30BIT;
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
+ chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
+ chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+ goto out;
+ }
+
+ fg_cap_learning_post_process(chip);
+out:
+ return rc;
+}
+
+#define FULL_SOC_RAW 255
+static void fg_cap_learning_update(struct fg_chip *chip)
+{
+ int rc, batt_soc;
+
+ mutex_lock(&chip->cl.lock);
+
+ if (!is_temp_valid_cap_learning(chip) || !chip->cl.learned_cc_uah ||
+ chip->battery_missing) {
+ fg_dbg(chip, FG_CAP_LEARN, "Aborting cap_learning %lld\n",
+ chip->cl.learned_cc_uah);
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ goto out;
+ }
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+ if (rc < 0) {
+ pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+ goto out;
+ }
+
+ /* We need only the most significant byte here */
+ batt_soc = (u32)batt_soc >> 24;
+
+ fg_dbg(chip, FG_CAP_LEARN, "Chg_status: %d cl_active: %d batt_soc: %d\n",
+ chip->status, chip->cl.active, batt_soc);
+
+ /* Initialize the starting point of learning capacity */
+ if (!chip->cl.active) {
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ rc = fg_cap_learning_begin(chip, batt_soc);
+ chip->cl.active = (rc == 0);
+ }
+
+ } else {
+ if (chip->charge_done) {
+ rc = fg_cap_learning_done(chip);
+ if (rc < 0)
+ pr_err("Error in completing capacity learning, rc=%d\n",
+ rc);
+
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ }
+
+ if (chip->status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
+ fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
+ batt_soc);
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ }
+ }
+
+out:
+ mutex_unlock(&chip->cl.lock);
+}
+
+#define KI_COEFF_MED_DISCHG_DEFAULT 1500
+#define KI_COEFF_HI_DISCHG_DEFAULT 2200
+static int fg_adjust_ki_coeff_dischg(struct fg_chip *chip)
+{
+ int rc, i, msoc;
+ int ki_coeff_med = KI_COEFF_MED_DISCHG_DEFAULT;
+ int ki_coeff_hi = KI_COEFF_HI_DISCHG_DEFAULT;
+ u8 val;
+
+ if (!chip->ki_coeff_dischg_en)
+ return 0;
+
+ rc = fg_get_prop_capacity(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting capacity, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ for (i = KI_COEFF_SOC_LEVELS - 1; i >= 0; i--) {
+ if (msoc < chip->dt.ki_coeff_soc[i]) {
+ ki_coeff_med = chip->dt.ki_coeff_med_dischg[i];
+ ki_coeff_hi = chip->dt.ki_coeff_hi_dischg[i];
+ }
+ }
+ }
+
+ fg_encode(chip->sp, FG_SRAM_KI_COEFF_MED_DISCHG, ki_coeff_med, &val);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_word,
+ chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_byte, &val,
+ chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].len,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing ki_coeff_med, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_encode(chip->sp, FG_SRAM_KI_COEFF_HI_DISCHG, ki_coeff_hi, &val);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_word,
+ chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_byte, &val,
+ chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].len,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing ki_coeff_hi, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_dbg(chip, FG_STATUS, "Wrote ki_coeff_med %d ki_coeff_hi %d\n",
+ ki_coeff_med, ki_coeff_hi);
+ return 0;
+}
+
+static int fg_charge_full_update(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0, };
+ int rc, msoc, bsoc, recharge_soc;
+ u8 full_soc[2] = {0xFF, 0xFF};
+
+ if (!chip->dt.hold_soc_while_full)
+ return 0;
+
+ if (!is_charger_available(chip))
+ return 0;
+
+ rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+ &prop);
+ if (rc < 0) {
+ pr_err("Error in getting battery health, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->health = prop.intval;
+ recharge_soc = chip->dt.recharge_soc_thr;
+ recharge_soc = DIV_ROUND_CLOSEST(recharge_soc * FULL_SOC_RAW,
+ FULL_CAPACITY);
+ rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &bsoc);
+ if (rc < 0) {
+ pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* We need 2 most significant bytes here */
+ bsoc = (u32)bsoc >> 16;
+ rc = fg_get_prop_capacity(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting capacity, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_dbg(chip, FG_STATUS, "msoc: %d health: %d status: %d\n", msoc,
+ chip->health, chip->status);
+ if (chip->charge_done) {
+ if (msoc >= 99 && chip->health == POWER_SUPPLY_HEALTH_GOOD)
+ chip->charge_full = true;
+ else
+ fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
+ msoc);
+ } else if ((bsoc >> 8) <= recharge_soc) {
+ fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d\n",
+ bsoc >> 8, recharge_soc);
+ chip->charge_full = false;
+ }
+
+ if (!chip->charge_full)
+ return 0;
+
+ /*
+ * During JEITA conditions, charge_full can happen early. FULL_SOC
+ * and MONOTONIC_SOC needs to be updated to reflect the same. Write
+ * battery SOC to FULL_SOC and write a full value to MONOTONIC_SOC.
+ */
+ rc = fg_sram_write(chip, FULL_SOC_WORD, FULL_SOC_OFFSET, (u8 *)&bsoc, 2,
+ FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("failed to write full_soc rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_sram_write(chip, MONOTONIC_SOC_WORD, MONOTONIC_SOC_OFFSET,
+ full_soc, 2, FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("failed to write monotonic_soc rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_dbg(chip, FG_STATUS, "Set charge_full to true @ soc %d\n", msoc);
+ return 0;
+}
+
+static int fg_set_recharge_soc(struct fg_chip *chip, int recharge_soc)
+{
+ u8 buf[4];
+ int rc;
+
+ fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR, recharge_soc, buf);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
+ chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_byte, buf,
+ chip->sp[FG_SRAM_RECHARGE_SOC_THR].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing recharge_soc_thr, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int fg_adjust_recharge_soc(struct fg_chip *chip)
+{
+ int rc, msoc, recharge_soc, new_recharge_soc = 0;
+
+ recharge_soc = chip->dt.recharge_soc_thr;
+ /*
+ * If the input is present and charging had been terminated, adjust
+ * the recharge SOC threshold based on the monotonic SOC at which
+ * the charge termination had happened.
+ */
+ if (is_input_present(chip) && !chip->recharge_soc_adjusted
+ && chip->charge_done) {
+ /* Get raw monotonic SOC for calculation */
+ rc = fg_get_msoc_raw(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting msoc, rc=%d\n", rc);
+ return rc;
+ }
+
+ msoc = DIV_ROUND_CLOSEST(msoc * FULL_CAPACITY, FULL_SOC_RAW);
+ /* Adjust the recharge_soc threshold */
+ new_recharge_soc = msoc - (FULL_CAPACITY - recharge_soc);
+ } else if (chip->recharge_soc_adjusted && (!is_input_present(chip)
+ || chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
+ /* Restore the default value */
+ new_recharge_soc = recharge_soc;
+ }
+
+ if (new_recharge_soc > 0 && new_recharge_soc < FULL_CAPACITY) {
+ rc = fg_set_recharge_soc(chip, new_recharge_soc);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->recharge_soc_adjusted = (new_recharge_soc !=
+ recharge_soc);
+ fg_dbg(chip, FG_STATUS, "resume soc set to %d\n",
+ new_recharge_soc);
+ }
+
+ return 0;
+}
+
static void status_change_work(struct work_struct *work)
{
struct fg_chip *chip = container_of(work,
struct fg_chip, status_change_work);
union power_supply_propval prop = {0, };
+ int rc;
if (!is_charger_available(chip)) {
fg_dbg(chip, FG_STATUS, "Charger not available?!\n");
- return;
+ goto out;
}
- power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
+ rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
&prop);
- chip->prev_status = chip->status;
+ if (rc < 0) {
+ pr_err("Error in getting charging status, rc=%d\n", rc);
+ goto out;
+ }
+
chip->status = prop.intval;
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_DONE, &prop);
+ if (rc < 0) {
+ pr_err("Error in getting charge_done, rc=%d\n", rc);
+ goto out;
+ }
- if (chip->cyc_ctr.en && chip->prev_status != chip->status)
+ chip->charge_done = prop.intval;
+ fg_dbg(chip, FG_POWER_SUPPLY, "curr_status:%d charge_done: %d\n",
+ chip->status, chip->charge_done);
+
+ if (chip->cyc_ctr.en)
schedule_work(&chip->cycle_count_work);
- switch (prop.intval) {
- case POWER_SUPPLY_STATUS_CHARGING:
- fg_dbg(chip, FG_POWER_SUPPLY, "Charging\n");
- break;
- case POWER_SUPPLY_STATUS_DISCHARGING:
- fg_dbg(chip, FG_POWER_SUPPLY, "Discharging\n");
- break;
- case POWER_SUPPLY_STATUS_FULL:
- fg_dbg(chip, FG_POWER_SUPPLY, "Full\n");
- break;
- default:
- break;
- }
+ fg_cap_learning_update(chip);
+
+ rc = fg_charge_full_update(chip);
+ if (rc < 0)
+ pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+ rc = fg_adjust_recharge_soc(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting recharge_soc, rc=%d\n", rc);
+
+ rc = fg_adjust_ki_coeff_dischg(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+out:
+ pm_relax(chip->dev);
}
static void restore_cycle_counter(struct fg_chip *chip)
@@ -853,6 +1482,9 @@ static void cycle_count_work(struct work_struct *work)
goto out;
}
+ /* We need only the most significant byte here */
+ batt_soc = (u32)batt_soc >> 24;
+
if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
/* Find out which bucket the SOC falls in */
bucket = batt_soc / BUCKET_SOC_PCT;
@@ -912,64 +1544,84 @@ static int fg_get_cycle_count(struct fg_chip *chip)
return count;
}
-#define PROFILE_COMP_LEN 32
-#define SOC_READY_WAIT_MS 2000
-static void profile_load_work(struct work_struct *work)
+static void dump_sram(u8 *buf, int len)
{
- struct fg_chip *chip = container_of(work,
- struct fg_chip,
- profile_load_work.work);
- int rc;
- u8 buf[PROFILE_COMP_LEN], val;
- bool tried_again = false, profiles_same = false;
+ int i;
+ char str[16];
- if (!chip->batt_id_avail) {
- pr_err("batt_id not available\n");
- return;
+ for (i = 0; i < len; i += 4) {
+ str[0] = '\0';
+ fill_string(str, sizeof(str), buf + i, 4);
+ pr_info("%03d %s\n", PROFILE_LOAD_WORD + (i / 4), str);
}
+}
+
+static bool is_profile_load_required(struct fg_chip *chip)
+{
+ u8 buf[PROFILE_COMP_LEN], val;
+ bool profiles_same = false;
+ int rc;
rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("failed to read profile integrity rc=%d\n", rc);
- return;
+ return false;
}
- vote(chip->awake_votable, PROFILE_LOAD, true, 0);
+ /* Check if integrity bit is set */
if (val == 0x01) {
fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("Error in reading battery profile, rc:%d\n", rc);
- goto out;
+ return false;
}
profiles_same = memcmp(chip->batt_profile, buf,
PROFILE_COMP_LEN) == 0;
if (profiles_same) {
- fg_dbg(chip, FG_STATUS, "Battery profile is same\n");
- goto done;
+ fg_dbg(chip, FG_STATUS, "Battery profile is same, not loading it\n");
+ return false;
}
- fg_dbg(chip, FG_STATUS, "profiles are different?\n");
- }
- clear_cycle_counter(chip);
- fg_dbg(chip, FG_STATUS, "profile loading started\n");
- rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
- if (rc < 0) {
- pr_err("Error in writing to %04x, rc=%d\n",
- BATT_SOC_RESTART(chip), rc);
- goto out;
+ if (!chip->dt.force_load_profile) {
+ pr_warn("Profiles doesn't match, skipping loading it since force_load_profile is disabled\n");
+ if (fg_sram_dump) {
+ pr_info("FG: loaded profile:\n");
+ dump_sram(buf, PROFILE_COMP_LEN);
+ pr_info("FG: available profile:\n");
+ dump_sram(chip->batt_profile, PROFILE_LEN);
+ }
+ return false;
+ }
+
+ fg_dbg(chip, FG_STATUS, "Profiles are different, loading the correct one\n");
+ } else {
+ fg_dbg(chip, FG_STATUS, "Profile integrity bit is not set\n");
+ if (fg_sram_dump) {
+ pr_info("FG: profile to be loaded:\n");
+ dump_sram(chip->batt_profile, PROFILE_LEN);
+ }
}
+ return true;
+}
- /* load battery profile */
- rc = fg_sram_write(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
- chip->batt_profile, PROFILE_LEN, FG_IMA_ATOMIC);
+#define SOC_READY_WAIT_MS 2000
+static int __fg_restart(struct fg_chip *chip)
+{
+ int rc, msoc;
+ bool tried_again = false;
+
+ rc = fg_get_prop_capacity(chip, &msoc);
if (rc < 0) {
- pr_err("Error in writing battery profile, rc:%d\n", rc);
- goto out;
+ pr_err("Error in getting capacity, rc=%d\n", rc);
+ return rc;
}
+ chip->last_soc = msoc;
+ chip->fg_restarting = true;
+ reinit_completion(&chip->soc_ready);
rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT,
RESTART_GO_BIT);
if (rc < 0) {
@@ -991,6 +1643,88 @@ wait:
goto out;
}
+ rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in writing to %04x, rc=%d\n",
+ BATT_SOC_RESTART(chip), rc);
+ goto out;
+ }
+out:
+ chip->fg_restarting = false;
+ return rc;
+}
+
+static void fg_notify_charger(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0, };
+ int rc;
+
+ if (!is_charger_available(chip)) {
+ pr_warn("Charger not available yet?\n");
+ return;
+ }
+
+ prop.intval = chip->bp.float_volt_uv;
+ rc = power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX, &prop);
+ if (rc < 0) {
+ pr_err("Error in setting voltage_max property on batt_psy, rc=%d\n",
+ rc);
+ return;
+ }
+
+ prop.intval = chip->bp.fastchg_curr_ma * 1000;
+ rc = power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &prop);
+ if (rc < 0) {
+ pr_err("Error in setting constant_charge_current_max property on batt_psy, rc=%d\n",
+ rc);
+ return;
+ }
+
+ fg_dbg(chip, FG_STATUS, "Notified charger on float voltage and FCC\n");
+}
+
+static void profile_load_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ profile_load_work.work);
+ u8 buf[2], val;
+ int rc;
+
+ vote(chip->awake_votable, PROFILE_LOAD, true, 0);
+ if (!is_profile_load_required(chip))
+ goto done;
+
+ clear_cycle_counter(chip);
+ mutex_lock(&chip->cl.lock);
+ chip->cl.learned_cc_uah = 0;
+ chip->cl.active = false;
+ mutex_unlock(&chip->cl.lock);
+
+ fg_dbg(chip, FG_STATUS, "profile loading started\n");
+ rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in writing to %04x, rc=%d\n",
+ BATT_SOC_RESTART(chip), rc);
+ goto out;
+ }
+
+ /* load battery profile */
+ rc = fg_sram_write(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
+ chip->batt_profile, PROFILE_LEN, FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("Error in writing battery profile, rc:%d\n", rc);
+ goto out;
+ }
+
+ rc = __fg_restart(chip);
+ if (rc < 0) {
+ pr_err("Error in restarting FG, rc=%d\n", rc);
+ goto out;
+ }
+
fg_dbg(chip, FG_STATUS, "SOC is ready\n");
/* Set the profile integrity bit */
@@ -1002,26 +1736,68 @@ wait:
goto out;
}
- fg_dbg(chip, FG_STATUS, "profile loaded successfully");
done:
rc = fg_sram_read(chip, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2,
FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("Error in reading %04x[%d] rc=%d\n", NOM_CAP_WORD,
NOM_CAP_OFFSET, rc);
- goto out;
+ } else {
+ chip->cl.nom_cap_uah = (int)(buf[0] | buf[1] << 8) * 1000;
+ rc = fg_load_learned_cap_from_sram(chip);
+ if (rc < 0)
+ pr_err("Error in loading capacity learning data, rc:%d\n",
+ rc);
}
- chip->nom_cap_uah = (int)(buf[0] | buf[1] << 8) * 1000;
+ fg_notify_charger(chip);
chip->profile_loaded = true;
+ fg_dbg(chip, FG_STATUS, "profile loaded successfully");
out:
vote(chip->awake_votable, PROFILE_LOAD, false, 0);
- rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
- if (rc < 0)
- pr_err("Error in writing to %04x, rc=%d\n",
- BATT_SOC_RESTART(chip), rc);
}
+static int fg_restart_sysfs(const char *val, const struct kernel_param *kp)
+{
+ int rc;
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("Unable to set fg_restart: %d\n", rc);
+ return rc;
+ }
+
+ if (fg_restart != 1) {
+ pr_err("Bad value %d\n", fg_restart);
+ return -EINVAL;
+ }
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+
+ chip = power_supply_get_drvdata(bms_psy);
+ rc = __fg_restart(chip);
+ if (rc < 0) {
+ pr_err("Error in restarting FG, rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_info("FG restart done\n");
+ return rc;
+}
+
+static struct kernel_param_ops fg_restart_ops = {
+ .set = fg_restart_sysfs,
+ .get = param_get_int,
+};
+
+module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
+
/* PSY CALLBACKS STAY HERE */
static int fg_psy_get_property(struct power_supply *psy,
@@ -1033,7 +1809,10 @@ static int fg_psy_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CAPACITY:
- rc = fg_get_prop_capacity(chip, &pval->intval);
+ if (chip->fg_restarting)
+ pval->intval = chip->last_soc;
+ else
+ rc = fg_get_prop_capacity(chip, &pval->intval);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
rc = fg_get_battery_voltage(chip, &pval->intval);
@@ -1051,7 +1830,7 @@ static int fg_psy_get_property(struct power_supply *psy,
rc = fg_get_sram_prop(chip, FG_SRAM_OCV, &pval->intval);
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- pval->intval = chip->nom_cap_uah;
+ pval->intval = chip->cl.nom_cap_uah;
break;
case POWER_SUPPLY_PROP_RESISTANCE_ID:
rc = fg_get_batt_id(chip, &pval->intval);
@@ -1061,12 +1840,25 @@ static int fg_psy_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
pval->intval = chip->bp.float_volt_uv;
+ break;
case POWER_SUPPLY_PROP_CYCLE_COUNT:
pval->intval = fg_get_cycle_count(chip);
break;
case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
pval->intval = chip->cyc_ctr.id;
break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+ rc = fg_get_cc_soc(chip, &pval->intval);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ pval->intval = chip->cl.init_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ pval->intval = chip->cl.learned_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ rc = fg_get_cc_soc_sw(chip, &pval->intval);
+ break;
default:
break;
}
@@ -1125,8 +1917,14 @@ static int fg_notifier_cb(struct notifier_block *nb,
return NOTIFY_OK;
if ((strcmp(psy->desc->name, "battery") == 0)
- || (strcmp(psy->desc->name, "usb") == 0))
+ || (strcmp(psy->desc->name, "usb") == 0)) {
+ /*
+ * We cannot vote for awake votable here as that takes
+ * a mutex lock and this is executed in an atomic context.
+ */
+ pm_stay_awake(chip->dev);
schedule_work(&chip->status_change_work);
+ }
return NOTIFY_OK;
}
@@ -1144,6 +1942,10 @@ static enum power_supply_property fg_psy_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+ POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
};
static const struct power_supply_desc fg_psy_desc = {
@@ -1182,6 +1984,32 @@ static int fg_hw_init(struct fg_chip *chip)
return rc;
}
+ /* This SRAM register is only present in v2.0 */
+ if (chip->pmic_rev_id->rev4 == PMICOBALT_V2P0_REV4 &&
+ chip->bp.float_volt_uv > 0) {
+ fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT,
+ chip->bp.float_volt_uv / 1000, buf);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word,
+ chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, buf,
+ chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing float_volt, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (chip->bp.vbatt_full_mv > 0) {
+ fg_encode(chip->sp, FG_SRAM_VBATT_FULL, chip->bp.vbatt_full_mv,
+ buf);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_VBATT_FULL].addr_word,
+ chip->sp[FG_SRAM_VBATT_FULL].addr_byte, buf,
+ chip->sp[FG_SRAM_VBATT_FULL].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing vbatt_full, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
fg_encode(chip->sp, FG_SRAM_CHG_TERM_CURR, chip->dt.chg_term_curr_ma,
buf);
rc = fg_sram_write(chip, chip->sp[FG_SRAM_CHG_TERM_CURR].addr_word,
@@ -1230,16 +2058,9 @@ static int fg_hw_init(struct fg_chip *chip)
}
if (chip->dt.recharge_soc_thr > 0 && chip->dt.recharge_soc_thr < 100) {
- fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR,
- chip->dt.recharge_soc_thr, buf);
- rc = fg_sram_write(chip,
- chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
- chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_byte,
- buf, chip->sp[FG_SRAM_RECHARGE_SOC_THR].len,
- FG_IMA_DEFAULT);
+ rc = fg_set_recharge_soc(chip, chip->dt.recharge_soc_thr);
if (rc < 0) {
- pr_err("Error in writing recharge_soc_thr, rc=%d\n",
- rc);
+ pr_err("Error in setting recharge_soc, rc=%d\n", rc);
return rc;
}
}
@@ -1303,36 +2124,32 @@ static int fg_hw_init(struct fg_chip *chip)
if (chip->cyc_ctr.en)
restore_cycle_counter(chip);
- return 0;
-}
-
-static int fg_memif_init(struct fg_chip *chip)
-{
- return fg_ima_init(chip);
-}
-
-static int fg_batt_profile_init(struct fg_chip *chip)
-{
- int rc;
-
- if (!chip->batt_profile) {
- chip->batt_profile = devm_kcalloc(chip->dev, PROFILE_LEN,
- sizeof(*chip->batt_profile),
- GFP_KERNEL);
- if (!chip->batt_profile)
- return -ENOMEM;
+ if (chip->dt.jeita_hyst_temp >= 0) {
+ val = chip->dt.jeita_hyst_temp << JEITA_TEMP_HYST_SHIFT;
+ rc = fg_masked_write(chip, BATT_INFO_BATT_TEMP_CFG(chip),
+ JEITA_TEMP_HYST_MASK, val);
+ if (rc < 0) {
+ pr_err("Error in writing batt_temp_cfg, rc=%d\n", rc);
+ return rc;
+ }
}
- rc = fg_get_batt_profile(chip);
+ get_batt_temp_delta(chip->dt.batt_temp_delta, &val);
+ rc = fg_masked_write(chip, BATT_INFO_BATT_TMPR_INTR(chip),
+ CHANGE_THOLD_MASK, val);
if (rc < 0) {
- pr_err("Error in getting battery profile, rc:%d\n", rc);
+ pr_err("Error in writing batt_temp_delta, rc=%d\n", rc);
return rc;
}
- schedule_delayed_work(&chip->profile_load_work, msecs_to_jiffies(0));
return 0;
}
+static int fg_memif_init(struct fg_chip *chip)
+{
+ return fg_ima_init(chip);
+}
+
/* INTERRUPT HANDLERS STAY HERE */
static irqreturn_t fg_vbatt_low_irq_handler(int irq, void *data)
@@ -1360,16 +2177,16 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data)
chip->battery_missing = (status & BT_MISS_BIT);
if (chip->battery_missing) {
- chip->batt_id_avail = false;
+ chip->profile_available = false;
chip->profile_loaded = false;
clear_cycle_counter(chip);
} else {
- rc = fg_batt_profile_init(chip);
+ rc = fg_get_batt_profile(chip);
if (rc < 0) {
- pr_err("Error in initializing battery profile, rc=%d\n",
- rc);
+ pr_err("Error in getting battery profile, rc:%d\n", rc);
return IRQ_HANDLED;
}
+ schedule_delayed_work(&chip->profile_load_work, 0);
}
return IRQ_HANDLED;
@@ -1378,8 +2195,33 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data)
static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
+ union power_supply_propval prop = {0, };
+ int rc, batt_temp;
fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+ rc = fg_get_battery_temp(chip, &batt_temp);
+ if (rc < 0) {
+ pr_err("Error in getting batt_temp\n");
+ return IRQ_HANDLED;
+ }
+
+ if (!is_charger_available(chip)) {
+ chip->last_batt_temp = batt_temp;
+ return IRQ_HANDLED;
+ }
+
+ power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+ &prop);
+ chip->health = prop.intval;
+
+ if (chip->last_batt_temp != batt_temp) {
+ chip->last_batt_temp = batt_temp;
+ power_supply_changed(chip->batt_psy);
+ }
+
+ if (abs(chip->last_batt_temp - batt_temp) > 30)
+ pr_warn("Battery temperature last:%d current: %d\n",
+ chip->last_batt_temp, batt_temp);
return IRQ_HANDLED;
}
@@ -1404,6 +2246,7 @@ static irqreturn_t fg_soc_update_irq_handler(int irq, void *data)
static irqreturn_t fg_delta_soc_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
+ int rc;
if (chip->cyc_ctr.en)
schedule_work(&chip->cycle_count_work);
@@ -1412,6 +2255,18 @@ static irqreturn_t fg_delta_soc_irq_handler(int irq, void *data)
power_supply_changed(chip->batt_psy);
fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+
+ if (chip->cl.active)
+ fg_cap_learning_update(chip);
+
+ rc = fg_charge_full_update(chip);
+ if (rc < 0)
+ pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+ rc = fg_adjust_ki_coeff_dischg(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+
return IRQ_HANDLED;
}
@@ -1445,39 +2300,79 @@ static irqreturn_t fg_dummy_irq_handler(int irq, void *data)
static struct fg_irq_info fg_irqs[FG_IRQ_MAX] = {
/* BATT_SOC irqs */
[MSOC_FULL_IRQ] = {
- "msoc-full", fg_soc_irq_handler, true },
+ .name = "msoc-full",
+ .handler = fg_soc_irq_handler,
+ },
[MSOC_HIGH_IRQ] = {
- "msoc-high", fg_soc_irq_handler, true },
+ .name = "msoc-high",
+ .handler = fg_soc_irq_handler,
+ .wakeable = true,
+ },
[MSOC_EMPTY_IRQ] = {
- "msoc-empty", fg_empty_soc_irq_handler, true },
+ .name = "msoc-empty",
+ .handler = fg_empty_soc_irq_handler,
+ .wakeable = true,
+ },
[MSOC_LOW_IRQ] = {
- "msoc-low", fg_soc_irq_handler },
+ .name = "msoc-low",
+ .handler = fg_soc_irq_handler,
+ .wakeable = true,
+ },
[MSOC_DELTA_IRQ] = {
- "msoc-delta", fg_delta_soc_irq_handler, true },
+ .name = "msoc-delta",
+ .handler = fg_delta_soc_irq_handler,
+ .wakeable = true,
+ },
[BSOC_DELTA_IRQ] = {
- "bsoc-delta", fg_delta_soc_irq_handler, true },
+ .name = "bsoc-delta",
+ .handler = fg_dummy_irq_handler,
+ },
[SOC_READY_IRQ] = {
- "soc-ready", fg_first_est_irq_handler, true },
+ .name = "soc-ready",
+ .handler = fg_first_est_irq_handler,
+ .wakeable = true,
+ },
[SOC_UPDATE_IRQ] = {
- "soc-update", fg_soc_update_irq_handler },
+ .name = "soc-update",
+ .handler = fg_soc_update_irq_handler,
+ },
/* BATT_INFO irqs */
[BATT_TEMP_DELTA_IRQ] = {
- "batt-temp-delta", fg_delta_batt_temp_irq_handler },
+ .name = "batt-temp-delta",
+ .handler = fg_delta_batt_temp_irq_handler,
+ .wakeable = true,
+ },
[BATT_MISSING_IRQ] = {
- "batt-missing", fg_batt_missing_irq_handler, true },
+ .name = "batt-missing",
+ .handler = fg_batt_missing_irq_handler,
+ .wakeable = true,
+ },
[ESR_DELTA_IRQ] = {
- "esr-delta", fg_dummy_irq_handler },
+ .name = "esr-delta",
+ .handler = fg_dummy_irq_handler,
+ },
[VBATT_LOW_IRQ] = {
- "vbatt-low", fg_vbatt_low_irq_handler, true },
+ .name = "vbatt-low",
+ .handler = fg_vbatt_low_irq_handler,
+ .wakeable = true,
+ },
[VBATT_PRED_DELTA_IRQ] = {
- "vbatt-pred-delta", fg_dummy_irq_handler },
+ .name = "vbatt-pred-delta",
+ .handler = fg_dummy_irq_handler,
+ },
/* MEM_IF irqs */
[DMA_GRANT_IRQ] = {
- "dma-grant", fg_dummy_irq_handler },
+ .name = "dma-grant",
+ .handler = fg_dummy_irq_handler,
+ },
[MEM_XCP_IRQ] = {
- "mem-xcp", fg_dummy_irq_handler },
+ .name = "mem-xcp",
+ .handler = fg_dummy_irq_handler,
+ },
[IMA_RDY_IRQ] = {
- "ima-rdy", fg_dummy_irq_handler },
+ .name = "ima-rdy",
+ .handler = fg_dummy_irq_handler,
+ },
};
static int fg_get_irq_index_byname(const char *name)
@@ -1532,6 +2427,73 @@ static int fg_register_interrupts(struct fg_chip *chip)
return 0;
}
+static int fg_parse_ki_coefficients(struct fg_chip *chip)
+{
+ struct device_node *node = chip->dev->of_node;
+ int rc, i;
+
+ rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-soc-dischg",
+ sizeof(u32));
+ if (rc != KI_COEFF_SOC_LEVELS)
+ return 0;
+
+ rc = of_property_read_u32_array(node, "qcom,ki-coeff-soc-dischg",
+ chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS);
+ if (rc < 0) {
+ pr_err("Error in reading ki-coeff-soc-dischg, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-med-dischg",
+ sizeof(u32));
+ if (rc != KI_COEFF_SOC_LEVELS)
+ return 0;
+
+ rc = of_property_read_u32_array(node, "qcom,ki-coeff-med-dischg",
+ chip->dt.ki_coeff_med_dischg, KI_COEFF_SOC_LEVELS);
+ if (rc < 0) {
+ pr_err("Error in reading ki-coeff-med-dischg, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-hi-dischg",
+ sizeof(u32));
+ if (rc != KI_COEFF_SOC_LEVELS)
+ return 0;
+
+ rc = of_property_read_u32_array(node, "qcom,ki-coeff-hi-dischg",
+ chip->dt.ki_coeff_hi_dischg, KI_COEFF_SOC_LEVELS);
+ if (rc < 0) {
+ pr_err("Error in reading ki-coeff-hi-dischg, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = 0; i < KI_COEFF_SOC_LEVELS; i++) {
+ if (chip->dt.ki_coeff_soc[i] < 0 ||
+ chip->dt.ki_coeff_soc[i] > FULL_CAPACITY) {
+ pr_err("Error in ki_coeff_soc_dischg values\n");
+ return -EINVAL;
+ }
+
+ if (chip->dt.ki_coeff_med_dischg[i] < 0 ||
+ chip->dt.ki_coeff_med_dischg[i] > KI_COEFF_MAX) {
+ pr_err("Error in ki_coeff_med_dischg values\n");
+ return -EINVAL;
+ }
+
+		if (chip->dt.ki_coeff_hi_dischg[i] < 0 ||
+			chip->dt.ki_coeff_hi_dischg[i] > KI_COEFF_MAX) {
+			pr_err("Error in ki_coeff_hi_dischg values\n");
+			return -EINVAL;
+		}
+ }
+ chip->ki_coeff_dischg_en = true;
+ return 0;
+}
+
#define DEFAULT_CUTOFF_VOLT_MV 3200
#define DEFAULT_EMPTY_VOLT_MV 3100
#define DEFAULT_CHG_TERM_CURR_MA 100
@@ -1542,12 +2504,21 @@ static int fg_register_interrupts(struct fg_chip *chip)
#define DEFAULT_BATT_TEMP_COOL 5
#define DEFAULT_BATT_TEMP_WARM 45
#define DEFAULT_BATT_TEMP_HOT 50
+#define DEFAULT_CL_START_SOC 15
+#define DEFAULT_CL_MIN_TEMP_DECIDEGC 150
+#define DEFAULT_CL_MAX_TEMP_DECIDEGC 450
+#define DEFAULT_CL_MAX_INC_DECIPERC 5
+#define DEFAULT_CL_MAX_DEC_DECIPERC 100
+#define DEFAULT_CL_MIN_LIM_DECIPERC 0
+#define DEFAULT_CL_MAX_LIM_DECIPERC 0
+#define BTEMP_DELTA_LOW 2
+#define BTEMP_DELTA_HIGH 10
static int fg_parse_dt(struct fg_chip *chip)
{
struct device_node *child, *revid_node, *node = chip->dev->of_node;
u32 base, temp;
u8 subtype;
- int rc, len;
+ int rc;
if (!node) {
dev_err(chip->dev, "device tree node missing\n");
@@ -1638,6 +2609,11 @@ static int fg_parse_dt(struct fg_chip *chip)
}
}
+ rc = fg_get_batt_profile(chip);
+ if (rc < 0)
+ pr_warn("profile for batt_id=%dKOhms not found..using OTP, rc:%d\n",
+ chip->batt_id, rc);
+
/* Read all the optional properties below */
rc = of_property_read_u32(node, "qcom,fg-cutoff-voltage", &temp);
if (rc < 0)
@@ -1691,15 +2667,14 @@ static int fg_parse_dt(struct fg_chip *chip)
chip->dt.jeita_thresholds[JEITA_COOL] = DEFAULT_BATT_TEMP_COOL;
chip->dt.jeita_thresholds[JEITA_WARM] = DEFAULT_BATT_TEMP_WARM;
chip->dt.jeita_thresholds[JEITA_HOT] = DEFAULT_BATT_TEMP_HOT;
- if (of_find_property(node, "qcom,fg-jeita-thresholds", &len)) {
- if (len == NUM_JEITA_LEVELS) {
- rc = of_property_read_u32_array(node,
- "qcom,fg-jeita-thresholds",
- chip->dt.jeita_thresholds, len);
- if (rc < 0)
- pr_warn("Error reading Jeita thresholds, default values will be used rc:%d\n",
- rc);
- }
+ if (of_property_count_elems_of_size(node, "qcom,fg-jeita-thresholds",
+ sizeof(u32)) == NUM_JEITA_LEVELS) {
+ rc = of_property_read_u32_array(node,
+ "qcom,fg-jeita-thresholds",
+ chip->dt.jeita_thresholds, NUM_JEITA_LEVELS);
+ if (rc < 0)
+ pr_warn("Error reading Jeita thresholds, default values will be used rc:%d\n",
+ rc);
}
rc = of_property_read_u32(node, "qcom,fg-esr-timer-charging", &temp);
@@ -1724,6 +2699,70 @@ static int fg_parse_dt(struct fg_chip *chip)
if (chip->cyc_ctr.en)
chip->cyc_ctr.id = 1;
+ chip->dt.force_load_profile = of_property_read_bool(node,
+ "qcom,fg-force-load-profile");
+
+ rc = of_property_read_u32(node, "qcom,cl-start-capacity", &temp);
+ if (rc < 0)
+ chip->dt.cl_start_soc = DEFAULT_CL_START_SOC;
+ else
+ chip->dt.cl_start_soc = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-min-temp", &temp);
+ if (rc < 0)
+ chip->dt.cl_min_temp = DEFAULT_CL_MIN_TEMP_DECIDEGC;
+ else
+ chip->dt.cl_min_temp = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-temp", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_temp = DEFAULT_CL_MAX_TEMP_DECIDEGC;
+ else
+ chip->dt.cl_max_temp = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-increment", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_cap_inc = DEFAULT_CL_MAX_INC_DECIPERC;
+ else
+ chip->dt.cl_max_cap_inc = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-decrement", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_cap_dec = DEFAULT_CL_MAX_DEC_DECIPERC;
+ else
+ chip->dt.cl_max_cap_dec = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-min-limit", &temp);
+ if (rc < 0)
+ chip->dt.cl_min_cap_limit = DEFAULT_CL_MIN_LIM_DECIPERC;
+ else
+ chip->dt.cl_min_cap_limit = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-limit", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_cap_limit = DEFAULT_CL_MAX_LIM_DECIPERC;
+ else
+ chip->dt.cl_max_cap_limit = temp;
+
+ rc = of_property_read_u32(node, "qcom,fg-jeita-hyst-temp", &temp);
+ if (rc < 0)
+ chip->dt.jeita_hyst_temp = -EINVAL;
+ else
+ chip->dt.jeita_hyst_temp = temp;
+
+	rc = of_property_read_u32(node, "qcom,fg-batt-temp-delta", &temp);
+	if (rc < 0 || temp <= BTEMP_DELTA_LOW || temp > BTEMP_DELTA_HIGH)
+		chip->dt.batt_temp_delta = -EINVAL;
+	else
+		chip->dt.batt_temp_delta = temp;
+
+ chip->dt.hold_soc_while_full = of_property_read_bool(node,
+ "qcom,hold-soc-while-full");
+
+ rc = fg_parse_ki_coefficients(chip);
+ if (rc < 0)
+ pr_err("Error in parsing Ki coefficients, rc=%d\n", rc);
+
return 0;
}
@@ -1776,6 +2815,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
mutex_init(&chip->bus_lock);
mutex_init(&chip->sram_rw_lock);
mutex_init(&chip->cyc_ctr.lock);
+ mutex_init(&chip->cl.lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
@@ -1836,10 +2876,8 @@ static int fg_gen3_probe(struct platform_device *pdev)
goto exit;
}
- rc = fg_batt_profile_init(chip);
- if (rc < 0)
- dev_warn(chip->dev, "Error in initializing battery profile, rc:%d\n",
- rc);
+ if (chip->profile_available)
+ schedule_delayed_work(&chip->profile_load_work, 0);
device_init_wakeup(chip->dev, true);
pr_debug("FG GEN3 driver successfully probed\n");
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index 57f31d8c58e7..93965dbe99ae 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -58,6 +59,13 @@ static struct smb_params v1_params = {
.max_u = 4800000,
.step_u = 25000,
},
+ .otg_cl = {
+ .name = "usb otg current limit",
+ .reg = OTG_CURRENT_LIMIT_CFG_REG,
+ .min_u = 250000,
+ .max_u = 2000000,
+ .step_u = 250000,
+ },
.dc_icl = {
.name = "dc input current limit",
.reg = DCIN_CURRENT_LIMIT_CFG_REG,
@@ -202,18 +210,22 @@ struct smb_dt_props {
bool no_battery;
int fcc_ua;
int usb_icl_ua;
+ int otg_cl_ua;
int dc_icl_ua;
int fv_uv;
int wipower_max_uw;
u32 step_soc_threshold[STEP_CHARGING_MAX_STEPS - 1];
s32 step_cc_delta[STEP_CHARGING_MAX_STEPS];
struct device_node *revid_dev_node;
+ int float_option;
+ bool hvdcp_disable;
};
struct smb2 {
- struct smb_charger chg;
- struct smb_dt_props dt;
- bool bad_part;
+ struct smb_charger chg;
+ struct dentry *dfs_root;
+ struct smb_dt_props dt;
+ bool bad_part;
};
static int __debug_mask;
@@ -221,11 +233,7 @@ module_param_named(
debug_mask, __debug_mask, int, S_IRUSR | S_IWUSR
);
-static int __pl_master_percent = 50;
-module_param_named(
- pl_master_percent, __pl_master_percent, int, S_IRUSR | S_IWUSR
-);
-
+#define MICRO_1P5A 1500000
static int smb2_parse_dt(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -278,6 +286,11 @@ static int smb2_parse_dt(struct smb2 *chip)
chip->dt.usb_icl_ua = -EINVAL;
rc = of_property_read_u32(node,
+ "qcom,otg-cl-ua", &chip->dt.otg_cl_ua);
+ if (rc < 0)
+ chip->dt.otg_cl_ua = MICRO_1P5A;
+
+ rc = of_property_read_u32(node,
"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
if (rc < 0)
chip->dt.dc_icl_ua = -EINVAL;
@@ -306,6 +319,15 @@ static int smb2_parse_dt(struct smb2 *chip)
}
}
+ of_property_read_u32(node, "qcom,float-option", &chip->dt.float_option);
+ if (chip->dt.float_option < 0 || chip->dt.float_option > 4) {
+ pr_err("qcom,float-option is out of range [0, 4]\n");
+ return -EINVAL;
+ }
+
+ chip->dt.hvdcp_disable = of_property_read_bool(node,
+ "qcom,hvdcp-disable");
+
return 0;
}
@@ -319,6 +341,7 @@ static enum power_supply_property smb2_usb_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_MIN,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_PD_CURRENT_MAX,
POWER_SUPPLY_PROP_CURRENT_MAX,
POWER_SUPPLY_PROP_TYPE,
POWER_SUPPLY_PROP_TYPEC_MODE,
@@ -328,6 +351,7 @@ static enum power_supply_property smb2_usb_props[] = {
POWER_SUPPLY_PROP_PD_ACTIVE,
POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+ POWER_SUPPLY_PROP_PE_START,
};
static int smb2_usb_get_prop(struct power_supply *psy,
@@ -357,6 +381,9 @@ static int smb2_usb_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
rc = smblib_get_prop_usb_voltage_now(chg, val);
break;
+ case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
+ rc = smblib_get_prop_pd_current_max(chg, val);
+ break;
case POWER_SUPPLY_PROP_CURRENT_MAX:
rc = smblib_get_prop_usb_current_max(chg, val);
break;
@@ -390,6 +417,15 @@ static int smb2_usb_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
rc = smblib_get_prop_usb_current_now(chg, val);
break;
+ case POWER_SUPPLY_PROP_PD_IN_HARD_RESET:
+ rc = smblib_get_prop_pd_in_hard_reset(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED:
+ val->intval = chg->system_suspend_supported;
+ break;
+ case POWER_SUPPLY_PROP_PE_START:
+ rc = smblib_get_pe_start(chg, val);
+ break;
default:
pr_err("get prop %d is not supported\n", psp);
rc = -EINVAL;
@@ -417,23 +453,24 @@ static int smb2_usb_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
rc = smblib_set_prop_usb_voltage_max(chg, val);
break;
+ case POWER_SUPPLY_PROP_PD_CURRENT_MAX:
+ rc = smblib_set_prop_pd_current_max(chg, val);
+ break;
case POWER_SUPPLY_PROP_CURRENT_MAX:
rc = smblib_set_prop_usb_current_max(chg, val);
break;
- case POWER_SUPPLY_PROP_TYPE:
- if (chg->pd_active && val->intval == POWER_SUPPLY_TYPE_USB_PD) {
- chg->usb_psy_desc.type = val->intval;
- } else {
- pr_err("set type %d not allowed\n", val->intval);
- rc = -EINVAL;
- }
- break;
case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
rc = smblib_set_prop_typec_power_role(chg, val);
break;
case POWER_SUPPLY_PROP_PD_ACTIVE:
rc = smblib_set_prop_pd_active(chg, val);
break;
+ case POWER_SUPPLY_PROP_PD_IN_HARD_RESET:
+ rc = smblib_set_prop_pd_in_hard_reset(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED:
+ chg->system_suspend_supported = val->intval;
+ break;
default:
pr_err("set prop %d is not supported\n", psp);
rc = -EINVAL;
@@ -600,11 +637,16 @@ static enum power_supply_property smb2_batt_props[] = {
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED,
POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
+ POWER_SUPPLY_PROP_CHARGE_DONE,
+ POWER_SUPPLY_PROP_PARALLEL_DISABLE,
+ POWER_SUPPLY_PROP_PARALLEL_PERCENT,
};
static int smb2_batt_get_prop(struct power_supply *psy,
@@ -644,6 +686,7 @@ static int smb2_batt_get_prop(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
rc = smblib_get_prop_input_current_limited(chg, val);
+ break;
case POWER_SUPPLY_PROP_STEP_CHARGING_ENABLED:
val->intval = chg->step_chg_enabled;
break;
@@ -653,15 +696,32 @@ static int smb2_batt_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
rc = smblib_get_prop_batt_voltage_now(chg, val);
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = get_client_vote(chg->fv_votable, DEFAULT_VOTER);
+ break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
rc = smblib_get_prop_batt_current_now(chg, val);
break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = get_client_vote(chg->fcc_max_votable,
+ DEFAULT_VOTER);
+ break;
case POWER_SUPPLY_PROP_TEMP:
rc = smblib_get_prop_batt_temp(chg, val);
break;
case POWER_SUPPLY_PROP_TECHNOLOGY:
val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
break;
+ case POWER_SUPPLY_PROP_CHARGE_DONE:
+ rc = smblib_get_prop_batt_charge_done(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+ val->intval = get_client_vote(chg->pl_disable_votable,
+ USER_VOTER);
+ break;
+ case POWER_SUPPLY_PROP_PARALLEL_PERCENT:
+ val->intval = chg->pl.slave_pct;
+ break;
default:
pr_err("batt power supply prop %d not supported\n", psp);
return -EINVAL;
@@ -692,6 +752,21 @@ static int smb2_batt_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CAPACITY:
rc = smblib_set_prop_batt_capacity(chg, val);
break;
+ case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+ vote(chg->pl_disable_votable, USER_VOTER, (bool)val->intval, 0);
+ break;
+ case POWER_SUPPLY_PROP_PARALLEL_PERCENT:
+ if (val->intval < 0 || val->intval > 100)
+ return -EINVAL;
+ chg->pl.slave_pct = val->intval;
+ rerun_election(chg->fcc_votable);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ vote(chg->fv_votable, DEFAULT_VOTER, true, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ vote(chg->fcc_max_votable, DEFAULT_VOTER, true, val->intval);
+ break;
default:
rc = -EINVAL;
}
@@ -706,6 +781,8 @@ static int smb2_batt_prop_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_SUSPEND:
case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
case POWER_SUPPLY_PROP_CAPACITY:
+ case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+ case POWER_SUPPLY_PROP_PARALLEL_PERCENT:
return 1;
default:
break;
@@ -974,11 +1051,15 @@ static int smb2_init_hw(struct smb2 *chip)
smblib_get_charge_param(chg, &chg->param.dc_icl,
&chip->dt.dc_icl_ua);
+ chg->otg_cl_ua = chip->dt.otg_cl_ua;
+
/* votes must be cast before configuring software control */
vote(chg->pl_disable_votable,
- USBIN_ICL_VOTER, true, 0);
+ PL_INDIRECT_VOTER, true, 0);
vote(chg->pl_disable_votable,
CHG_STATE_VOTER, true, 0);
+ vote(chg->pl_disable_votable,
+ PARALLEL_PSY_VOTER, true, 0);
vote(chg->usb_suspend_votable,
DEFAULT_VOTER, chip->dt.no_battery, 0);
vote(chg->dc_suspend_votable,
@@ -991,15 +1072,19 @@ static int smb2_init_hw(struct smb2 *chip)
DEFAULT_VOTER, true, chip->dt.usb_icl_ua);
vote(chg->dc_icl_votable,
DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
-
- /*
- * Configure charge enable for software control; active high, and end
- * the charge cycle while the battery is OV.
- */
+ vote(chg->hvdcp_disable_votable, DEFAULT_VOTER,
+ chip->dt.hvdcp_disable, 0);
+ vote(chg->hvdcp_disable_votable, PD_INACTIVE_VOTER,
+ true, 0);
+ vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER,
+ true, 0);
+ vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+ true, 0);
+
+ /* Configure charge enable for software control; active high */
rc = smblib_masked_write(chg, CHGR_CFG2_REG,
CHG_EN_POLARITY_BIT |
- CHG_EN_SRC_BIT |
- BAT_OV_ECC_BIT, BAT_OV_ECC_BIT);
+ CHG_EN_SRC_BIT, 0);
if (rc < 0) {
dev_err(chg->dev, "Couldn't configure charger rc=%d\n", rc);
return rc;
@@ -1092,11 +1177,41 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
+ /* configure float charger options */
+ switch (chip->dt.float_option) {
+ case 1:
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FLOAT_OPTIONS_MASK, 0);
+ break;
+ case 2:
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FLOAT_OPTIONS_MASK, FORCE_FLOAT_SDP_CFG_BIT);
+ break;
+ case 3:
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FLOAT_OPTIONS_MASK, FLOAT_DIS_CHGING_CFG_BIT);
+ break;
+ case 4:
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FLOAT_OPTIONS_MASK, SUSPEND_FLOAT_CFG_BIT);
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure float charger options rc=%d\n",
+ rc);
+ return rc;
+ }
+
return rc;
}
static int smb2_setup_wa_flags(struct smb2 *chip)
{
+ struct smb_charger *chg = &chip->chg;
struct pmic_revid_data *pmic_rev_id;
struct device_node *revid_dev_node;
@@ -1119,6 +1234,8 @@ static int smb2_setup_wa_flags(struct smb2 *chip)
switch (pmic_rev_id->pmic_subtype) {
case PMICOBALT_SUBTYPE:
+ if (pmic_rev_id->rev4 == PMICOBALT_V1P1_REV4) /* PMI rev 1.1 */
+ chg->wa_flags |= QC_CHARGER_DETECTION_WA_BIT;
break;
default:
pr_err("PMIC subtype %d not supported\n",
@@ -1284,7 +1401,8 @@ static struct smb2_irq_info smb2_irqs[] = {
},
{
.name = "dcin-plugin",
- .handler = smblib_handle_debug,
+ .handler = smblib_handle_dc_plugin,
+ .wake = true,
},
{
.name = "div2-en-dg",
@@ -1408,9 +1526,74 @@ static int smb2_request_interrupts(struct smb2 *chip)
return rc;
}
-/*********
- * PROBE *
- *********/
+#if defined(CONFIG_DEBUG_FS)
+
+static int force_batt_psy_update_write(void *data, u64 val)
+{
+ struct smb_charger *chg = data;
+
+ power_supply_changed(chg->batt_psy);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_batt_psy_update_ops, NULL,
+ force_batt_psy_update_write, "0x%02llx\n");
+
+static int force_usb_psy_update_write(void *data, u64 val)
+{
+ struct smb_charger *chg = data;
+
+ power_supply_changed(chg->usb_psy);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_usb_psy_update_ops, NULL,
+ force_usb_psy_update_write, "0x%02llx\n");
+
+static int force_dc_psy_update_write(void *data, u64 val)
+{
+ struct smb_charger *chg = data;
+
+ power_supply_changed(chg->dc_psy);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_dc_psy_update_ops, NULL,
+ force_dc_psy_update_write, "0x%02llx\n");
+
+static void smb2_create_debugfs(struct smb2 *chip)
+{
+ struct dentry *file;
+
+ chip->dfs_root = debugfs_create_dir("charger", NULL);
+ if (IS_ERR_OR_NULL(chip->dfs_root)) {
+ pr_err("Couldn't create charger debugfs rc=%ld\n",
+ (long)chip->dfs_root);
+ return;
+ }
+
+ file = debugfs_create_file("force_batt_psy_update", S_IRUSR | S_IWUSR,
+ chip->dfs_root, chip, &force_batt_psy_update_ops);
+ if (IS_ERR_OR_NULL(file))
+ pr_err("Couldn't create force_batt_psy_update file rc=%ld\n",
+ (long)file);
+
+ file = debugfs_create_file("force_usb_psy_update", S_IRUSR | S_IWUSR,
+ chip->dfs_root, chip, &force_usb_psy_update_ops);
+ if (IS_ERR_OR_NULL(file))
+ pr_err("Couldn't create force_usb_psy_update file rc=%ld\n",
+ (long)file);
+
+ file = debugfs_create_file("force_dc_psy_update", S_IRUSR | S_IWUSR,
+ chip->dfs_root, chip, &force_dc_psy_update_ops);
+ if (IS_ERR_OR_NULL(file))
+ pr_err("Couldn't create force_dc_psy_update file rc=%ld\n",
+ (long)file);
+}
+
+#else
+
+static void smb2_create_debugfs(struct smb2 *chip)
+{}
+
+#endif
static int smb2_probe(struct platform_device *pdev)
{
@@ -1428,7 +1611,7 @@ static int smb2_probe(struct platform_device *pdev)
chg->param = v1_params;
chg->debug_mask = &__debug_mask;
chg->mode = PARALLEL_MASTER;
- chg->pl.master_percent = &__pl_master_percent;
+ chg->name = "PMI";
chg->regmap = dev_get_regmap(chg->dev->parent, NULL);
if (!chg->regmap) {
@@ -1497,6 +1680,7 @@ static int smb2_probe(struct platform_device *pdev)
return 0;
}
+ chg->pl.slave_pct = 50;
rc = smb2_init_batt_psy(chip);
if (rc < 0) {
pr_err("Couldn't initialize batt psy rc=%d\n", rc);
@@ -1522,6 +1706,8 @@ static int smb2_probe(struct platform_device *pdev)
goto cleanup;
}
+ smb2_create_debugfs(chip);
+
pr_info("QPNP SMB2 probed successfully\n");
return rc;
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index e93d03788f11..de4391024970 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -12,6 +12,7 @@
#include <linux/device.h>
#include <linux/regmap.h>
+#include <linux/delay.h>
#include <linux/iio/consumer.h>
#include <linux/power_supply.h>
#include <linux/regulator/driver.h>
@@ -21,18 +22,24 @@
#include "storm-watch.h"
#include "pmic-voter.h"
+#define smblib_err(chg, fmt, ...) \
+ pr_err("%s: %s: " fmt, chg->name, \
+ __func__, ##__VA_ARGS__) \
+
#define smblib_dbg(chg, reason, fmt, ...) \
do { \
if (*chg->debug_mask & (reason)) \
- dev_info(chg->dev, fmt, ##__VA_ARGS__); \
+ pr_info("%s: %s: " fmt, chg->name, \
+ __func__, ##__VA_ARGS__); \
else \
- dev_dbg(chg->dev, fmt, ##__VA_ARGS__); \
+ pr_debug("%s: %s: " fmt, chg->name, \
+ __func__, ##__VA_ARGS__); \
} while (0)
static bool is_secure(struct smb_charger *chg, int addr)
{
- /* assume everything above 0xC0 is secure */
- return (bool)((addr & 0xFF) >= 0xC0);
+ /* assume everything above 0xA0 is secure */
+ return (bool)((addr & 0xFF) >= 0xA0);
}
int smblib_read(struct smb_charger *chg, u16 addr, u8 *val)
@@ -84,21 +91,19 @@ unlock:
return rc;
}
-static int smblib_get_step_charging_adjustment(struct smb_charger *chg,
- int *cc_offset)
+static int smblib_get_step_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
{
- int step_state;
- int rc;
+ int rc, step_state;
u8 stat;
if (!chg->step_chg_enabled) {
- *cc_offset = 0;
+ *cc_delta_ua = 0;
return 0;
}
rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
rc);
return rc;
}
@@ -106,57 +111,71 @@ static int smblib_get_step_charging_adjustment(struct smb_charger *chg,
step_state = (stat & STEP_CHARGING_STATUS_MASK) >>
STEP_CHARGING_STATUS_SHIFT;
rc = smblib_get_charge_param(chg, &chg->param.step_cc_delta[step_state],
- cc_offset);
+ cc_delta_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
+ return rc;
+ }
- return rc;
+ return 0;
}
-static void smblib_fcc_split_ua(struct smb_charger *chg, int total_fcc,
- int *master_ua, int *slave_ua)
+static int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
{
- int rc, cc_reduction_ua = 0;
- int step_cc_delta;
- int master_percent = min(max(*chg->pl.master_percent, 0), 100);
- union power_supply_propval pval = {0, };
- int effective_fcc;
+ int rc, cc_minus_ua;
+ u8 stat;
- /*
- * if master_percent is 0, s/w will configure master's fcc to zero and
- * slave's fcc to the max. However since master's fcc is zero it
- * disables its own charging and as a result the slave's charging is
- * disabled via the fault line.
- */
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+ rc);
+ return rc;
+ }
- rc = smblib_get_prop_batt_health(chg, &pval);
- if (rc == 0) {
- if (pval.intval == POWER_SUPPLY_HEALTH_WARM
- || pval.intval == POWER_SUPPLY_HEALTH_COOL) {
- rc = smblib_get_charge_param(chg,
- &chg->param.jeita_cc_comp,
- &cc_reduction_ua);
- if (rc < 0) {
- dev_err(chg->dev, "Could not get jeita comp, rc=%d\n",
- rc);
- cc_reduction_ua = 0;
- }
- }
+ if (!(stat & BAT_TEMP_STATUS_SOFT_LIMIT_MASK)) {
+ *cc_delta_ua = 0;
+ return 0;
}
- rc = smblib_get_step_charging_adjustment(chg, &step_cc_delta);
- if (rc < 0)
- step_cc_delta = 0;
+ rc = smblib_get_charge_param(chg, &chg->param.jeita_cc_comp,
+ &cc_minus_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get jeita cc minus rc=%d\n", rc);
+ return rc;
+ }
- /*
- * During JEITA condition and with step_charging enabled, PMI will
- * pick the lower of the two value: (FCC - JEITA current compensation)
- * or (FCC + step_charging current delta)
- */
+ *cc_delta_ua = -cc_minus_ua;
+ return 0;
+}
+
+static void smblib_split_fcc(struct smb_charger *chg, int total_ua,
+ int *master_ua, int *slave_ua)
+{
+ int rc, jeita_cc_delta_ua, step_cc_delta_ua, effective_total_ua,
+ slave_limited_ua, hw_cc_delta_ua = 0;
+
+ rc = smblib_get_step_cc_delta(chg, &step_cc_delta_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
+ step_cc_delta_ua = 0;
+ } else {
+ hw_cc_delta_ua = step_cc_delta_ua;
+ }
+
+ rc = smblib_get_jeita_cc_delta(chg, &jeita_cc_delta_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get jeita cc delta rc=%d\n", rc);
+ jeita_cc_delta_ua = 0;
+ } else if (jeita_cc_delta_ua < 0) {
+ /* HW will take the min between JEITA and step charge */
+ hw_cc_delta_ua = min(hw_cc_delta_ua, jeita_cc_delta_ua);
+ }
- effective_fcc = min(max(0, total_fcc - cc_reduction_ua),
- max(0, total_fcc + step_cc_delta));
- *master_ua = (effective_fcc * master_percent) / 100;
- *slave_ua = (effective_fcc - *master_ua) * chg->pl.taper_percent / 100;
- *master_ua = max(0, *master_ua + total_fcc - effective_fcc);
+ effective_total_ua = max(0, total_ua + hw_cc_delta_ua);
+ slave_limited_ua = min(effective_total_ua, chg->input_limited_fcc_ua);
+ *slave_ua = (slave_limited_ua * chg->pl.slave_pct) / 100;
+ *slave_ua = (*slave_ua * chg->pl.taper_pct) / 100;
+ *master_ua = max(0, total_ua - *slave_ua);
}
/********************
@@ -171,7 +190,7 @@ int smblib_get_charge_param(struct smb_charger *chg,
rc = smblib_read(chg, param->reg, &val_raw);
if (rc < 0) {
- dev_err(chg->dev, "%s: Couldn't read from 0x%04x rc=%d\n",
+ smblib_err(chg, "%s: Couldn't read from 0x%04x rc=%d\n",
param->name, param->reg, rc);
return rc;
}
@@ -193,7 +212,7 @@ int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend)
rc = smblib_read(chg, USBIN_CMD_IL_REG, &temp);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read USBIN_CMD_IL rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read USBIN_CMD_IL rc=%d\n", rc);
return rc;
}
*suspend = temp & USBIN_SUSPEND_BIT;
@@ -207,47 +226,97 @@ struct apsd_result {
const enum power_supply_type pst;
};
+enum {
+ UNKNOWN,
+ SDP,
+ CDP,
+ DCP,
+ OCP,
+ FLOAT,
+ HVDCP2,
+ HVDCP3,
+ MAX_TYPES
+};
+
static const struct apsd_result const smblib_apsd_results[] = {
- {"UNKNOWN", 0, POWER_SUPPLY_TYPE_UNKNOWN},
- {"SDP", SDP_CHARGER_BIT, POWER_SUPPLY_TYPE_USB},
- {"CDP", CDP_CHARGER_BIT, POWER_SUPPLY_TYPE_USB_CDP},
- {"DCP", DCP_CHARGER_BIT, POWER_SUPPLY_TYPE_USB_DCP},
- {"OCP", OCP_CHARGER_BIT, POWER_SUPPLY_TYPE_USB_DCP},
- {"FLOAT", FLOAT_CHARGER_BIT, POWER_SUPPLY_TYPE_USB_DCP},
- {"HVDCP2", DCP_CHARGER_BIT | QC_2P0_BIT, POWER_SUPPLY_TYPE_USB_HVDCP},
- {"HVDCP3", DCP_CHARGER_BIT | QC_3P0_BIT, POWER_SUPPLY_TYPE_USB_HVDCP_3},
+ [UNKNOWN] = {
+ .name = "UNKNOWN",
+ .bit = 0,
+ .pst = POWER_SUPPLY_TYPE_UNKNOWN
+ },
+ [SDP] = {
+ .name = "SDP",
+ .bit = SDP_CHARGER_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB
+ },
+ [CDP] = {
+ .name = "CDP",
+ .bit = CDP_CHARGER_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB_CDP
+ },
+ [DCP] = {
+ .name = "DCP",
+ .bit = DCP_CHARGER_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB_DCP
+ },
+ [OCP] = {
+ .name = "OCP",
+ .bit = OCP_CHARGER_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB_DCP
+ },
+ [FLOAT] = {
+ .name = "FLOAT",
+ .bit = FLOAT_CHARGER_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB_DCP
+ },
+ [HVDCP2] = {
+ .name = "HVDCP2",
+ .bit = DCP_CHARGER_BIT | QC_2P0_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB_HVDCP
+ },
+ [HVDCP3] = {
+ .name = "HVDCP3",
+ .bit = DCP_CHARGER_BIT | QC_3P0_BIT,
+ .pst = POWER_SUPPLY_TYPE_USB_HVDCP_3,
+ },
};
static const struct apsd_result *smblib_get_apsd_result(struct smb_charger *chg)
{
int rc, i;
- u8 stat;
+ u8 apsd_stat, stat;
+ const struct apsd_result *result = &smblib_apsd_results[UNKNOWN];
- rc = smblib_read(chg, APSD_STATUS_REG, &stat);
+ rc = smblib_read(chg, APSD_STATUS_REG, &apsd_stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read APSD_STATUS rc=%d\n", rc);
- return &smblib_apsd_results[0];
+ smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
+ return result;
}
- smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
+ smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", apsd_stat);
- if (!(stat & APSD_DTC_STATUS_DONE_BIT))
- return &smblib_apsd_results[0];
+ if (!(apsd_stat & APSD_DTC_STATUS_DONE_BIT))
+ return result;
rc = smblib_read(chg, APSD_RESULT_STATUS_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read APSD_RESULT_STATUS rc=%d\n",
+ smblib_err(chg, "Couldn't read APSD_RESULT_STATUS rc=%d\n",
rc);
- return &smblib_apsd_results[0];
+ return result;
}
stat &= APSD_RESULT_STATUS_MASK;
for (i = 0; i < ARRAY_SIZE(smblib_apsd_results); i++) {
if (smblib_apsd_results[i].bit == stat)
- return &smblib_apsd_results[i];
+ result = &smblib_apsd_results[i];
+ }
+
+ if (apsd_stat & QC_CHARGER_BIT) {
+ /* since its a qc_charger, either return HVDCP3 or HVDCP2 */
+ if (result != &smblib_apsd_results[HVDCP3])
+ result = &smblib_apsd_results[HVDCP2];
}
- dev_err(chg->dev, "Couldn't find an APSD result for 0x%02x\n", stat);
- return &smblib_apsd_results[0];
+ return result;
}
@@ -267,7 +336,7 @@ int smblib_set_charge_param(struct smb_charger *chg,
return -EINVAL;
} else {
if (val_u > param->max_u || val_u < param->min_u) {
- dev_err(chg->dev, "%s: %d is out of range [%d, %d]\n",
+ smblib_err(chg, "%s: %d is out of range [%d, %d]\n",
param->name, val_u, param->min_u, param->max_u);
return -EINVAL;
}
@@ -277,7 +346,7 @@ int smblib_set_charge_param(struct smb_charger *chg,
rc = smblib_write(chg, param->reg, val_raw);
if (rc < 0) {
- dev_err(chg->dev, "%s: Couldn't write 0x%02x to 0x%04x rc=%d\n",
+ smblib_err(chg, "%s: Couldn't write 0x%02x to 0x%04x rc=%d\n",
param->name, val_raw, param->reg, rc);
return rc;
}
@@ -294,14 +363,14 @@ static int step_charge_soc_update(struct smb_charger *chg, int capacity)
rc = smblib_set_charge_param(chg, &chg->param.step_soc, capacity);
if (rc < 0) {
- dev_err(chg->dev, "Error in updating soc, rc=%d\n", rc);
+ smblib_err(chg, "Error in updating soc, rc=%d\n", rc);
return rc;
}
rc = smblib_write(chg, STEP_CHG_SOC_VBATT_V_UPDATE_REG,
STEP_CHG_SOC_VBATT_V_UPDATE_BIT);
if (rc < 0) {
- dev_err(chg->dev,
+ smblib_err(chg,
"Couldn't set STEP_CHG_SOC_VBATT_V_UPDATE_REG rc=%d\n",
rc);
return rc;
@@ -317,7 +386,7 @@ int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend)
rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
suspend ? USBIN_SUSPEND_BIT : 0);
if (rc < 0)
- dev_err(chg->dev, "Couldn't write %s to USBIN_SUSPEND_BIT rc=%d\n",
+ smblib_err(chg, "Couldn't write %s to USBIN_SUSPEND_BIT rc=%d\n",
suspend ? "suspend" : "resume", rc);
return rc;
@@ -330,7 +399,7 @@ int smblib_set_dc_suspend(struct smb_charger *chg, bool suspend)
rc = smblib_masked_write(chg, DCIN_CMD_IL_REG, DCIN_SUSPEND_BIT,
suspend ? DCIN_SUSPEND_BIT : 0);
if (rc < 0)
- dev_err(chg->dev, "Couldn't write %s to DCIN_SUSPEND_BIT rc=%d\n",
+ smblib_err(chg, "Couldn't write %s to DCIN_SUSPEND_BIT rc=%d\n",
suspend ? "suspend" : "resume", rc);
return rc;
@@ -358,14 +427,14 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
} else if (min_allowed_uv < MICRO_12V && max_allowed_uv <= MICRO_12V) {
allowed_voltage = USBIN_ADAPTER_ALLOW_9V_TO_12V;
} else {
- dev_err(chg->dev, "invalid allowed voltage [%d, %d]\n",
+ smblib_err(chg, "invalid allowed voltage [%d, %d]\n",
min_allowed_uv, max_allowed_uv);
return -EINVAL;
}
rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, allowed_voltage);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't write 0x%02x to USBIN_ADAPTER_ALLOW_CFG rc=%d\n",
+ smblib_err(chg, "Couldn't write 0x%02x to USBIN_ADAPTER_ALLOW_CFG rc=%d\n",
allowed_voltage, rc);
return rc;
}
@@ -377,50 +446,44 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
* HELPER FUNCTIONS *
********************/
-static int smblib_update_usb_type(struct smb_charger *chg)
+static int try_rerun_apsd_for_hvdcp(struct smb_charger *chg)
{
- int rc = 0;
const struct apsd_result *apsd_result;
- /* if PD is active, APSD is disabled so won't have a valid result */
- if (chg->pd_active)
- return rc;
-
- apsd_result = smblib_get_apsd_result(chg);
- chg->usb_psy_desc.type = apsd_result->pst;
- return rc;
+ /*
+ * PD_INACTIVE_VOTER on hvdcp_disable_votable indicates whether
+ * apsd rerun was tried earlier
+ */
+ if (get_client_vote(chg->hvdcp_disable_votable, PD_INACTIVE_VOTER)) {
+ vote(chg->hvdcp_disable_votable, PD_INACTIVE_VOTER, false, 0);
+ /* ensure hvdcp is enabled */
+ if (!get_effective_result(chg->hvdcp_disable_votable)) {
+ apsd_result = smblib_get_apsd_result(chg);
+ if (apsd_result->pst == POWER_SUPPLY_TYPE_USB_HVDCP) {
+ /* rerun APSD */
+ smblib_dbg(chg, PR_MISC, "rerun APSD\n");
+ smblib_masked_write(chg, CMD_APSD_REG,
+ APSD_RERUN_BIT,
+ APSD_RERUN_BIT);
+ }
+ }
+ }
+ return 0;
}
-static int smblib_detach_usb(struct smb_charger *chg)
+static const struct apsd_result *smblib_update_usb_type(struct smb_charger *chg)
{
- int rc;
-
- cancel_delayed_work_sync(&chg->hvdcp_detect_work);
- chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
-
- /* reconfigure allowed voltage for HVDCP */
- rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG,
- USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
- rc);
- return rc;
- }
-
- chg->voltage_min_uv = MICRO_5V;
- chg->voltage_max_uv = MICRO_5V;
+ const struct apsd_result *apsd_result;
- /* clear USB ICL vote for PD_VOTER */
- rc = vote(chg->usb_icl_votable, PD_VOTER, false, 0);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't vote for USB ICL rc=%d\n",
- rc);
- return rc;
+ /* if PD is active, APSD is disabled so won't have a valid result */
+ if (chg->pd_active) {
+ chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_USB_PD;
+ return 0;
}
- vote(chg->pd_allowed_votable, DEFAULT_VOTER, false, 0);
-
- return rc;
+ apsd_result = smblib_get_apsd_result(chg);
+ chg->usb_psy_desc.type = apsd_result->pst;
+ return apsd_result;
}
static int smblib_notifier_call(struct notifier_block *nb,
@@ -451,7 +514,7 @@ static int smblib_register_notifier(struct smb_charger *chg)
chg->nb.notifier_call = smblib_notifier_call;
rc = power_supply_reg_notifier(&chg->nb);
if (rc < 0) {
- pr_err("Couldn't register psy notifier rc = %d\n", rc);
+ smblib_err(chg, "Couldn't register psy notifier rc = %d\n", rc);
return rc;
}
@@ -521,48 +584,51 @@ static int smblib_fcc_max_vote_callback(struct votable *votable, void *data,
{
struct smb_charger *chg = data;
- return vote(chg->fcc_votable, FCC_MAX_RESULT, true, fcc_ua);
+ return vote(chg->fcc_votable, FCC_MAX_RESULT_VOTER, true, fcc_ua);
}
static int smblib_fcc_vote_callback(struct votable *votable, void *data,
- int fcc_ua, const char *client)
+ int total_fcc_ua, const char *client)
{
struct smb_charger *chg = data;
- int rc = 0;
union power_supply_propval pval = {0, };
- int master_ua = fcc_ua, slave_ua;
+ int rc, master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
- if (fcc_ua < 0) {
- smblib_dbg(chg, PR_MISC, "No Voter\n");
+ if (total_fcc_ua < 0)
return 0;
- }
if (chg->mode == PARALLEL_MASTER
&& !get_effective_result_locked(chg->pl_disable_votable)) {
- smblib_fcc_split_ua(chg, fcc_ua, &master_ua, &slave_ua);
+ smblib_split_fcc(chg, total_fcc_ua, &master_fcc_ua,
+ &slave_fcc_ua);
/*
* parallel charger is not disabled, implying that
* chg->pl.psy exists
*/
- pval.intval = slave_ua;
+ pval.intval = slave_fcc_ua;
rc = power_supply_set_property(chg->pl.psy,
POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
if (rc < 0) {
- dev_err(chg->dev, "Could not set parallel fcc, rc=%d\n",
+ smblib_err(chg, "Could not set parallel fcc, rc=%d\n",
rc);
return rc;
}
- chg->pl.slave_fcc = slave_ua;
+ chg->pl.slave_fcc_ua = slave_fcc_ua;
}
- rc = smblib_set_charge_param(chg, &chg->param.fcc, master_ua);
+ rc = smblib_set_charge_param(chg, &chg->param.fcc, master_fcc_ua);
if (rc < 0) {
- dev_err(chg->dev, "Error in setting fcc, rc=%d\n", rc);
+ smblib_err(chg, "Couldn't set master fcc rc=%d\n", rc);
return rc;
}
+ smblib_dbg(chg, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
+ master_fcc_ua, slave_fcc_ua,
+ (master_fcc_ua * 100) / total_fcc_ua,
+ (slave_fcc_ua * 100) / total_fcc_ua);
+
return 0;
}
@@ -581,8 +647,7 @@ static int smblib_fv_vote_callback(struct votable *votable, void *data,
rc = smblib_set_charge_param(chg, &chg->param.fv, fv_uv);
if (rc < 0) {
- dev_err(chg->dev,
- "Couldn't set floating voltage rc=%d\n", rc);
+ smblib_err(chg, "Couldn't set floating voltage rc=%d\n", rc);
return rc;
}
@@ -591,7 +656,7 @@ static int smblib_fv_vote_callback(struct votable *votable, void *data,
rc = power_supply_set_property(chg->pl.psy,
POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
if (rc < 0) {
- dev_err(chg->dev,
+ smblib_err(chg,
"Couldn't set float on parallel rc=%d\n", rc);
return rc;
}
@@ -600,41 +665,67 @@ static int smblib_fv_vote_callback(struct votable *votable, void *data,
return 0;
}
-#define USBIN_25MA 25000
-#define USBIN_100MA 100000
+#define USBIN_25MA 25000
+#define USBIN_100MA 100000
+#define USBIN_150MA 150000
+#define USBIN_500MA 500000
+#define USBIN_900MA 900000
static int smblib_usb_icl_vote_callback(struct votable *votable, void *data,
int icl_ua, const char *client)
{
struct smb_charger *chg = data;
int rc = 0;
- bool suspend;
+ bool suspend = (icl_ua < USBIN_25MA);
+ u8 icl_options = 0;
- if (icl_ua < 0) {
- smblib_dbg(chg, PR_MISC, "No Voter hence suspending\n");
- icl_ua = 0;
- }
-
- suspend = (icl_ua < USBIN_25MA);
if (suspend)
- goto suspend;
+ goto out;
- if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB)
- rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
- USB51_MODE_BIT,
- (icl_ua > USBIN_100MA) ? USB51_MODE_BIT : 0);
- else
+ if (chg->usb_psy_desc.type != POWER_SUPPLY_TYPE_USB) {
rc = smblib_set_charge_param(chg, &chg->param.usb_icl, icl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
+ return rc;
+ }
+
+ goto out;
+ }
+
+ /* power source is SDP */
+ switch (icl_ua) {
+ case USBIN_100MA:
+ /* USB 2.0 100mA */
+ icl_options = 0;
+ break;
+ case USBIN_150MA:
+ /* USB 3.0 150mA */
+ icl_options = CFG_USB3P0_SEL_BIT;
+ break;
+ case USBIN_500MA:
+ /* USB 2.0 500mA */
+ icl_options = USB51_MODE_BIT;
+ break;
+ case USBIN_900MA:
+ /* USB 3.0 900mA */
+ icl_options = CFG_USB3P0_SEL_BIT | USB51_MODE_BIT;
+ break;
+ default:
+ smblib_err(chg, "ICL %duA isn't supported for SDP\n", icl_ua);
+ icl_options = 0;
+ break;
+ }
+out:
+ rc = smblib_masked_write(chg, USBIN_ICL_OPTIONS_REG,
+ CFG_USB3P0_SEL_BIT | USB51_MODE_BIT, icl_options);
if (rc < 0) {
- dev_err(chg->dev,
- "Couldn't set USB input current limit rc=%d\n", rc);
+ smblib_err(chg, "Couldn't set ICL opetions rc=%d\n", rc);
return rc;
}
-suspend:
rc = vote(chg->usb_suspend_votable, PD_VOTER, suspend, 0);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't %s input rc=%d\n",
+ smblib_err(chg, "Couldn't %s input rc=%d\n",
suspend ? "suspend" : "resume", rc);
return rc;
}
@@ -642,6 +733,30 @@ suspend:
return rc;
}
+#define MICRO_250MA 250000
+static int smblib_otg_cl_config(struct smb_charger *chg, int otg_cl_ua)
+{
+ int rc = 0;
+
+ rc = smblib_set_charge_param(chg, &chg->param.otg_cl, otg_cl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set otg current limit rc=%d\n", rc);
+ return rc;
+ }
+
+ /* configure PFM/PWM mode for OTG regulator */
+ rc = smblib_masked_write(chg, DC_ENG_SSUPPLY_CFG3_REG,
+ ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT,
+ otg_cl_ua > MICRO_250MA ? 1 : 0);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't write DC_ENG_SSUPPLY_CFG3_REG rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
static int smblib_dc_icl_vote_callback(struct votable *votable, void *data,
int icl_ua, const char *client)
{
@@ -660,7 +775,7 @@ static int smblib_dc_icl_vote_callback(struct votable *votable, void *data,
rc = smblib_set_charge_param(chg, &chg->param.dc_icl, icl_ua);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't set DC input current limit rc=%d\n",
+ smblib_err(chg, "Couldn't set DC input current limit rc=%d\n",
rc);
return rc;
}
@@ -668,13 +783,25 @@ static int smblib_dc_icl_vote_callback(struct votable *votable, void *data,
suspend:
rc = vote(chg->dc_suspend_votable, USER_VOTER, suspend, 0);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't vote to %s DC rc=%d\n",
+ smblib_err(chg, "Couldn't vote to %s DC rc=%d\n",
suspend ? "suspend" : "resume", rc);
return rc;
}
return rc;
}
+static int smblib_pd_disallowed_votable_indirect_callback(
+ struct votable *votable, void *data, int disallowed, const char *client)
+{
+ struct smb_charger *chg = data;
+ int rc;
+
+ rc = vote(chg->pd_allowed_votable, PD_DISALLOWED_INDIRECT_VOTER,
+ !disallowed, 0);
+
+ return rc;
+}
+
static int smblib_awake_vote_callback(struct votable *votable, void *data,
int awake, const char *client)
{
@@ -698,7 +825,7 @@ static int smblib_pl_disable_vote_callback(struct votable *votable, void *data,
if (chg->mode != PARALLEL_MASTER || !chg->pl.psy)
return 0;
- chg->pl.taper_percent = 100;
+ chg->pl.taper_pct = 100;
rerun_election(chg->fv_votable);
rerun_election(chg->fcc_votable);
@@ -706,11 +833,14 @@ static int smblib_pl_disable_vote_callback(struct votable *votable, void *data,
rc = power_supply_set_property(chg->pl.psy,
POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
if (rc < 0) {
- dev_err(chg->dev,
+ smblib_err(chg,
"Couldn't change slave suspend state rc=%d\n", rc);
return rc;
}
+ smblib_dbg(chg, PR_PARALLEL, "parallel charging %s\n",
+ pl_disable ? "disabled" : "enabled");
+
return 0;
}
@@ -724,25 +854,107 @@ static int smblib_chg_disable_vote_callback(struct votable *votable, void *data,
CHARGING_ENABLE_CMD_BIT,
chg_disable ? 0 : CHARGING_ENABLE_CMD_BIT);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't %s charging rc=%d\n",
+ smblib_err(chg, "Couldn't %s charging rc=%d\n",
chg_disable ? "disable" : "enable", rc);
return rc;
}
return 0;
}
+
+static int smblib_pl_enable_indirect_vote_callback(struct votable *votable,
+ void *data, int chg_enable, const char *client)
+{
+ struct smb_charger *chg = data;
+
+ vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, !chg_enable, 0);
+
+ return 0;
+}
+
+static int smblib_hvdcp_disable_vote_callback(struct votable *votable,
+ void *data,
+ int hvdcp_disable, const char *client)
+{
+ struct smb_charger *chg = data;
+ int rc;
+ u8 val = HVDCP_AUTH_ALG_EN_CFG_BIT
+ | HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT | HVDCP_EN_BIT;
+
+ /*
+ * Disable the autonomous bit and auth bit for disabling hvdcp.
+ * This ensures only qc 2.0 detection runs but no vbus
+ * negotiation happens.
+ */
+ if (hvdcp_disable)
+ val = HVDCP_EN_BIT;
+
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+ HVDCP_EN_BIT
+ | HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT
+ | HVDCP_AUTH_ALG_EN_CFG_BIT,
+ val);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't %s hvdcp rc=%d\n",
+ hvdcp_disable ? "disable" : "enable", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int smblib_apsd_disable_vote_callback(struct votable *votable,
+ void *data,
+ int apsd_disable, const char *client)
+{
+ struct smb_charger *chg = data;
+ int rc;
+
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+ AUTO_SRC_DETECT_BIT,
+ apsd_disable ? 0 : AUTO_SRC_DETECT_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't %s APSD rc=%d\n",
+ apsd_disable ? "disable" : "enable", rc);
+ return rc;
+ }
+
+ return 0;
+}
/*****************
* OTG REGULATOR *
*****************/
+#define OTG_SOFT_START_DELAY_MS 20
int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
+ u8 stat;
int rc = 0;
- rc = regmap_write(chg->regmap, CMD_OTG_REG, OTG_EN_BIT);
- if (rc < 0)
- dev_err(chg->dev, "Couldn't enable OTG regulator rc=%d\n", rc);
+ rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't enable OTG regulator rc=%d\n", rc);
+ return rc;
+ }
+
+ msleep(OTG_SOFT_START_DELAY_MS);
+ rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read OTG_STATUS_REG rc=%d\n", rc);
+ return rc;
+ }
+ if (stat & BOOST_SOFTSTART_DONE_BIT)
+ smblib_otg_cl_config(chg, chg->otg_cl_ua);
return rc;
}
@@ -752,9 +964,22 @@ int smblib_vbus_regulator_disable(struct regulator_dev *rdev)
struct smb_charger *chg = rdev_get_drvdata(rdev);
int rc = 0;
- rc = regmap_write(chg->regmap, CMD_OTG_REG, 0);
- if (rc < 0)
- dev_err(chg->dev, "Couldn't disable OTG regulator rc=%d\n", rc);
+ rc = smblib_write(chg, CMD_OTG_REG, 0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't disable OTG regulator rc=%d\n", rc);
+ return rc;
+ }
+
+ smblib_otg_cl_config(chg, MICRO_250MA);
+
+ rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT, 0);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
return rc;
}
@@ -767,7 +992,7 @@ int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev)
rc = smblib_read(chg, CMD_OTG_REG, &cmd);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read CMD_OTG rc=%d", rc);
+ smblib_err(chg, "Couldn't read CMD_OTG rc=%d", rc);
return rc;
}
@@ -790,7 +1015,7 @@ int smblib_vconn_regulator_enable(struct regulator_dev *rdev)
*/
rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
return rc;
}
stat = stat & CC_ORIENTATION_BIT ? 0 : VCONN_EN_ORIENTATION_BIT;
@@ -798,7 +1023,7 @@ int smblib_vconn_regulator_enable(struct regulator_dev *rdev)
VCONN_EN_VALUE_BIT | VCONN_EN_ORIENTATION_BIT,
VCONN_EN_VALUE_BIT | stat);
if (rc < 0)
- dev_err(chg->dev, "Couldn't enable vconn setting rc=%d\n", rc);
+ smblib_err(chg, "Couldn't enable vconn setting rc=%d\n", rc);
return rc;
}
@@ -811,7 +1036,7 @@ int smblib_vconn_regulator_disable(struct regulator_dev *rdev)
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
VCONN_EN_VALUE_BIT, 0);
if (rc < 0)
- dev_err(chg->dev, "Couldn't disable vconn regulator rc=%d\n",
+ smblib_err(chg, "Couldn't disable vconn regulator rc=%d\n",
rc);
return rc;
@@ -825,7 +1050,7 @@ int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev)
rc = smblib_read(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, &cmd);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+ smblib_err(chg, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
rc);
return rc;
}
@@ -853,8 +1078,7 @@ int smblib_get_prop_batt_present(struct smb_charger *chg,
rc = smblib_read(chg, BATIF_BASE + INT_RT_STS_OFFSET, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read BATIF_INT_RT_STS rc=%d\n",
- rc);
+ smblib_err(chg, "Couldn't read BATIF_INT_RT_STS rc=%d\n", rc);
return rc;
}
@@ -883,45 +1107,61 @@ int smblib_get_prop_batt_capacity(struct smb_charger *chg,
int smblib_get_prop_batt_status(struct smb_charger *chg,
union power_supply_propval *val)
{
- int rc;
- u8 stat;
union power_supply_propval pval = {0, };
+ bool usb_online, dc_online;
+ u8 stat;
+ int rc;
- smblib_get_prop_input_suspend(chg, &pval);
- if (pval.intval) {
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ rc = smblib_get_prop_usb_online(chg, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get usb online property rc=%d\n",
+ rc);
return rc;
}
+ usb_online = (bool)pval.intval;
- rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+ rc = smblib_get_prop_dc_online(chg, &pval);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+ smblib_err(chg, "Couldn't get dc online property rc=%d\n",
rc);
return rc;
}
+ dc_online = (bool)pval.intval;
- if (!(stat & (USE_USBIN_BIT | USE_DCIN_BIT)) ||
- !(stat & VALID_INPUT_POWER_SOURCE_BIT)) {
+ if (!usb_online && !dc_online) {
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
return rc;
}
rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
rc);
return rc;
}
- smblib_dbg(chg, PR_REGISTER, "BATTERY_CHARGER_STATUS_1 = 0x%02x\n",
- stat);
stat = stat & BATTERY_CHARGER_STATUS_MASK;
- if (stat >= COMPLETED_CHARGE)
- val->intval = POWER_SUPPLY_STATUS_FULL;
- else
+ switch (stat) {
+ case TRICKLE_CHARGE:
+ case PRE_CHARGE:
+ case FAST_CHARGE:
+ case FULLON_CHARGE:
+ case TAPER_CHARGE:
val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case TERMINATE_CHARGE:
+ case INHIBIT_CHARGE:
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case DISABLE_CHARGE:
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+ }
- return rc;
+ return 0;
}
int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
@@ -932,12 +1172,10 @@ int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
rc);
return rc;
}
- smblib_dbg(chg, PR_REGISTER, "BATTERY_CHARGER_STATUS_1 = 0x%02x\n",
- stat);
switch (stat & BATTERY_CHARGER_STATUS_MASK) {
case TRICKLE_CHARGE:
@@ -961,12 +1199,13 @@ int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
int smblib_get_prop_batt_health(struct smb_charger *chg,
union power_supply_propval *val)
{
+ union power_supply_propval pval;
int rc;
u8 stat;
rc = smblib_read(chg, BATTERY_CHARGER_STATUS_2_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_2 rc=%d\n",
rc);
return rc;
}
@@ -974,9 +1213,19 @@ int smblib_get_prop_batt_health(struct smb_charger *chg,
stat);
if (stat & CHARGER_ERROR_STATUS_BAT_OV_BIT) {
- dev_err(chg->dev, "battery over-voltage\n");
- val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- goto done;
+ rc = smblib_get_prop_batt_voltage_now(chg, &pval);
+ if (!rc) {
+ /*
+ * If Vbatt is within 40mV above Vfloat, then don't
+ * treat it as overvoltage.
+ */
+ if (pval.intval >=
+ get_effective_result(chg->fv_votable) + 40000) {
+ val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
+ smblib_err(chg, "battery over-voltage\n");
+ goto done;
+ }
+ }
}
if (stat & BAT_TEMP_STATUS_TOO_COLD_BIT)
@@ -1009,7 +1258,7 @@ int smblib_get_prop_input_current_limited(struct smb_charger *chg,
rc = smblib_read(chg, AICL_STATUS_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read AICL_STATUS rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n", rc);
return rc;
}
val->intval = (stat & SOFT_ILIMIT_BIT) || chg->is_hdc;
@@ -1068,7 +1317,7 @@ int smblib_get_prop_step_chg_step(struct smb_charger *chg,
rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
rc);
return rc;
}
@@ -1079,6 +1328,24 @@ int smblib_get_prop_step_chg_step(struct smb_charger *chg,
return rc;
}
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ stat = stat & BATTERY_CHARGER_STATUS_MASK;
+ val->intval = (stat == TERMINATE_CHARGE);
+ return 0;
+}
+
/***********************
* BATTERY PSY SETTERS *
***********************/
@@ -1090,14 +1357,14 @@ int smblib_set_prop_input_suspend(struct smb_charger *chg,
rc = vote(chg->usb_suspend_votable, USER_VOTER, (bool)val->intval, 0);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't vote to %s USB rc=%d\n",
+ smblib_err(chg, "Couldn't vote to %s USB rc=%d\n",
(bool)val->intval ? "suspend" : "resume", rc);
return rc;
}
rc = vote(chg->dc_suspend_votable, USER_VOTER, (bool)val->intval, 0);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't vote to %s DC rc=%d\n",
+ smblib_err(chg, "Couldn't vote to %s DC rc=%d\n",
(bool)val->intval ? "suspend" : "resume", rc);
return rc;
}
@@ -1130,13 +1397,14 @@ int smblib_set_prop_system_temp_level(struct smb_charger *chg,
chg->system_temp_level = val->intval;
if (chg->system_temp_level == chg->thermal_levels)
- return vote(chg->chg_disable_votable, THERMAL_DAEMON, true, 0);
+ return vote(chg->chg_disable_votable,
+ THERMAL_DAEMON_VOTER, true, 0);
- vote(chg->chg_disable_votable, THERMAL_DAEMON, false, 0);
+ vote(chg->chg_disable_votable, THERMAL_DAEMON_VOTER, false, 0);
if (chg->system_temp_level == 0)
- return vote(chg->fcc_votable, THERMAL_DAEMON, false, 0);
+ return vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, false, 0);
- vote(chg->fcc_votable, THERMAL_DAEMON, true,
+ vote(chg->fcc_votable, THERMAL_DAEMON_VOTER, true,
chg->thermal_mitigation[chg->system_temp_level]);
return 0;
}
@@ -1153,7 +1421,7 @@ int smblib_get_prop_dc_present(struct smb_charger *chg,
rc = smblib_read(chg, DC_INT_RT_STS_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read DC_INT_RT_STS_REG rc=%d\n",
+ smblib_err(chg, "Couldn't read DC_INT_RT_STS_REG rc=%d\n",
rc);
return rc;
}
@@ -1178,7 +1446,7 @@ int smblib_get_prop_dc_online(struct smb_charger *chg,
rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+ smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
rc);
return rc;
}
@@ -1223,8 +1491,7 @@ int smblib_get_prop_usb_present(struct smb_charger *chg,
rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
- rc);
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
return rc;
}
smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n",
@@ -1248,7 +1515,7 @@ int smblib_get_prop_usb_online(struct smb_charger *chg,
rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+ smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
rc);
return rc;
}
@@ -1257,7 +1524,6 @@ int smblib_get_prop_usb_online(struct smb_charger *chg,
val->intval = (stat & USE_USBIN_BIT) &&
(stat & VALID_INPUT_POWER_SOURCE_BIT);
-
return rc;
}
@@ -1280,10 +1546,18 @@ int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
return iio_read_channel_processed(chg->iio.usbin_v_chan, &val->intval);
}
+int smblib_get_prop_pd_current_max(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ val->intval = get_client_vote_locked(chg->usb_icl_votable, PD_VOTER);
+ return 0;
+}
+
int smblib_get_prop_usb_current_max(struct smb_charger *chg,
union power_supply_propval *val)
{
- val->intval = get_effective_result_locked(chg->usb_icl_votable);
+ val->intval = get_client_vote_locked(chg->usb_icl_votable,
+ USB_PSY_VOTER);
return 0;
}
@@ -1348,8 +1622,7 @@ int smblib_get_prop_typec_cc_orientation(struct smb_charger *chg,
rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
- rc);
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
return rc;
}
smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n",
@@ -1383,7 +1656,7 @@ static int smblib_get_prop_ufp_mode(struct smb_charger *chg)
rc = smblib_read(chg, TYPE_C_STATUS_1_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read TYPE_C_STATUS_1 rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_1 rc=%d\n", rc);
return POWER_SUPPLY_TYPEC_NONE;
}
smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_1 = 0x%02x\n", stat);
@@ -1411,7 +1684,7 @@ static int smblib_get_prop_dfp_mode(struct smb_charger *chg)
rc = smblib_read(chg, TYPE_C_STATUS_2_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read TYPE_C_STATUS_2 rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_2 rc=%d\n", rc);
return POWER_SUPPLY_TYPEC_NONE;
}
smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_2 = 0x%02x\n", stat);
@@ -1442,7 +1715,7 @@ int smblib_get_prop_typec_mode(struct smb_charger *chg,
rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
val->intval = POWER_SUPPLY_TYPEC_NONE;
return rc;
}
@@ -1469,7 +1742,7 @@ int smblib_get_prop_typec_power_role(struct smb_charger *chg,
rc = smblib_read(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, &ctrl);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+ smblib_err(chg, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
rc);
return rc;
}
@@ -1493,7 +1766,7 @@ int smblib_get_prop_typec_power_role(struct smb_charger *chg,
break;
default:
val->intval = POWER_SUPPLY_TYPEC_PR_NONE;
- dev_err(chg->dev, "unsupported power role 0x%02lx\n",
+ smblib_err(chg, "unsupported power role 0x%02lx\n",
ctrl & (DFP_EN_CMD_BIT | UFP_EN_CMD_BIT));
return -EINVAL;
}
@@ -1504,7 +1777,7 @@ int smblib_get_prop_typec_power_role(struct smb_charger *chg,
int smblib_get_prop_pd_allowed(struct smb_charger *chg,
union power_supply_propval *val)
{
- val->intval = get_effective_result_locked(chg->pd_allowed_votable);
+ val->intval = get_effective_result(chg->pd_allowed_votable);
return 0;
}
@@ -1514,16 +1787,68 @@ int smblib_get_prop_input_current_settled(struct smb_charger *chg,
return smblib_get_charge_param(chg, &chg->param.icl_stat, &val->intval);
}
+int smblib_get_prop_pd_in_hard_reset(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+ u8 ctrl;
+
+ rc = smblib_read(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, &ctrl);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+ val->intval = ctrl & EXIT_SNK_BASED_ON_CC_BIT;
+ return 0;
+}
+
+int smblib_get_pe_start(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ /*
+ * hvdcp timeout voter is the last one to allow pd. Use its vote
+ * to indicate start of pe engine
+ */
+ val->intval
+ = !get_client_vote_locked(chg->pd_disallowed_votable_indirect,
+ HVDCP_TIMEOUT_VOTER);
+ return 0;
+}
+
/*******************
* USB PSY SETTERS *
* *****************/
+int smblib_set_prop_pd_current_max(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc;
+
+ if (chg->pd_active)
+ rc = vote(chg->usb_icl_votable, PD_VOTER, true, val->intval);
+ else
+ rc = -EPERM;
+
+ return rc;
+}
+
int smblib_set_prop_usb_current_max(struct smb_charger *chg,
const union power_supply_propval *val)
{
int rc;
- rc = vote(chg->usb_icl_votable, PD_VOTER, true, val->intval);
+ if (!chg->pd_active) {
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ true, val->intval);
+ } else if (chg->system_suspend_supported) {
+ if (val->intval <= USBIN_25MA)
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ true, val->intval);
+ else
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ false, 0);
+ }
return rc;
}
@@ -1547,14 +1872,14 @@ int smblib_set_prop_typec_power_role(struct smb_charger *chg,
power_role = DFP_EN_CMD_BIT;
break;
default:
- dev_err(chg->dev, "power role %d not supported\n", val->intval);
+ smblib_err(chg, "power role %d not supported\n", val->intval);
return -EINVAL;
}
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
TYPEC_POWER_ROLE_CMD_MASK, power_role);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
+ smblib_err(chg, "Couldn't write 0x%02x to TYPE_C_INTRPT_ENB_SOFTWARE_CTRL rc=%d\n",
power_role, rc);
return rc;
}
@@ -1571,12 +1896,16 @@ int smblib_set_prop_usb_voltage_min(struct smb_charger *chg,
rc = smblib_set_usb_pd_allowed_voltage(chg, min_uv,
chg->voltage_max_uv);
if (rc < 0) {
- dev_err(chg->dev, "invalid max voltage %duV rc=%d\n",
+ smblib_err(chg, "invalid max voltage %duV rc=%d\n",
val->intval, rc);
return rc;
}
- chg->voltage_min_uv = val->intval;
+ if (chg->mode == PARALLEL_MASTER)
+ vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER,
+ min_uv > MICRO_5V, 0);
+
+ chg->voltage_min_uv = min_uv;
return rc;
}
@@ -1589,12 +1918,12 @@ int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
rc = smblib_set_usb_pd_allowed_voltage(chg, chg->voltage_min_uv,
max_uv);
if (rc < 0) {
- dev_err(chg->dev, "invalid min voltage %duV rc=%d\n",
+ smblib_err(chg, "invalid min voltage %duV rc=%d\n",
val->intval, rc);
return rc;
}
- chg->voltage_max_uv = val->intval;
+ chg->voltage_max_uv = max_uv;
return rc;
}
@@ -1602,63 +1931,101 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
const union power_supply_propval *val)
{
int rc;
- u8 stat;
+ u8 stat = 0;
+ bool cc_debounced;
+ bool orientation;
+ bool pd_active = val->intval;
if (!get_effective_result(chg->pd_allowed_votable)) {
- dev_err(chg->dev, "PD is not allowed\n");
+ smblib_err(chg, "PD is not allowed\n");
return -EINVAL;
}
- rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
- AUTO_SRC_DETECT_BIT,
- val->intval ? 0 : AUTO_SRC_DETECT_BIT);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't %s APSD rc=%d\n",
- val->intval ? "disable" : "enable", rc);
- return rc;
- }
-
- vote(chg->pd_allowed_votable, PD_VOTER, val->intval, 0);
+ vote(chg->apsd_disable_votable, PD_VOTER, pd_active, 0);
+ vote(chg->pd_allowed_votable, PD_VOTER, pd_active, 0);
/*
* VCONN_EN_ORIENTATION_BIT controls whether to use CC1 or CC2 line
* when TYPEC_SPARE_CFG_BIT (CC pin selection s/w override) is set
* or when VCONN_EN_VALUE_BIT is set.
*/
- if (val->intval) {
- rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
- if (rc < 0) {
- dev_err(chg->dev,
- "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
- rc);
- return rc;
- }
+ rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
+ return rc;
+ }
- stat &= CC_ORIENTATION_BIT;
+ if (pd_active) {
+ orientation = stat & CC_ORIENTATION_BIT;
rc = smblib_masked_write(chg,
- TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
- VCONN_EN_ORIENTATION_BIT,
- stat ? 0 : VCONN_EN_ORIENTATION_BIT);
- if (rc < 0)
- dev_err(chg->dev,
+ TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ VCONN_EN_ORIENTATION_BIT,
+ orientation ? 0 : VCONN_EN_ORIENTATION_BIT);
+ if (rc < 0) {
+ smblib_err(chg,
"Couldn't enable vconn on CC line rc=%d\n", rc);
+ return rc;
+ }
}
/* CC pin selection s/w override in PD session; h/w otherwise. */
rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
TYPEC_SPARE_CFG_BIT,
- val->intval ? TYPEC_SPARE_CFG_BIT : 0);
+ pd_active ? TYPEC_SPARE_CFG_BIT : 0);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't change cc_out ctrl to %s rc=%d\n",
- val->intval ? "SW" : "HW", rc);
+ smblib_err(chg, "Couldn't change cc_out ctrl to %s rc=%d\n",
+ pd_active ? "SW" : "HW", rc);
return rc;
}
- chg->pd_active = (bool)val->intval;
+ cc_debounced = (bool)(stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
+ if (!pd_active && cc_debounced)
+ try_rerun_apsd_for_hvdcp(chg);
+
+ chg->pd_active = pd_active;
smblib_update_usb_type(chg);
+ power_supply_changed(chg->usb_psy);
+
+ rc = smblib_masked_write(chg, TYPE_C_CFG_3_REG, EN_TRYSINK_MODE_BIT,
+ chg->pd_active ? 0 : EN_TRYSINK_MODE_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set TRYSINK_MODE rc=%d\n", rc);
+ return rc;
+ }
+
return rc;
}
+int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
+ const union power_supply_propval *val)
+{
+ int rc;
+
+ rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ EXIT_SNK_BASED_ON_CC_BIT,
+ (val->intval) ? EXIT_SNK_BASED_ON_CC_BIT : 0);
+
+ vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, val->intval, 0);
+
+ return rc;
+}
+
+/************************
+ * PARALLEL PSY GETTERS *
+ ************************/
+
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+ union power_supply_propval *pval)
+{
+ if (IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+ chg->iio.batt_i_chan = iio_channel_get(chg->dev, "batt_i");
+
+ if (IS_ERR(chg->iio.batt_i_chan))
+ return PTR_ERR(chg->iio.batt_i_chan);
+
+ return iio_read_channel_processed(chg->iio.batt_i_chan, &pval->intval);
+}
+
/**********************
* INTERRUPT HANDLERS *
**********************/
@@ -1672,43 +2039,54 @@ irqreturn_t smblib_handle_debug(int irq, void *data)
return IRQ_HANDLED;
}
-irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
+static void smblib_pl_handle_chg_state_change(struct smb_charger *chg, u8 stat)
{
- struct smb_irq_data *irq_data = data;
- struct smb_charger *chg = irq_data->parent_data;
- union power_supply_propval pval = {0, };
- int rc;
-
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+ bool pl_enabled;
if (chg->mode != PARALLEL_MASTER)
- return IRQ_HANDLED;
+ return;
- rc = smblib_get_prop_batt_charge_type(chg, &pval);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't get batt charge type rc=%d\n", rc);
- return IRQ_HANDLED;
+ pl_enabled = !get_effective_result_locked(chg->pl_disable_votable);
+ switch (stat) {
+ case FAST_CHARGE:
+ case FULLON_CHARGE:
+ vote(chg->pl_disable_votable, CHG_STATE_VOTER, false, 0);
+ break;
+ case TAPER_CHARGE:
+ if (pl_enabled) {
+ cancel_delayed_work_sync(&chg->pl_taper_work);
+ schedule_delayed_work(&chg->pl_taper_work, 0);
+ }
+ break;
+ case TERMINATE_CHARGE:
+ case INHIBIT_CHARGE:
+ case DISABLE_CHARGE:
+ vote(chg->pl_disable_votable, TAPER_END_VOTER, false, 0);
+ break;
+ default:
+ break;
}
+}
- if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_FAST)
- vote(chg->pl_disable_votable, CHG_STATE_VOTER, false, 0);
+irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ u8 stat;
+ int rc;
- if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER
- && !get_effective_result_locked(chg->pl_disable_votable)) {
- cancel_delayed_work_sync(&chg->pl_taper_work);
- schedule_delayed_work(&chg->pl_taper_work, 0);
- }
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
- rc = smblib_get_prop_batt_status(chg, &pval);
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't get batt status type rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
return IRQ_HANDLED;
}
- if (pval.intval == POWER_SUPPLY_STATUS_FULL) {
- power_supply_changed(chg->batt_psy);
- vote(chg->pl_disable_votable, TAPER_END_VOTER, false, 0);
- }
+ stat = stat & BATTERY_CHARGER_STATUS_MASK;
+ smblib_pl_handle_chg_state_change(chg, stat);
+ power_supply_changed(chg->batt_psy);
return IRQ_HANDLED;
}
@@ -1756,7 +2134,7 @@ irqreturn_t smblib_handle_step_chg_soc_update_request(int irq, void *data)
rc = smblib_get_prop_batt_capacity(chg, &pval);
if (rc < 0)
- dev_err(chg->dev, "Couldn't get batt capacity rc=%d\n", rc);
+ smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
else
step_charge_soc_update(chg, pval.intval);
@@ -1805,7 +2183,7 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
"dpdm-supply", NULL)) {
chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
if (IS_ERR(chg->dpdm_reg)) {
- dev_err(chg->dev, "Couldn't get dpdm regulator rc=%ld\n",
+ smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
PTR_ERR(chg->dpdm_reg));
chg->dpdm_reg = NULL;
}
@@ -1816,7 +2194,7 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
return IRQ_HANDLED;
}
@@ -1827,7 +2205,7 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
rc = regulator_enable(chg->dpdm_reg);
if (rc < 0)
- dev_err(chg->dev, "Couldn't enable dpdm regulator rc=%d\n",
+ smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
rc);
}
} else {
@@ -1835,7 +2213,7 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
rc = regulator_disable(chg->dpdm_reg);
if (rc < 0)
- dev_err(chg->dev, "Couldn't disable dpdm regulator rc=%d\n",
+ smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n",
rc);
}
}
@@ -1847,51 +2225,34 @@ skip_dpdm_float:
return IRQ_HANDLED;
}
-#define MICRO_5P5V 5500000
-#define USB_WEAK_INPUT_MA 1500000
-static bool is_icl_pl_ready(struct smb_charger *chg)
+#define USB_WEAK_INPUT_UA 1400000
+#define EFFICIENCY_PCT 80
+irqreturn_t smblib_handle_icl_change(int irq, void *data)
{
- union power_supply_propval pval = {0, };
- int icl_ma;
- int rc;
-
- rc = smblib_get_prop_usb_voltage_now(chg, &pval);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't get prop usb voltage rc=%d\n", rc);
- return false;
- }
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ int rc, icl_ua;
- if (pval.intval <= MICRO_5P5V) {
- rc = smblib_get_charge_param(chg,
- &chg->param.icl_stat, &icl_ma);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't get ICL status rc=%d\n",
- rc);
- return false;
- }
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
- if (icl_ma < USB_WEAK_INPUT_MA)
- return false;
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat, &icl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
+ return IRQ_HANDLED;
}
- /*
- * Always enable parallel charging when USB INPUT is higher than 5V
- * regardless of the AICL results. Assume chargers above 5V are strong
- */
+ if (chg->mode != PARALLEL_MASTER)
+ return IRQ_HANDLED;
- return true;
-}
+ chg->input_limited_fcc_ua = div64_s64(
+ (s64)icl_ua * MICRO_5V * EFFICIENCY_PCT,
+ (s64)get_effective_result(chg->fv_votable) * 100);
-irqreturn_t smblib_handle_icl_change(int irq, void *data)
-{
- struct smb_irq_data *irq_data = data;
- struct smb_charger *chg = irq_data->parent_data;
-
- if (chg->mode == PARALLEL_MASTER)
- vote(chg->pl_disable_votable, USBIN_ICL_VOTER,
- !is_icl_pl_ready(chg), 0);
+ if (!get_effective_result(chg->pl_disable_votable))
+ rerun_election(chg->fcc_votable);
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+ vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER,
+ icl_ua >= USB_WEAK_INPUT_UA, 0);
return IRQ_HANDLED;
}
@@ -1926,12 +2287,31 @@ static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg,
if (!rising)
return;
+ if (chg->mode == PARALLEL_MASTER)
+ vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0);
+
/* the APSD done handler will set the USB supply type */
apsd_result = smblib_get_apsd_result(chg);
smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n",
apsd_result->name);
}
+static void smblib_handle_hvdcp_check_timeout(struct smb_charger *chg,
+ bool rising, bool qc_charger)
+{
+ /* Hold off PD only until hvdcp 2.0 detection timeout */
+ if (rising) {
+ vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+ false, 0);
+ if (get_effective_result(chg->pd_disallowed_votable_indirect))
+ /* could be a legacy cable, try doing hvdcp */
+ try_rerun_apsd_for_hvdcp(chg);
+ }
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: smblib_handle_hvdcp_check_timeout %s\n",
+ rising ? "rising" : "falling");
+}
+
/* triggers when HVDCP is detected */
static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg,
bool rising)
@@ -1948,32 +2328,30 @@ static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg,
#define HVDCP_DET_MS 2500
static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
{
- int rc;
const struct apsd_result *apsd_result;
if (!rising)
return;
- apsd_result = smblib_get_apsd_result(chg);
+ apsd_result = smblib_update_usb_type(chg);
switch (apsd_result->bit) {
case SDP_CHARGER_BIT:
case CDP_CHARGER_BIT:
case OCP_CHARGER_BIT:
case FLOAT_CHARGER_BIT:
- vote(chg->pd_allowed_votable, DEFAULT_VOTER, true, 0);
+ /* if not DCP then no hvdcp timeout happens. Enable pd here */
+ vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+ false, 0);
break;
case DCP_CHARGER_BIT:
- schedule_delayed_work(&chg->hvdcp_detect_work,
- msecs_to_jiffies(HVDCP_DET_MS));
+ if (chg->wa_flags & QC_CHARGER_DETECTION_WA_BIT)
+ schedule_delayed_work(&chg->hvdcp_detect_work,
+ msecs_to_jiffies(HVDCP_DET_MS));
break;
default:
break;
}
- rc = smblib_update_usb_type(chg);
- if (rc < 0)
- dev_err(chg->dev, "Couldn't update usb type rc=%d\n", rc);
-
smblib_dbg(chg, PR_INTERRUPT, "IRQ: apsd-done rising; %s detected\n",
apsd_result->name);
}
@@ -1987,7 +2365,7 @@ irqreturn_t smblib_handle_usb_source_change(int irq, void *data)
rc = smblib_read(chg, APSD_STATUS_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read APSD_STATUS rc=%d\n", rc);
+ smblib_err(chg, "Couldn't read APSD_STATUS rc=%d\n", rc);
return IRQ_HANDLED;
}
smblib_dbg(chg, PR_REGISTER, "APSD_STATUS = 0x%02x\n", stat);
@@ -1998,6 +2376,10 @@ irqreturn_t smblib_handle_usb_source_change(int irq, void *data)
smblib_handle_hvdcp_detect_done(chg,
(bool)(stat & QC_CHARGER_BIT));
+ smblib_handle_hvdcp_check_timeout(chg,
+ (bool)(stat & HVDCP_CHECK_TIMEOUT_BIT),
+ (bool)(stat & QC_CHARGER_BIT));
+
smblib_handle_hvdcp_3p0_auth_done(chg,
(bool)(stat & QC_AUTH_DONE_STATUS_BIT));
@@ -2015,47 +2397,124 @@ irqreturn_t smblib_handle_usb_source_change(int irq, void *data)
return IRQ_HANDLED;
}
-static void smblib_handle_typec_cc(struct smb_charger *chg, bool attached)
+static void typec_source_removal(struct smb_charger *chg)
{
int rc;
- if (!attached) {
- rc = smblib_detach_usb(chg);
- if (rc < 0)
- dev_err(chg->dev, "Couldn't detach USB rc=%d\n", rc);
+ vote(chg->pl_disable_votable, TYPEC_SRC_VOTER, true, 0);
+ /* reset both usbin current and voltage votes */
+ vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
+ vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
+ /* reset taper_end voter here */
+ vote(chg->pl_disable_votable, TAPER_END_VOTER, false, 0);
+
+ cancel_delayed_work_sync(&chg->hvdcp_detect_work);
+
+ /* reconfigure allowed voltage for HVDCP */
+ rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG,
+ USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
+ rc);
+
+ chg->voltage_min_uv = MICRO_5V;
+ chg->voltage_max_uv = MICRO_5V;
+
+ /* clear USB ICL vote for PD_VOTER */
+ rc = vote(chg->usb_icl_votable, PD_VOTER, false, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't un-vote for USB ICL rc=%d\n", rc);
+
+ /* clear USB ICL vote for USB_PSY_VOTER */
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't un-vote for USB ICL rc=%d\n", rc);
+}
+
+static void typec_source_insertion(struct smb_charger *chg)
+{
+ vote(chg->pl_disable_votable, TYPEC_SRC_VOTER, false, 0);
+}
+
+static void typec_sink_insertion(struct smb_charger *chg)
+{
+ /* when a sink is inserted we should not wait on hvdcp timeout to
+ * enable pd
+ */
+ vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+ false, 0);
+}
+
+static void smblib_handle_typec_removal(struct smb_charger *chg)
+{
+ vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, true, 0);
+ vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER, true, 0);
+ vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER, true, 0);
+ vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
+
+ /* reset votes from vbus_cc_short */
+ vote(chg->hvdcp_disable_votable, VBUS_CC_SHORT_VOTER, true, 0);
+
+ vote(chg->hvdcp_disable_votable, PD_INACTIVE_VOTER, true, 0);
+
+ /*
+ * cable could be removed during hard reset, remove its vote to
+ * disable apsd
+ */
+ vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0);
+
+ typec_source_removal(chg);
+
+ smblib_update_usb_type(chg);
+}
+
+static void smblib_handle_typec_insertion(struct smb_charger *chg,
+ bool sink_attached, bool legacy_cable)
+{
+ int rp;
+ bool vbus_cc_short = false;
+
+ vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, false, 0);
+
+ if (sink_attached) {
+ typec_source_removal(chg);
+ typec_sink_insertion(chg);
+ } else {
+ typec_source_insertion(chg);
}
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: CC %s\n",
- attached ? "attached" : "detached");
+ vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER,
+ legacy_cable, 0);
+
+ if (legacy_cable) {
+ rp = smblib_get_prop_ufp_mode(chg);
+ if (rp == POWER_SUPPLY_TYPEC_SOURCE_HIGH
+ || rp == POWER_SUPPLY_TYPEC_NON_COMPLIANT) {
+ vbus_cc_short = true;
+ smblib_err(chg, "Disabling PD and HVDCP, VBUS-CC shorted, rp = %d found\n",
+ rp);
+ }
+ }
+
+ vote(chg->hvdcp_disable_votable, VBUS_CC_SHORT_VOTER, vbus_cc_short, 0);
+ vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER,
+ vbus_cc_short, 0);
}
static void smblib_handle_typec_debounce_done(struct smb_charger *chg,
- bool rising, bool sink_attached)
+ bool rising, bool sink_attached, bool legacy_cable)
{
int rc;
union power_supply_propval pval = {0, };
- /* allow PD for attached sinks */
- if (rising && sink_attached)
- vote(chg->pd_allowed_votable, DEFAULT_VOTER, true, 0);
+ if (rising)
+ smblib_handle_typec_insertion(chg, sink_attached, legacy_cable);
+ else
+ smblib_handle_typec_removal(chg);
rc = smblib_get_prop_typec_mode(chg, &pval);
if (rc < 0)
- dev_err(chg->dev, "Couldn't get prop typec mode rc=%d\n", rc);
-
- /*
- * vote to enable parallel charging if a source is attached, and disable
- * otherwise
- */
- vote(chg->pl_disable_votable, TYPEC_SRC_VOTER,
- !rising || sink_attached, 0);
-
- if (!rising || sink_attached) {
- /* icl votes to disable parallel charging */
- vote(chg->pl_disable_votable, USBIN_ICL_VOTER, true, 0);
- /* reset taper_end voter here */
- vote(chg->pl_disable_votable, TAPER_END_VOTER, false, 0);
- }
+ smblib_err(chg, "Couldn't get prop typec mode rc=%d\n", rc);
smblib_dbg(chg, PR_INTERRUPT, "IRQ: debounce-done %s; Type-C %s detected\n",
rising ? "rising" : "falling",
@@ -2068,28 +2527,43 @@ irqreturn_t smblib_handle_usb_typec_change(int irq, void *data)
struct smb_charger *chg = irq_data->parent_data;
int rc;
u8 stat;
+ bool debounce_done, sink_attached, legacy_cable;
rc = smblib_read(chg, TYPE_C_STATUS_4_REG, &stat);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't read TYPE_C_STATUS_4 rc=%d\n",
- rc);
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_4 rc=%d\n", rc);
return IRQ_HANDLED;
}
smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_4 = 0x%02x\n", stat);
+ debounce_done = (bool)(stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT);
+ sink_attached = (bool)(stat & UFP_DFP_MODE_STATUS_BIT);
- if (stat & TYPEC_VBUS_ERROR_STATUS_BIT) {
- dev_err(chg->dev, "IRQ: vbus-error rising\n");
+ rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc);
return IRQ_HANDLED;
}
+ smblib_dbg(chg, PR_REGISTER, "TYPE_C_STATUS_5 = 0x%02x\n", stat);
+ legacy_cable = (bool)(stat & TYPEC_LEGACY_CABLE_STATUS_BIT);
- smblib_handle_typec_cc(chg,
- (bool)(stat & CC_ATTACHED_BIT));
smblib_handle_typec_debounce_done(chg,
- (bool)(stat & TYPEC_DEBOUNCE_DONE_STATUS_BIT),
- (bool)(stat & UFP_DFP_MODE_STATUS_BIT));
+ debounce_done, sink_attached, legacy_cable);
power_supply_changed(chg->usb_psy);
+ if (stat & TYPEC_VBUS_ERROR_STATUS_BIT)
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s vbus-error\n",
+ irq_data->name);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t smblib_handle_dc_plugin(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+
+ power_supply_changed(chg->dc_psy);
return IRQ_HANDLED;
}
@@ -2112,14 +2586,15 @@ static void smblib_hvdcp_detect_work(struct work_struct *work)
{
struct smb_charger *chg = container_of(work, struct smb_charger,
hvdcp_detect_work.work);
- const struct apsd_result *apsd_result;
- apsd_result = smblib_get_apsd_result(chg);
- if (apsd_result->bit &&
- !(apsd_result->bit & (QC_2P0_BIT | QC_3P0_BIT))) {
- vote(chg->pd_allowed_votable, DEFAULT_VOTER, true, 0);
+ vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
+ false, 0);
+ if (get_effective_result(chg->pd_disallowed_votable_indirect))
+ /* pd is still disabled, try hvdcp */
+ try_rerun_apsd_for_hvdcp(chg);
+ else
+ /* notify pd now that pd is allowed */
power_supply_changed(chg->usb_psy);
- }
}
static void bms_update_work(struct work_struct *work)
@@ -2138,7 +2613,7 @@ static void step_soc_req_work(struct work_struct *work)
rc = smblib_get_prop_batt_capacity(chg, &pval);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't get batt capacity rc=%d\n", rc);
+ smblib_err(chg, "Couldn't get batt capacity rc=%d\n", rc);
return;
}
@@ -2150,13 +2625,12 @@ static void smblib_pl_detect_work(struct work_struct *work)
struct smb_charger *chg = container_of(work, struct smb_charger,
pl_detect_work);
- if (!get_effective_result_locked(chg->pl_disable_votable))
- rerun_election(chg->pl_disable_votable);
+ vote(chg->pl_disable_votable, PARALLEL_PSY_VOTER, false, 0);
}
#define MINIMUM_PARALLEL_FCC_UA 500000
#define PL_TAPER_WORK_DELAY_MS 100
-#define TAPER_RESIDUAL_PERCENT 75
+#define TAPER_RESIDUAL_PCT 75
static void smblib_pl_taper_work(struct work_struct *work)
{
struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -2164,22 +2638,22 @@ static void smblib_pl_taper_work(struct work_struct *work)
union power_supply_propval pval = {0, };
int rc;
- if (chg->pl.slave_fcc < MINIMUM_PARALLEL_FCC_UA) {
+ if (chg->pl.slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
vote(chg->pl_disable_votable, TAPER_END_VOTER, true, 0);
goto done;
}
rc = smblib_get_prop_batt_charge_type(chg, &pval);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't get batt charge type rc=%d\n", rc);
+ smblib_err(chg, "Couldn't get batt charge type rc=%d\n", rc);
goto done;
}
if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
- vote(chg->awake_votable, PL_VOTER, true, 0);
+ vote(chg->awake_votable, PL_TAPER_WORK_RUNNING_VOTER, true, 0);
/* Reduce the taper percent by 25 percent */
- chg->pl.taper_percent = chg->pl.taper_percent
- * TAPER_RESIDUAL_PERCENT / 100;
+ chg->pl.taper_pct = chg->pl.taper_pct
+ * TAPER_RESIDUAL_PCT / 100;
rerun_election(chg->fcc_votable);
schedule_delayed_work(&chg->pl_taper_work,
msecs_to_jiffies(PL_TAPER_WORK_DELAY_MS));
@@ -2190,7 +2664,7 @@ static void smblib_pl_taper_work(struct work_struct *work)
* Master back to Fast Charge, get out of this round of taper reduction
*/
done:
- vote(chg->awake_votable, PL_VOTER, false, 0);
+ vote(chg->awake_votable, PL_TAPER_WORK_RUNNING_VOTER, false, 0);
}
static void clear_hdc_work(struct work_struct *work)
@@ -2261,8 +2735,16 @@ static int smblib_create_votables(struct smb_charger *chg)
return rc;
}
- chg->pd_allowed_votable = create_votable("PD_ALLOWED", VOTE_SET_ANY,
- NULL, NULL);
+ chg->pd_disallowed_votable_indirect
+ = create_votable("PD_DISALLOWED_INDIRECT", VOTE_SET_ANY,
+ smblib_pd_disallowed_votable_indirect_callback, chg);
+ if (IS_ERR(chg->pd_disallowed_votable_indirect)) {
+ rc = PTR_ERR(chg->pd_disallowed_votable_indirect);
+ return rc;
+ }
+
+ chg->pd_allowed_votable = create_votable("PD_ALLOWED",
+ VOTE_SET_ANY, NULL, NULL);
if (IS_ERR(chg->pd_allowed_votable)) {
rc = PTR_ERR(chg->pd_allowed_votable);
return rc;
@@ -2292,6 +2774,33 @@ static int smblib_create_votables(struct smb_charger *chg)
return rc;
}
+ chg->pl_enable_votable_indirect = create_votable("PL_ENABLE_INDIRECT",
+ VOTE_SET_ANY,
+ smblib_pl_enable_indirect_vote_callback,
+ chg);
+ if (IS_ERR(chg->pl_enable_votable_indirect)) {
+ rc = PTR_ERR(chg->pl_enable_votable_indirect);
+ return rc;
+ }
+
+ chg->hvdcp_disable_votable = create_votable("HVDCP_DISABLE",
+ VOTE_SET_ANY,
+ smblib_hvdcp_disable_vote_callback,
+ chg);
+ if (IS_ERR(chg->hvdcp_disable_votable)) {
+ rc = PTR_ERR(chg->hvdcp_disable_votable);
+ return rc;
+ }
+
+ chg->apsd_disable_votable = create_votable("APSD_DISABLE",
+ VOTE_SET_ANY,
+ smblib_apsd_disable_vote_callback,
+ chg);
+ if (IS_ERR(chg->apsd_disable_votable)) {
+ rc = PTR_ERR(chg->apsd_disable_votable);
+ return rc;
+ }
+
return rc;
}
@@ -2311,12 +2820,20 @@ static void smblib_destroy_votables(struct smb_charger *chg)
destroy_votable(chg->usb_icl_votable);
if (chg->dc_icl_votable)
destroy_votable(chg->dc_icl_votable);
+ if (chg->pd_disallowed_votable_indirect)
+ destroy_votable(chg->pd_disallowed_votable_indirect);
if (chg->pd_allowed_votable)
destroy_votable(chg->pd_allowed_votable);
if (chg->awake_votable)
destroy_votable(chg->awake_votable);
if (chg->pl_disable_votable)
destroy_votable(chg->pl_disable_votable);
+ if (chg->chg_disable_votable)
+ destroy_votable(chg->chg_disable_votable);
+ if (chg->pl_enable_votable_indirect)
+ destroy_votable(chg->pl_enable_votable_indirect);
+ if (chg->apsd_disable_votable)
+ destroy_votable(chg->apsd_disable_votable);
}
static void smblib_iio_deinit(struct smb_charger *chg)
@@ -2329,6 +2846,8 @@ static void smblib_iio_deinit(struct smb_charger *chg)
iio_channel_release(chg->iio.usbin_i_chan);
if (!IS_ERR_OR_NULL(chg->iio.usbin_v_chan))
iio_channel_release(chg->iio.usbin_v_chan);
+ if (!IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+ iio_channel_release(chg->iio.batt_i_chan);
}
int smblib_init(struct smb_charger *chg)
@@ -2348,26 +2867,29 @@ int smblib_init(struct smb_charger *chg)
case PARALLEL_MASTER:
rc = smblib_create_votables(chg);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't create votables rc=%d\n",
+ smblib_err(chg, "Couldn't create votables rc=%d\n",
rc);
return rc;
}
- chg->bms_psy = power_supply_get_by_name("bms");
- chg->pl.psy = power_supply_get_by_name("parallel");
-
rc = smblib_register_notifier(chg);
if (rc < 0) {
- dev_err(chg->dev,
+ smblib_err(chg,
"Couldn't register notifier rc=%d\n", rc);
return rc;
}
+ chg->bms_psy = power_supply_get_by_name("bms");
+ chg->pl.psy = power_supply_get_by_name("parallel");
+ if (chg->pl.psy)
+ vote(chg->pl_disable_votable, PARALLEL_PSY_VOTER,
+ false, 0);
+
break;
case PARALLEL_SLAVE:
break;
default:
- dev_err(chg->dev, "Unsupported mode %d\n", chg->mode);
+ smblib_err(chg, "Unsupported mode %d\n", chg->mode);
return -EINVAL;
}
@@ -2384,7 +2906,7 @@ int smblib_deinit(struct smb_charger *chg)
case PARALLEL_SLAVE:
break;
default:
- dev_err(chg->dev, "Unsupported mode %d\n", chg->mode);
+ smblib_err(chg, "Unsupported mode %d\n", chg->mode);
return -EINVAL;
}
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index f5d9dda8330a..4be06ffcfb25 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -22,18 +22,30 @@ enum print_reason {
PR_INTERRUPT = BIT(0),
PR_REGISTER = BIT(1),
PR_MISC = BIT(2),
+ PR_PARALLEL = BIT(3),
};
-#define DEFAULT_VOTER "DEFAULT_VOTER"
-#define USER_VOTER "USER_VOTER"
-#define PD_VOTER "PD_VOTER"
-#define PL_VOTER "PL_VOTER"
-#define USBIN_ICL_VOTER "USBIN_ICL_VOTER"
-#define CHG_STATE_VOTER "CHG_STATE_VOTER"
-#define TYPEC_SRC_VOTER "TYPEC_SRC_VOTER"
-#define TAPER_END_VOTER "TAPER_END_VOTER"
-#define FCC_MAX_RESULT "FCC_MAX_RESULT"
-#define THERMAL_DAEMON "THERMAL_DAEMON"
+#define DEFAULT_VOTER "DEFAULT_VOTER"
+#define USER_VOTER "USER_VOTER"
+#define PD_VOTER "PD_VOTER"
+#define USB_PSY_VOTER "USB_PSY_VOTER"
+#define PL_TAPER_WORK_RUNNING_VOTER "PL_TAPER_WORK_RUNNING_VOTER"
+#define PARALLEL_PSY_VOTER "PARALLEL_PSY_VOTER"
+#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER"
+#define USBIN_I_VOTER "USBIN_I_VOTER"
+#define USBIN_V_VOTER "USBIN_V_VOTER"
+#define CHG_STATE_VOTER "CHG_STATE_VOTER"
+#define TYPEC_SRC_VOTER "TYPEC_SRC_VOTER"
+#define TAPER_END_VOTER "TAPER_END_VOTER"
+#define FCC_MAX_RESULT_VOTER "FCC_MAX_RESULT_VOTER"
+#define THERMAL_DAEMON_VOTER "THERMAL_DAEMON_VOTER"
+#define CC_DETACHED_VOTER "CC_DETACHED_VOTER"
+#define HVDCP_TIMEOUT_VOTER "HVDCP_TIMEOUT_VOTER"
+#define PD_DISALLOWED_INDIRECT_VOTER "PD_DISALLOWED_INDIRECT_VOTER"
+#define PD_HARD_RESET_VOTER "PD_HARD_RESET_VOTER"
+#define VBUS_CC_SHORT_VOTER "VBUS_CC_SHORT_VOTER"
+#define LEGACY_CABLE_VOTER "LEGACY_CABLE_VOTER"
+#define PD_INACTIVE_VOTER "PD_INACTIVE_VOTER"
enum smb_mode {
PARALLEL_MASTER = 0,
@@ -41,6 +53,10 @@ enum smb_mode {
NUM_MODES,
};
+enum {
+ QC_CHARGER_DETECTION_WA_BIT = BIT(0),
+};
+
struct smb_regulator {
struct regulator_dev *rdev;
struct regulator_desc rdesc;
@@ -70,6 +86,7 @@ struct smb_params {
struct smb_chg_param fv;
struct smb_chg_param usb_icl;
struct smb_chg_param icl_stat;
+ struct smb_chg_param otg_cl;
struct smb_chg_param dc_icl;
struct smb_chg_param dc_icl_pt_lv;
struct smb_chg_param dc_icl_pt_hv;
@@ -81,13 +98,14 @@ struct smb_params {
struct smb_chg_param step_soc_threshold[4];
struct smb_chg_param step_soc;
struct smb_chg_param step_cc_delta[5];
+ struct smb_chg_param freq_buck;
};
struct parallel_params {
struct power_supply *psy;
- int *master_percent;
- int taper_percent;
- int slave_fcc;
+ int slave_pct;
+ int taper_pct;
+ int slave_fcc_ua;
};
struct smb_iio {
@@ -95,10 +113,12 @@ struct smb_iio {
struct iio_channel *temp_max_chan;
struct iio_channel *usbin_i_chan;
struct iio_channel *usbin_v_chan;
+ struct iio_channel *batt_i_chan;
};
struct smb_charger {
struct device *dev;
+ char *name;
struct regmap *regmap;
struct smb_params param;
struct smb_iio iio;
@@ -135,10 +155,14 @@ struct smb_charger {
struct votable *fv_votable;
struct votable *usb_icl_votable;
struct votable *dc_icl_votable;
+ struct votable *pd_disallowed_votable_indirect;
struct votable *pd_allowed_votable;
struct votable *awake_votable;
struct votable *pl_disable_votable;
struct votable *chg_disable_votable;
+ struct votable *pl_enable_votable_indirect;
+ struct votable *hvdcp_disable_votable;
+ struct votable *apsd_disable_votable;
/* work */
struct work_struct bms_update_work;
@@ -152,17 +176,22 @@ struct smb_charger {
/* cached status */
int voltage_min_uv;
int voltage_max_uv;
- bool pd_active;
+ int pd_active;
bool vbus_present;
+ bool system_suspend_supported;
int system_temp_level;
int thermal_levels;
int *thermal_mitigation;
+ int otg_cl_ua;
+
int fake_capacity;
bool step_chg_enabled;
bool is_hdc;
+ bool chg_done;
+ int input_limited_fcc_ua;
/* workaround flag */
u32 wa_flags;
@@ -209,6 +238,7 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data);
irqreturn_t smblib_handle_usb_source_change(int irq, void *data);
irqreturn_t smblib_handle_icl_change(int irq, void *data);
irqreturn_t smblib_handle_usb_typec_change(int irq, void *data);
+irqreturn_t smblib_handle_dc_plugin(int irq, void *data);
irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data);
int smblib_get_prop_input_suspend(struct smb_charger *chg,
@@ -221,6 +251,8 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+ union power_supply_propval *val);
int smblib_get_prop_batt_health(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_system_temp_level(struct smb_charger *chg,
@@ -260,6 +292,8 @@ int smblib_get_prop_usb_suspend(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_usb_voltage_now(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_get_prop_pd_current_max(struct smb_charger *chg,
+ union power_supply_propval *val);
int smblib_get_prop_usb_current_max(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_usb_current_now(struct smb_charger *chg,
@@ -274,10 +308,16 @@ int smblib_get_prop_pd_allowed(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_input_current_settled(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_get_prop_pd_in_hard_reset(struct smb_charger *chg,
+ union power_supply_propval *val);
+int smblib_get_pe_start(struct smb_charger *chg,
+ union power_supply_propval *val);
int smblib_get_prop_charger_temp(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_charger_temp_max(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_set_prop_pd_current_max(struct smb_charger *chg,
+ const union power_supply_propval *val);
int smblib_set_prop_usb_current_max(struct smb_charger *chg,
const union power_supply_propval *val);
int smblib_set_prop_usb_voltage_min(struct smb_charger *chg,
@@ -288,8 +328,12 @@ int smblib_set_prop_typec_power_role(struct smb_charger *chg,
const union power_supply_propval *val);
int smblib_set_prop_pd_active(struct smb_charger *chg,
const union power_supply_propval *val);
+int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
+ const union power_supply_propval *val);
+
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+ union power_supply_propval *val);
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
#endif /* __SMB2_CHARGER_H */
-
diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h
index c88d132fbf70..ba501761c209 100644
--- a/drivers/power/qcom-charger/smb-reg.h
+++ b/drivers/power/qcom-charger/smb-reg.h
@@ -41,8 +41,9 @@ enum {
FAST_CHARGE,
FULLON_CHARGE,
TAPER_CHARGE,
- COMPLETED_CHARGE,
+ TERMINATE_CHARGE,
INHIBIT_CHARGE,
+ DISABLE_CHARGE,
};
#define BATTERY_CHARGER_STATUS_2_REG (CHGR_BASE + 0x07)
@@ -51,6 +52,7 @@ enum {
#define CHARGER_ERROR_STATUS_BAT_OV_BIT BIT(5)
#define CHARGER_ERROR_STATUS_BAT_TERM_MISSING_BIT BIT(4)
#define BAT_TEMP_STATUS_MASK GENMASK(3, 0)
+#define BAT_TEMP_STATUS_SOFT_LIMIT_MASK GENMASK(3, 2)
#define BAT_TEMP_STATUS_HOT_SOFT_LIMIT_BIT BIT(3)
#define BAT_TEMP_STATUS_COLD_SOFT_LIMIT_BIT BIT(2)
#define BAT_TEMP_STATUS_TOO_HOT_BIT BIT(1)
@@ -365,6 +367,9 @@ enum {
#define OTG_EN_SRC_CFG_BIT BIT(1)
#define CONCURRENT_MODE_CFG_BIT BIT(0)
+#define OTG_ENG_OTG_CFG_REG (OTG_BASE + 0xC0)
+#define ENG_BUCKBOOST_HALT1_8_MODE_BIT BIT(0)
+
/* BATIF Peripheral Registers */
/* BATIF Interrupt Bits */
#define BAT_7_RT_STS_BIT BIT(7)
@@ -426,7 +431,7 @@ enum {
#define APSD_STATUS_REG (USBIN_BASE + 0x07)
#define APSD_STATUS_7_BIT BIT(7)
-#define APSD_STATUS_6_BIT BIT(6)
+#define HVDCP_CHECK_TIMEOUT_BIT BIT(6)
#define SLOW_PLUGIN_TIMEOUT_BIT BIT(5)
#define ENUMERATION_DONE_BIT BIT(4)
#define VADP_CHANGE_DONE_AFTER_AUTH_BIT BIT(3)
@@ -502,6 +507,15 @@ enum {
#define CC_ORIENTATION_BIT BIT(1)
#define CC_ATTACHED_BIT BIT(0)
+#define TYPE_C_STATUS_5_REG (USBIN_BASE + 0x0F)
+#define TRY_SOURCE_FAILED_BIT BIT(6)
+#define TRY_SINK_FAILED_BIT BIT(5)
+#define TIMER_STAGE_2_BIT BIT(4)
+#define TYPEC_LEGACY_CABLE_STATUS_BIT BIT(3)
+#define TYPEC_NONCOMP_LEGACY_CABLE_STATUS_BIT BIT(2)
+#define TYPEC_TRYSOURCE_DETECT_STATUS_BIT BIT(1)
+#define TYPEC_TRYSINK_DETECT_STATUS_BIT BIT(0)
+
/* USBIN Interrupt Bits */
#define TYPE_C_CHANGE_RT_STS_BIT BIT(7)
#define USBIN_ICL_CHANGE_RT_STS_BIT BIT(6)
@@ -551,6 +565,16 @@ enum {
#define TYPE_C_UFP_MODE_BIT BIT(1)
#define EN_80UA_180UA_CUR_SOURCE_BIT BIT(0)
+#define TYPE_C_CFG_3_REG (USBIN_BASE + 0x5A)
+#define TVBUS_DEBOUNCE_BIT BIT(7)
+#define TYPEC_LEGACY_CABLE_INT_EN_BIT BIT(6)
+#define TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN_BIT BIT(5)
+#define TYPEC_TRYSOURCE_DETECT_INT_EN_BIT BIT(4)
+#define TYPEC_TRYSINK_DETECT_INT_EN_BIT BIT(3)
+#define EN_TRYSINK_MODE_BIT BIT(2)
+#define EN_LEGACY_CABLE_DETECTION_BIT BIT(1)
+#define ALLOW_PD_DRING_UFP_TCCDB_BIT BIT(0)
+
#define USBIN_ADAPTER_ALLOW_CFG_REG (USBIN_BASE + 0x60)
#define USBIN_ADAPTER_ALLOW_MASK GENMASK(3, 0)
enum {
@@ -581,6 +605,7 @@ enum {
#define DCD_TIMEOUT_SEL_BIT BIT(5)
#define OCD_CURRENT_SEL_BIT BIT(4)
#define SLOW_PLUGIN_TIMER_EN_CFG_BIT BIT(3)
+#define FLOAT_OPTIONS_MASK GENMASK(2, 0)
#define FLOAT_DIS_CHGING_CFG_BIT BIT(2)
#define SUSPEND_FLOAT_CFG_BIT BIT(1)
#define FORCE_FLOAT_SDP_CFG_BIT BIT(0)
@@ -608,7 +633,7 @@ enum {
#define TYPEC_VBUS_ASSERT_INT_EN_BIT BIT(0)
#define TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG (USBIN_BASE + 0x68)
-#define EXIT_SNK_BASED_ON_CC BIT(7)
+#define EXIT_SNK_BASED_ON_CC_BIT BIT(7)
#define VCONN_EN_ORIENTATION_BIT BIT(6)
#define TYPEC_VCONN_OVERCURR_INT_EN_BIT BIT(5)
#define VCONN_EN_SRC_BIT BIT(4)
@@ -765,6 +790,13 @@ enum {
ZIN_ICL_HV_MAX_MV = 11000,
};
+#define DC_ENG_SSUPPLY_CFG3_REG (DCIN_BASE + 0xC2)
+#define ENG_SSUPPLY_HI_CAP_BIT BIT(6)
+#define ENG_SSUPPLY_HI_RES_BIT BIT(5)
+#define ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT BIT(3)
+#define ENG_SSUPPLY_CFG_SYSOV_TH_4P8_BIT BIT(2)
+#define ENG_SSUPPLY_5V_OV_OPT_BIT BIT(0)
+
/* MISC Peripheral Registers */
#define REVISION1_REG (MISC_BASE + 0x00)
#define DIG_MINOR_MASK GENMASK(7, 0)
@@ -964,4 +996,6 @@ enum {
#define SYSOK_POL_BIT BIT(3)
#define SYSOK_OPTIONS_MASK GENMASK(2, 0)
+#define CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG (MISC_BASE + 0xA0)
+
#endif /* __SMB2_CHARGER_REG_H */
diff --git a/drivers/power/qcom-charger/smb1351-charger.c b/drivers/power/qcom-charger/smb1351-charger.c
index 0f18844b9afa..79fbe33acf5d 100644
--- a/drivers/power/qcom-charger/smb1351-charger.c
+++ b/drivers/power/qcom-charger/smb1351-charger.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1872,6 +1872,12 @@ static void smb1351_chg_adc_notification(enum qpnp_tm_state state, void *ctx)
}
}
+ if (!cur) {
+ pr_debug("Couldn't choose batt state, adc state=%d and temp=%d\n",
+ state, temp);
+ return;
+ }
+
if (cur->batt_present)
chip->battery_missing = false;
else
diff --git a/drivers/power/qcom-charger/smb138x-charger.c b/drivers/power/qcom-charger/smb138x-charger.c
index 33d759be9aeb..3db295b3e6e8 100644
--- a/drivers/power/qcom-charger/smb138x-charger.c
+++ b/drivers/power/qcom-charger/smb138x-charger.c
@@ -48,8 +48,8 @@ static struct smb_params v1_params = {
.name = "fast charge current",
.reg = FAST_CHARGE_CURRENT_CFG_REG,
.min_u = 0,
- .max_u = 5000000,
- .step_u = 50000,
+ .max_u = 4500000,
+ .step_u = 25000,
},
.fv = {
.name = "float voltage",
@@ -72,6 +72,13 @@ static struct smb_params v1_params = {
.max_u = 6000000,
.step_u = 25000,
},
+ .freq_buck = {
+ .name = "buck switching frequency",
+ .reg = CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG,
+ .min_u = 500,
+ .max_u = 2000,
+ .step_u = 100,
+ },
};
struct smb_dt_props {
@@ -395,6 +402,7 @@ static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_INPUT_SUSPEND,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
};
@@ -431,6 +439,9 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
rc = smblib_get_charge_param(chg, &chg->param.fcc,
&val->intval);
break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ rc = smblib_get_prop_slave_current_now(chg, val);
+ break;
case POWER_SUPPLY_PROP_CHARGER_TEMP:
rc = smblib_get_prop_charger_temp(chg, val);
break;
@@ -467,6 +478,10 @@ static int smb138x_parallel_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CURRENT_MAX:
rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
break;
+ case POWER_SUPPLY_PROP_BUCK_FREQ:
+ rc = smblib_set_charge_param(chg, &chg->param.freq_buck,
+ val->intval);
+ break;
default:
pr_err("parallel power supply set prop %d not supported\n",
prop);
@@ -1125,6 +1140,15 @@ static int smb138x_slave_probe(struct smb138x *chip)
return rc;
}
+ /* enable parallel current sensing */
+ rc = smblib_masked_write(chg, CFG_REG,
+ VCHG_EN_CFG_BIT, VCHG_EN_CFG_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't enable parallel current sensing rc=%d\n",
+ rc);
+ return rc;
+ }
+
/* keep at the end of probe, ready to serve before notifying others */
rc = smb138x_init_parallel_psy(chip);
if (rc < 0) {
@@ -1159,6 +1183,7 @@ static int smb138x_probe(struct platform_device *pdev)
chip->chg.dev = &pdev->dev;
chip->chg.debug_mask = &__debug_mask;
+ chip->chg.name = "SMB";
chip->chg.regmap = dev_get_regmap(chip->chg.dev->parent, NULL);
if (!chip->chg.regmap) {
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 75a0de0c532b..2f109013f723 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -36,6 +36,7 @@
#define EMERGENCY_DLOAD_MAGIC1 0x322A4F99
#define EMERGENCY_DLOAD_MAGIC2 0xC67E4350
#define EMERGENCY_DLOAD_MAGIC3 0x77777777
+#define EMMC_DLOAD_TYPE 0x2
#define SCM_IO_DISABLE_PMIC_ARBITER 1
#define SCM_IO_DEASSERT_PS_HOLD 2
@@ -46,12 +47,20 @@
static int restart_mode;
-void *restart_reason;
+static void *restart_reason, *dload_type_addr;
static bool scm_pmic_arbiter_disable_supported;
static bool scm_deassert_ps_hold_supported;
/* Download mode master kill-switch */
static void __iomem *msm_ps_hold;
static phys_addr_t tcsr_boot_misc_detect;
+static void scm_disable_sdi(void);
+
+/* Runtime could be only changed value once.
+ * There is no API from TZ to re-enable the registers.
+ * So the SDI cannot be re-enabled when it already by-passed.
+*/
+static int download_mode = 1;
+static struct kobject dload_kobj;
#ifdef CONFIG_QCOM_DLOAD_MODE
#define EDL_MODE_PROP "qcom,msm-imem-emergency_download_mode"
@@ -64,9 +73,23 @@ static void *emergency_dload_mode_addr;
static bool scm_dload_supported;
static int dload_set(const char *val, struct kernel_param *kp);
-static int download_mode = 1;
+/* interface for exporting attributes */
+struct reset_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+ char *buf);
+ size_t (*store)(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count);
+};
+#define to_reset_attr(_attr) \
+ container_of(_attr, struct reset_attribute, attr)
+#define RESET_ATTR(_name, _mode, _show, _store) \
+ static struct reset_attribute reset_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
module_param_call(download_mode, dload_set, param_get_int,
&download_mode, 0644);
+
static int panic_prep_restart(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -170,7 +193,10 @@ static int dload_set(const char *val, struct kernel_param *kp)
return 0;
}
#else
-#define set_dload_mode(x) do {} while (0)
+static void set_dload_mode(int on)
+{
+ return;
+}
static void enable_emergency_dload_mode(void)
{
@@ -183,6 +209,26 @@ static bool get_dload_mode(void)
}
#endif
+static void scm_disable_sdi(void)
+{
+ int ret;
+ struct scm_desc desc = {
+ .args[0] = 1,
+ .args[1] = 0,
+ .arginfo = SCM_ARGS(2),
+ };
+
+ /* Needed to bypass debug image on some chips */
+ if (!is_scm_armv8())
+ ret = scm_call_atomic2(SCM_SVC_BOOT,
+ SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
+ else
+ ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
+ SCM_WDOG_DEBUG_BOOT_PART), &desc);
+ if (ret)
+ pr_err("Failed to disable secure wdog debug: %d\n", ret);
+}
+
void msm_set_restart_mode(int mode)
{
restart_mode = mode;
@@ -320,13 +366,6 @@ static void deassert_ps_hold(void)
static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
{
- int ret;
- struct scm_desc desc = {
- .args[0] = 1,
- .args[1] = 0,
- .arginfo = SCM_ARGS(2),
- };
-
pr_notice("Going down for restart now\n");
msm_restart_prepare(cmd);
@@ -341,16 +380,7 @@ static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
msm_trigger_wdog_bite();
#endif
- /* Needed to bypass debug image on some chips */
- if (!is_scm_armv8())
- ret = scm_call_atomic2(SCM_SVC_BOOT,
- SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
- else
- ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
- SCM_WDOG_DEBUG_BOOT_PART), &desc);
- if (ret)
- pr_err("Failed to disable secure wdog debug: %d\n", ret);
-
+ scm_disable_sdi();
halt_spmi_pmic_arbiter();
deassert_ps_hold();
@@ -359,27 +389,11 @@ static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
static void do_msm_poweroff(void)
{
- int ret;
- struct scm_desc desc = {
- .args[0] = 1,
- .args[1] = 0,
- .arginfo = SCM_ARGS(2),
- };
-
pr_notice("Powering off the SoC\n");
-#ifdef CONFIG_QCOM_DLOAD_MODE
+
set_dload_mode(0);
-#endif
+ scm_disable_sdi();
qpnp_pon_system_pwr_off(PON_POWER_OFF_SHUTDOWN);
- /* Needed to bypass debug image on some chips */
- if (!is_scm_armv8())
- ret = scm_call_atomic2(SCM_SVC_BOOT,
- SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
- else
- ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
- SCM_WDOG_DEBUG_BOOT_PART), &desc);
- if (ret)
- pr_err("Failed to disable wdog debug: %d\n", ret);
halt_spmi_pmic_arbiter();
deassert_ps_hold();
@@ -389,6 +403,84 @@ static void do_msm_poweroff(void)
return;
}
+static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct reset_attribute *reset_attr = to_reset_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (reset_attr->show)
+ ret = reset_attr->show(kobj, attr, buf);
+
+ return ret;
+}
+
+static ssize_t attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct reset_attribute *reset_attr = to_reset_attr(attr);
+ ssize_t ret = -EIO;
+
+ if (reset_attr->store)
+ ret = reset_attr->store(kobj, attr, buf, count);
+
+ return ret;
+}
+
+static const struct sysfs_ops reset_sysfs_ops = {
+ .show = attr_show,
+ .store = attr_store,
+};
+
+static struct kobj_type reset_ktype = {
+ .sysfs_ops = &reset_sysfs_ops,
+};
+
+static ssize_t show_emmc_dload(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ uint32_t read_val, show_val;
+
+ read_val = __raw_readl(dload_type_addr);
+ if (read_val == EMMC_DLOAD_TYPE)
+ show_val = 1;
+ else
+ show_val = 0;
+
+ return snprintf(buf, sizeof(show_val), "%u\n", show_val);
+}
+
+static size_t store_emmc_dload(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ uint32_t enabled;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &enabled);
+ if (ret < 0)
+ return ret;
+
+ if (!((enabled == 0) || (enabled == 1)))
+ return -EINVAL;
+
+ if (enabled == 1)
+ __raw_writel(EMMC_DLOAD_TYPE, dload_type_addr);
+ else
+ __raw_writel(0, dload_type_addr);
+
+ return count;
+}
+RESET_ATTR(emmc_dload, 0644, show_emmc_dload, store_emmc_dload);
+
+static struct attribute *reset_attrs[] = {
+ &reset_attr_emmc_dload.attr,
+ NULL
+};
+
+static struct attribute_group reset_attr_group = {
+ .attrs = reset_attrs,
+};
+
static int msm_restart_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -419,6 +511,33 @@ static int msm_restart_probe(struct platform_device *pdev)
pr_err("unable to map imem EDLOAD mode offset\n");
}
+ np = of_find_compatible_node(NULL, NULL,
+ "qcom,msm-imem-dload-type");
+ if (!np) {
+ pr_err("unable to find DT imem dload-type node\n");
+ goto skip_sysfs_create;
+ } else {
+ dload_type_addr = of_iomap(np, 0);
+ if (!dload_type_addr) {
+ pr_err("unable to map imem dload-type offset\n");
+ goto skip_sysfs_create;
+ }
+ }
+
+ ret = kobject_init_and_add(&dload_kobj, &reset_ktype,
+ kernel_kobj, "%s", "dload");
+ if (ret) {
+ pr_err("%s:Error in creation kobject_add\n", __func__);
+ kobject_put(&dload_kobj);
+ goto skip_sysfs_create;
+ }
+
+ ret = sysfs_create_group(&dload_kobj, &reset_attr_group);
+ if (ret) {
+ pr_err("%s:Error in creation sysfs_create_group\n", __func__);
+ kobject_del(&dload_kobj);
+ }
+skip_sysfs_create:
#endif
np = of_find_compatible_node(NULL, NULL,
"qcom,msm-imem-restart_reason");
@@ -454,6 +573,8 @@ static int msm_restart_probe(struct platform_device *pdev)
download_mode = scm_is_secure_device();
set_dload_mode(download_mode);
+ if (!download_mode)
+ scm_disable_sdi();
return 0;
diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c
index ac71f2c75472..6d0c1fbe566b 100644
--- a/drivers/pwm/pwm-qpnp.c
+++ b/drivers/pwm/pwm-qpnp.c
@@ -1879,7 +1879,7 @@ static int qpnp_parse_dt_config(struct platform_device *pdev,
int rc, enable, lut_entry_size, list_size, i;
const char *lable;
const __be32 *prop;
- u64 size;
+ u32 size;
struct device_node *node;
int found_pwm_subnode = 0;
int found_lpg_subnode = 0;
@@ -1968,11 +1968,18 @@ static int qpnp_parse_dt_config(struct platform_device *pdev,
return rc;
prop = of_get_address_by_name(pdev->dev.of_node, QPNP_LPG_LUT_BASE,
- &size, 0);
+ 0, 0);
if (!prop) {
chip->flags |= QPNP_PWM_LUT_NOT_SUPPORTED;
} else {
lpg_config->lut_base_addr = be32_to_cpu(*prop);
+ rc = of_property_read_u32(of_node, "qcom,lpg-lut-size", &size);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Error reading qcom,lpg-lut-size, rc=%d\n",
+ rc);
+ return rc;
+ }
+
/*
* Each entry of LUT is of 2 bytes for generic LUT and of 1 byte
* for KPDBL/GLED LUT.
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 27a5deb1213e..80a9f0ee288b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -4223,7 +4223,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
debugfs_create_file("consumers", 0444, rdev->debugfs, rdev,
&reg_consumers_fops);
- reg = regulator_get(NULL, rdev->desc->name);
+ reg = regulator_get(NULL, rdev_get_name(rdev));
if (IS_ERR(reg) || reg == NULL) {
pr_err("Error-Bad Function Input\n");
goto error;
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index ad4b6ffef36e..873b4615d4a9 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1498,10 +1498,14 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* M-PHY RMMI interface clocks can be turned off */
ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- if (!ufs_qcom_is_link_active(hba)) {
- if (!is_gating_context)
- /* turn off UFS local PHY ref_clk */
- ufs_qcom_phy_disable_ref_clk(host->generic_phy);
+ /*
+ * If auto hibern8 is supported then the link will already
+ * be in hibern8 state and the ref clock can be gated.
+ */
+ if (ufshcd_is_auto_hibern8_supported(hba) ||
+ !ufs_qcom_is_link_active(hba)) {
+ /* turn off UFS local PHY ref_clk */
+ ufs_qcom_phy_disable_ref_clk(host->generic_phy);
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
}
@@ -1956,13 +1960,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
- /*
- * voting/devoting device ref_clk source is time consuming hence
- * skip devoting it during aggressive clock gating. This clock
- * will still be gated off during runtime suspend.
- */
- hba->no_ref_clk_gating = true;
-
err = ufs_qcom_ice_get_dev(host);
if (err == -EPROBE_DEFER) {
/*
@@ -2570,16 +2567,19 @@ static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
kfree(testbus);
}
-static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
+static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
"HCI Vendor Specific Registers ");
+ ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+
+ if (no_sleep)
+ return;
/* sleep a bit intermittently as we are dumping too much data */
- ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
usleep_range(1000, 1100);
ufs_qcom_testbus_read(hba);
usleep_range(1000, 1100);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index d478767ad3dd..862d56e78086 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -539,7 +539,7 @@ static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
}
}
-static void ufshcd_print_host_regs(struct ufs_hba *hba)
+static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
{
if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
return;
@@ -571,7 +571,12 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba)
ufshcd_print_clk_freqs(hba);
- ufshcd_vops_dbg_register_dump(hba);
+ ufshcd_vops_dbg_register_dump(hba, no_sleep);
+}
+
+static void ufshcd_print_host_regs(struct ufs_hba *hba)
+{
+ __ufshcd_print_host_regs(hba, false);
}
static
@@ -1176,6 +1181,12 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
return ret;
}
+static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
+{
+ hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
+ cancel_work_sync(&hba->clk_gating.gate_work);
+}
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -1183,7 +1194,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+ ufshcd_cancel_gate_work(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_gating.state == CLKS_ON) {
@@ -1254,14 +1265,18 @@ start:
}
break;
case REQ_CLKS_OFF:
- if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ /*
+ * If the timer was active but the callback was not running
+ * we have nothing to do, just change state and return.
+ */
+ if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
break;
}
/*
- * If we here, it means gating work is either done or
+ * If we are here, it means gating work is either done or
* currently running. Hence, fall through to cancel gating
* work and to enable clocks.
*/
@@ -1301,7 +1316,7 @@ EXPORT_SYMBOL_GPL(ufshcd_hold);
static void ufshcd_gate_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
- clk_gating.gate_work.work);
+ clk_gating.gate_work);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -1346,7 +1361,12 @@ static void ufshcd_gate_work(struct work_struct *work)
ufshcd_set_link_hibern8(hba);
}
- if (!ufshcd_is_link_active(hba) && !hba->no_ref_clk_gating)
+ /*
+ * If auto hibern8 is supported then the link will already
+ * be in hibern8 state and the ref clock can be gated.
+ */
+ if ((ufshcd_is_auto_hibern8_supported(hba) ||
+ !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
ufshcd_disable_clocks(hba, true);
else
/* If link is active, device ref_clk can't be switched off */
@@ -1394,8 +1414,9 @@ static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
- schedule_delayed_work(&hba->clk_gating.gate_work,
- msecs_to_jiffies(hba->clk_gating.delay_ms));
+ hrtimer_start(&hba->clk_gating.gate_hrtimer,
+ ms_to_ktime(hba->clk_gating.delay_ms),
+ HRTIMER_MODE_REL);
}
void ufshcd_release(struct ufs_hba *hba, bool no_sched)
@@ -1523,6 +1544,17 @@ out:
return count;
}
+static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
+ struct hrtimer *timer)
+{
+ struct ufs_hba *hba = container_of(timer, struct ufs_hba,
+ clk_gating.gate_hrtimer);
+
+ schedule_work(&hba->clk_gating.gate_work);
+
+ return HRTIMER_NORESTART;
+}
+
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
struct ufs_clk_gating *gating = &hba->clk_gating;
@@ -1539,27 +1571,25 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
if (ufshcd_is_auto_hibern8_supported(hba))
hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
- INIT_DELAYED_WORK(&gating->gate_work, ufshcd_gate_work);
+ INIT_WORK(&gating->gate_work, ufshcd_gate_work);
INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
+ /*
+ * Clock gating work must be executed only after auto hibern8
+ * timeout has expired in the hardware or after aggressive
+ * hibern8 on idle software timeout. Using jiffy based low
+ * resolution delayed work is not reliable to guarantee this,
+ * hence use a high resolution timer to make sure we schedule
+ * the gate work precisely more than hibern8 timeout.
+ *
+ * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
+ */
+ hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
gating->is_enabled = true;
- /*
- * Scheduling the delayed work after 1 jiffies will make the work to
- * get schedule any time from 0ms to 1000/HZ ms which is not desirable
- * for hibern8 enter work as it may impact the performance if it gets
- * scheduled almost immediately. Hence make sure that hibern8 enter
- * work gets scheduled atleast after 2 jiffies (any time between
- * 1000/HZ ms to 2000/HZ ms).
- */
- gating->delay_ms_pwr_save = jiffies_to_msecs(
- max_t(unsigned long,
- msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE),
- 2));
- gating->delay_ms_perf = jiffies_to_msecs(
- max_t(unsigned long,
- msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PERF),
- 2));
+ gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
+ gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;
/* start with performance mode */
gating->delay_ms = gating->delay_ms_perf;
@@ -1616,8 +1646,8 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
}
device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+ ufshcd_cancel_gate_work(hba);
cancel_work_sync(&hba->clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}
static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
@@ -1928,6 +1958,7 @@ static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
return;
if (ufshcd_is_auto_hibern8_supported(hba)) {
+ hba->hibern8_on_idle.delay_ms = 1;
hba->hibern8_on_idle.state = AUTO_HIBERN8;
/*
* Disable SW hibern8 enter on idle in case
@@ -1935,13 +1966,13 @@ static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
*/
hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
} else {
+ hba->hibern8_on_idle.delay_ms = 10;
INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
ufshcd_hibern8_enter_work);
INIT_WORK(&hba->hibern8_on_idle.exit_work,
ufshcd_hibern8_exit_work);
}
- hba->hibern8_on_idle.delay_ms = 10;
hba->hibern8_on_idle.is_enabled = true;
hba->hibern8_on_idle.delay_attr.show =
@@ -5029,7 +5060,12 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
dev_err(hba->dev,
"OCS error from controller = %x for tag %d\n",
ocs, lrbp->task_tag);
- ufshcd_print_host_regs(hba);
+ /*
+ * This is called in interrupt context, hence avoid sleep
+ * while printing debug registers. Also print only the minimum
+ * debug registers needed to debug OCS failure.
+ */
+ __ufshcd_print_host_regs(hba, true);
ufshcd_print_host_state(hba);
break;
} /* end of switch */
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index a6298f614a0b..c0714b7bea72 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -39,6 +39,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -334,7 +335,7 @@ struct ufs_hba_variant_ops {
int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
int (*resume)(struct ufs_hba *, enum ufs_pm_op);
int (*full_reset)(struct ufs_hba *);
- void (*dbg_register_dump)(struct ufs_hba *hba);
+ void (*dbg_register_dump)(struct ufs_hba *hba, bool no_sleep);
int (*update_sec_cfg)(struct ufs_hba *hba, bool restore_sec_cfg);
u32 (*get_scale_down_gear)(struct ufs_hba *);
int (*set_bus_vote)(struct ufs_hba *, bool);
@@ -396,8 +397,9 @@ enum clk_gating_state {
/**
* struct ufs_clk_gating - UFS clock gating related info
- * @gate_work: worker to turn off clocks after some delay as specified in
- * delay_ms
+ * @gate_hrtimer: hrtimer to invoke @gate_work after some delay as
+ * specified in @delay_ms
+ * @gate_work: worker to turn off clocks
* @ungate_work: worker to turn on clocks that will be used in case of
* interrupt context
* @state: the current clocks state
@@ -415,7 +417,8 @@ enum clk_gating_state {
* completion before gating clocks.
*/
struct ufs_clk_gating {
- struct delayed_work gate_work;
+ struct hrtimer gate_hrtimer;
+ struct work_struct gate_work;
struct work_struct ungate_work;
enum clk_gating_state state;
unsigned long delay_ms;
@@ -1241,10 +1244,11 @@ static inline int ufshcd_vops_full_reset(struct ufs_hba *hba)
}
-static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
+static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba,
+ bool no_sleep)
{
if (hba->var && hba->var->vops && hba->var->vops->dbg_register_dump)
- hba->var->vops->dbg_register_dump(hba);
+ hba->var->vops->dbg_register_dump(hba, no_sleep);
}
static inline int ufshcd_vops_update_sec_cfg(struct ufs_hba *hba,
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 2bc74941abc8..8c43effadc70 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -821,4 +821,12 @@ config QCOM_SMCINVOKE
Enable SMCInvoke driver which supports capability based secure
communication between QSEE and HLOS.
+config QCOM_EARLY_RANDOM
+ bool "Initialize random pool very early"
+ help
+ The standard random pool may not initialize until late in the boot
+ process which means that any calls to get random numbers before then
+ may not be truly random. Select this option to make an early call
+ to get some random data to put in the pool. If unsure, say N.
+
source "drivers/soc/qcom/memshare/Kconfig"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 434a114c000f..0105e03b082d 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -99,3 +99,4 @@ obj-$(CONFIG_MSM_RPM_LOG) += rpm_log.o
obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
obj-$(CONFIG_WCD_DSP_GLINK) += wcd-dsp-glink.o
obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
+obj-$(CONFIG_QCOM_EARLY_RANDOM) += early_random.o
diff --git a/drivers/soc/qcom/common_log.c b/drivers/soc/qcom/common_log.c
index f4c69d624342..ecf89b2b3b37 100644
--- a/drivers/soc/qcom/common_log.c
+++ b/drivers/soc/qcom/common_log.c
@@ -20,7 +20,7 @@
#include <soc/qcom/memory_dump.h>
#define MISC_DUMP_DATA_LEN 4096
-#define PMIC_DUMP_DATA_LEN 4096
+#define PMIC_DUMP_DATA_LEN (64 * 1024)
#define VSENSE_DUMP_DATA_LEN 4096
#define RPM_DUMP_DATA_LEN (160 * 1024)
diff --git a/drivers/soc/qcom/core_hang_detect.c b/drivers/soc/qcom/core_hang_detect.c
index e9b7f612dccc..c88d4c34eecf 100644
--- a/drivers/soc/qcom/core_hang_detect.c
+++ b/drivers/soc/qcom/core_hang_detect.c
@@ -245,7 +245,9 @@ static int msm_hang_detect_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct hang_detect *hang_det = NULL;
int cpu, ret, cpu_count = 0;
- u32 treg[NR_CPUS], creg[NR_CPUS];
+ const char *name;
+ u32 treg[NR_CPUS] = {0}, creg[NR_CPUS] = {0};
+ u32 num_reg = 0;
if (!pdev->dev.of_node || !enable)
return -ENODEV;
@@ -258,15 +260,28 @@ static int msm_hang_detect_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ name = of_get_property(node, "label", NULL);
+ if (!name) {
+ pr_err("Can't get label property\n");
+ return -EINVAL;
+ }
+
+ num_reg = of_property_count_u32_elems(node,
+ "qcom,threshold-arr");
+ if (num_reg < 0) {
+ pr_err("Can't get threshold-arr property\n");
+ return -EINVAL;
+ }
+
ret = of_property_read_u32_array(node, "qcom,threshold-arr",
- treg, num_possible_cpus());
+ treg, num_reg);
if (ret) {
pr_err("Can't get threshold-arr property\n");
return -EINVAL;
}
ret = of_property_read_u32_array(node, "qcom,config-arr",
- creg, num_possible_cpus());
+ creg, num_reg);
if (ret) {
pr_err("Can't get config-arr property\n");
return -EINVAL;
@@ -289,7 +304,8 @@ static int msm_hang_detect_probe(struct platform_device *pdev)
}
ret = kobject_init_and_add(&hang_det->kobj, &core_ktype,
- &cpu_subsys.dev_root->kobj, "%s", "hang_detect");
+ &cpu_subsys.dev_root->kobj, "%s_%s",
+ "hang_detect", name);
if (ret) {
pr_err("%s:Error in creation kobject_add\n", __func__);
goto out_put_kobj;
diff --git a/drivers/soc/qcom/early_random.c b/drivers/soc/qcom/early_random.c
new file mode 100644
index 000000000000..d1ab39b16c81
--- /dev/null
+++ b/drivers/soc/qcom/early_random.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/random.h>
+
+#include <soc/qcom/scm.h>
+
+#include <asm/io.h>
+#include <asm/cacheflush.h>
+
+#define TZ_SVC_CRYPTO 10
+#define PRNG_CMD_ID 0x01
+
+struct tz_prng_data {
+ uint8_t *out_buf;
+ uint32_t out_buf_sz;
+} __packed;
+
+DEFINE_SCM_BUFFER(common_scm_buf)
+#define RANDOM_BUFFER_SIZE PAGE_SIZE
+char random_buffer[RANDOM_BUFFER_SIZE] __aligned(PAGE_SIZE);
+
+void __init init_random_pool(void)
+{
+ struct tz_prng_data data;
+ int ret;
+ u32 resp;
+ struct scm_desc desc;
+
+ data.out_buf = (uint8_t *) virt_to_phys(random_buffer);
+ desc.args[0] = (unsigned long) data.out_buf;
+ desc.args[1] = data.out_buf_sz = SZ_512;
+ desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+
+ dmac_flush_range(random_buffer, random_buffer + RANDOM_BUFFER_SIZE);
+
+ if (!is_scm_armv8())
+ ret = scm_call_noalloc(TZ_SVC_CRYPTO, PRNG_CMD_ID, &data,
+ sizeof(data), &resp, sizeof(resp),
+ common_scm_buf,
+ SCM_BUFFER_SIZE(common_scm_buf));
+ else
+ ret = scm_call2(SCM_SIP_FNID(TZ_SVC_CRYPTO, PRNG_CMD_ID),
+ &desc);
+
+ if (!ret) {
+ dmac_inv_range(random_buffer, random_buffer +
+ RANDOM_BUFFER_SIZE);
+ add_device_randomness(random_buffer, SZ_512);
+ }
+}
+
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index b7fa71dd0695..9cfca014c8ad 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -2583,6 +2583,7 @@ void *glink_open(const struct glink_open_config *cfg)
ctx->notify_tx_abort = cfg->notify_tx_abort;
ctx->notify_rx_tracer_pkt = cfg->notify_rx_tracer_pkt;
ctx->notify_remote_rx_intent = cfg->notify_remote_rx_intent;
+ ctx->magic_number = GLINK_CTX_CANARY;
if (!ctx->notify_rx_intent_req)
ctx->notify_rx_intent_req = glink_dummy_notify_rx_intent_req;
@@ -2618,7 +2619,6 @@ void *glink_open(const struct glink_open_config *cfg)
GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
__func__, ctx);
- ctx->magic_number = GLINK_CTX_CANARY;
return ctx;
}
EXPORT_SYMBOL(glink_open);
@@ -5380,7 +5380,7 @@ static int glink_scheduler_tx(struct channel_ctx *ctx,
size_t txd_len = 0;
size_t tx_len = 0;
uint32_t num_pkts = 0;
- int ret;
+ int ret = 0;
spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
while (txd_len < xprt_ctx->mtu &&
diff --git a/drivers/soc/qcom/glink_spi_xprt.c b/drivers/soc/qcom/glink_spi_xprt.c
index 66caa6ecaad2..47c66c892736 100644
--- a/drivers/soc/qcom/glink_spi_xprt.c
+++ b/drivers/soc/qcom/glink_spi_xprt.c
@@ -875,21 +875,20 @@ static void __rx_worker(struct edge_info *einfo)
int rcu_id;
rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
if (unlikely(!einfo->rx_fifo_start)) {
rx_avail = glink_spi_xprt_read_avail(einfo);
if (!rx_avail) {
srcu_read_unlock(&einfo->use_ref, rcu_id);
return;
}
- einfo->in_ssr = false;
einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
}
- if (einfo->in_ssr) {
- srcu_read_unlock(&einfo->use_ref, rcu_id);
- return;
- }
-
glink_spi_xprt_set_poll_mode(einfo);
while (inactive_cycles < MAX_INACTIVE_CYCLES) {
if (einfo->tx_resume_needed &&
@@ -1818,9 +1817,16 @@ static int glink_wdsp_cmpnt_event_handler(struct device *dev,
spi_dev = to_spi_device(sdev);
einfo->spi_dev = spi_dev;
break;
+ case WDSP_EVENT_POST_BOOTUP:
+ einfo->in_ssr = false;
+ synchronize_srcu(&einfo->use_ref);
+		/* no break: fall through to queue rx_worker after boot-up */
case WDSP_EVENT_IPC1_INTR:
queue_kthread_work(&einfo->kworker, &einfo->kwork);
break;
+ case WDSP_EVENT_PRE_SHUTDOWN:
+ ssr(&einfo->xprt_if);
+ break;
default:
pr_debug("%s: unhandled event %d", __func__, event);
break;
@@ -2040,7 +2046,6 @@ static int glink_spi_probe(struct platform_device *pdev)
init_xprt_cfg(einfo, subsys_name);
init_xprt_if(einfo);
- einfo->in_ssr = true;
einfo->fifo_size = DEFAULT_FIFO_SIZE;
init_kthread_work(&einfo->kwork, rx_worker);
init_kthread_worker(&einfo->kworker);
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index cdc07411b690..5e7f5c8bd2a1 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -33,7 +33,6 @@
#include <linux/dma-mapping.h>
#include <linux/qmi_encdec.h>
#include <linux/ipc_logging.h>
-#include <linux/msm-bus.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/qpnp/qpnp-adc.h>
@@ -108,6 +107,18 @@ module_param(qmi_timeout, ulong, 0600);
#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_HW_CONTROL BIT(1)
#define WCSS_CLK_CTL_WCSS_CSS_GDSCR_PWR_ON BIT(31)
+#define WCSS_CLK_CTL_NOC_CMD_RCGR_OFFSET 0x1D1030
+#define WCSS_CLK_CTL_NOC_CMD_RCGR_UPDATE BIT(0)
+
+#define WCSS_CLK_CTL_NOC_CFG_RCGR_OFFSET 0x1D1034
+#define WCSS_CLK_CTL_NOC_CFG_RCGR_SRC_SEL GENMASK(10, 8)
+
+#define WCSS_CLK_CTL_REF_CMD_RCGR_OFFSET 0x1D602C
+#define WCSS_CLK_CTL_REF_CMD_RCGR_UPDATE BIT(0)
+
+#define WCSS_CLK_CTL_REF_CFG_RCGR_OFFSET 0x1D6030
+#define WCSS_CLK_CTL_REF_CFG_RCGR_SRC_SEL GENMASK(10, 8)
+
/*
* Registers: WCSS_HM_A_WIFI_APB_3_A_WCMN_MAC_WCMN_REG
* Base Address: 0x18AF0000
@@ -260,7 +271,12 @@ void *icnss_ipc_log_context;
void *icnss_ipc_log_long_context;
#endif
-#define ICNSS_EVENT_PENDING 2989
+#define ICNSS_EVENT_PENDING 2989
+
+#define ICNSS_EVENT_SYNC BIT(0)
+#define ICNSS_EVENT_UNINTERRUPTIBLE BIT(1)
+#define ICNSS_EVENT_SYNC_UNINTERRUPTIBLE (ICNSS_EVENT_UNINTERRUPTIBLE | \
+ ICNSS_EVENT_SYNC)
enum icnss_driver_event_type {
ICNSS_DRIVER_EVENT_SERVER_ARRIVE,
@@ -358,6 +374,8 @@ struct icnss_stats {
uint32_t pm_suspend_noirq_err;
uint32_t pm_resume_noirq;
uint32_t pm_resume_noirq_err;
+ uint32_t pm_stay_awake;
+ uint32_t pm_relax;
uint32_t ind_register_req;
uint32_t ind_register_resp;
@@ -404,8 +422,6 @@ static struct icnss_priv {
size_t smmu_iova_len;
dma_addr_t smmu_iova_ipa_start;
size_t smmu_iova_ipa_len;
- struct msm_bus_scale_pdata *bus_scale_table;
- uint32_t bus_client;
struct qmi_handle *wlfw_clnt;
struct list_head event_list;
spinlock_t event_lock;
@@ -435,7 +451,6 @@ static struct icnss_priv {
struct notifier_block get_service_nb;
void *modem_notify_handler;
struct notifier_block modem_ssr_nb;
- struct wakeup_source ws;
uint32_t diag_reg_read_addr;
uint32_t diag_reg_read_mem_type;
uint32_t diag_reg_read_len;
@@ -444,6 +459,7 @@ static struct icnss_priv {
struct qpnp_adc_tm_chip *adc_tm_dev;
struct qpnp_vadc_chip *vadc_dev;
uint64_t vph_pwr;
+ atomic_t pm_count;
} *penv;
static void icnss_hw_write_reg(void *base, u32 offset, u32 val)
@@ -511,6 +527,35 @@ static int icnss_hw_poll_reg_field(void *base, u32 offset, u32 mask, u32 val,
return 0;
}
+static void icnss_pm_stay_awake(struct icnss_priv *priv)
+{
+ if (atomic_inc_return(&priv->pm_count) != 1)
+ return;
+
+ icnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n", priv->state,
+ atomic_read(&priv->pm_count));
+
+ pm_stay_awake(&priv->pdev->dev);
+
+ priv->stats.pm_stay_awake++;
+}
+
+static void icnss_pm_relax(struct icnss_priv *priv)
+{
+ int r = atomic_dec_return(&priv->pm_count);
+
+ WARN_ON(r < 0);
+
+ if (r != 0)
+ return;
+
+ icnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n", priv->state,
+ atomic_read(&priv->pm_count));
+
+ pm_relax(&priv->pdev->dev);
+ priv->stats.pm_relax++;
+}
+
static char *icnss_driver_event_to_str(enum icnss_driver_event_type type)
{
switch (type) {
@@ -534,16 +579,16 @@ static char *icnss_driver_event_to_str(enum icnss_driver_event_type type)
};
static int icnss_driver_event_post(enum icnss_driver_event_type type,
- bool sync, void *data)
+ u32 flags, void *data)
{
struct icnss_driver_event *event;
- unsigned long flags;
+ unsigned long irq_flags;
int gfp = GFP_KERNEL;
int ret = 0;
- icnss_pr_dbg("Posting event: %s: %s%s(%d), state: 0x%lx\n",
- current->comm, icnss_driver_event_to_str(type),
- sync ? "-sync" : "", type, penv->state);
+ icnss_pr_dbg("Posting event: %s(%d), %s, flags: 0x%x, state: 0x%lx\n",
+ icnss_driver_event_to_str(type), type, current->comm,
+ flags, penv->state);
if (type >= ICNSS_DRIVER_EVENT_MAX) {
icnss_pr_err("Invalid Event type: %d, can't post", type);
@@ -557,39 +602,47 @@ static int icnss_driver_event_post(enum icnss_driver_event_type type,
if (event == NULL)
return -ENOMEM;
+ icnss_pm_stay_awake(penv);
+
event->type = type;
event->data = data;
init_completion(&event->complete);
event->ret = ICNSS_EVENT_PENDING;
- event->sync = sync;
+ event->sync = !!(flags & ICNSS_EVENT_SYNC);
- spin_lock_irqsave(&penv->event_lock, flags);
+ spin_lock_irqsave(&penv->event_lock, irq_flags);
list_add_tail(&event->list, &penv->event_list);
- spin_unlock_irqrestore(&penv->event_lock, flags);
+ spin_unlock_irqrestore(&penv->event_lock, irq_flags);
penv->stats.events[type].posted++;
queue_work(penv->event_wq, &penv->event_work);
- if (!sync)
- return ret;
+ if (!(flags & ICNSS_EVENT_SYNC))
+ goto out;
- ret = wait_for_completion_interruptible(&event->complete);
+ if (flags & ICNSS_EVENT_UNINTERRUPTIBLE)
+ wait_for_completion(&event->complete);
+ else
+ ret = wait_for_completion_interruptible(&event->complete);
icnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
icnss_driver_event_to_str(type), type, penv->state, ret,
event->ret);
- spin_lock_irqsave(&penv->event_lock, flags);
+ spin_lock_irqsave(&penv->event_lock, irq_flags);
if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
event->sync = false;
- spin_unlock_irqrestore(&penv->event_lock, flags);
- return ret;
+ spin_unlock_irqrestore(&penv->event_lock, irq_flags);
+ ret = -EINTR;
+ goto out;
}
- spin_unlock_irqrestore(&penv->event_lock, flags);
+ spin_unlock_irqrestore(&penv->event_lock, irq_flags);
ret = event->ret;
kfree(event);
+out:
+ icnss_pm_relax(penv);
return ret;
}
@@ -1043,7 +1096,7 @@ static void icnss_hw_io_reset(struct icnss_priv *priv, bool on)
}
}
-int icnss_hw_reset_wlan_ss_power_down(struct icnss_priv *priv)
+static int icnss_hw_reset_wlan_ss_power_down(struct icnss_priv *priv)
{
u32 rdata;
@@ -1075,7 +1128,7 @@ int icnss_hw_reset_wlan_ss_power_down(struct icnss_priv *priv)
return 0;
}
-int icnss_hw_reset_common_ss_power_down(struct icnss_priv *priv)
+static int icnss_hw_reset_common_ss_power_down(struct icnss_priv *priv)
{
u32 rdata;
@@ -1120,7 +1173,7 @@ int icnss_hw_reset_common_ss_power_down(struct icnss_priv *priv)
}
-int icnss_hw_reset_wlan_rfactrl_power_down(struct icnss_priv *priv)
+static int icnss_hw_reset_wlan_rfactrl_power_down(struct icnss_priv *priv)
{
u32 rdata;
@@ -1150,7 +1203,7 @@ int icnss_hw_reset_wlan_rfactrl_power_down(struct icnss_priv *priv)
return 0;
}
-void icnss_hw_wsi_cmd_error_recovery(struct icnss_priv *priv)
+static void icnss_hw_wsi_cmd_error_recovery(struct icnss_priv *priv)
{
icnss_pr_dbg("RESET: WSI CMD Error recovery, state: 0x%lx\n",
priv->state);
@@ -1174,7 +1227,7 @@ void icnss_hw_wsi_cmd_error_recovery(struct icnss_priv *priv)
PMM_WSI_CMD_SW_BUS_SYNC, 0);
}
-u32 icnss_hw_rf_register_read_command(struct icnss_priv *priv, u32 addr)
+static u32 icnss_hw_rf_register_read_command(struct icnss_priv *priv, u32 addr)
{
u32 rdata = 0;
int ret;
@@ -1223,7 +1276,7 @@ u32 icnss_hw_rf_register_read_command(struct icnss_priv *priv, u32 addr)
return rdata;
}
-int icnss_hw_reset_rf_reset_cmd(struct icnss_priv *priv)
+static int icnss_hw_reset_rf_reset_cmd(struct icnss_priv *priv)
{
u32 rdata;
int ret;
@@ -1277,7 +1330,30 @@ int icnss_hw_reset_rf_reset_cmd(struct icnss_priv *priv)
return 0;
}
-int icnss_hw_reset_xo_disable_cmd(struct icnss_priv *priv)
+static int icnss_hw_reset_switch_to_cxo(struct icnss_priv *priv)
+{
+ icnss_pr_dbg("RESET: Switch to CXO, state: 0x%lx\n", priv->state);
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ WCSS_CLK_CTL_NOC_CFG_RCGR_OFFSET,
+ WCSS_CLK_CTL_NOC_CFG_RCGR_SRC_SEL, 0);
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ WCSS_CLK_CTL_NOC_CMD_RCGR_OFFSET,
+ WCSS_CLK_CTL_NOC_CMD_RCGR_UPDATE, 1);
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ WCSS_CLK_CTL_REF_CFG_RCGR_OFFSET,
+ WCSS_CLK_CTL_REF_CFG_RCGR_SRC_SEL, 0);
+
+ icnss_hw_write_reg_field(priv->mem_base_va,
+ WCSS_CLK_CTL_REF_CMD_RCGR_OFFSET,
+ WCSS_CLK_CTL_REF_CMD_RCGR_UPDATE, 1);
+
+ return 0;
+}
+
+static int icnss_hw_reset_xo_disable_cmd(struct icnss_priv *priv)
{
int ret;
@@ -1325,7 +1401,7 @@ int icnss_hw_reset_xo_disable_cmd(struct icnss_priv *priv)
return 0;
}
-int icnss_hw_reset(struct icnss_priv *priv)
+static int icnss_hw_reset(struct icnss_priv *priv)
{
u32 rdata;
u32 rdata1;
@@ -1383,6 +1459,8 @@ int icnss_hw_reset(struct icnss_priv *priv)
icnss_hw_reset_rf_reset_cmd(priv);
+ icnss_hw_reset_switch_to_cxo(priv);
+
icnss_hw_reset_xo_disable_cmd(priv);
icnss_hw_write_reg_field(priv->mpm_config_va, MPM_WCSSAON_CONFIG_OFFSET,
@@ -2193,7 +2271,7 @@ static void icnss_qmi_wlfw_clnt_ind(struct qmi_handle *handle,
switch (msg_id) {
case QMI_WLFW_FW_READY_IND_V01:
icnss_driver_event_post(ICNSS_DRIVER_EVENT_FW_READY_IND,
- false, NULL);
+ 0, NULL);
break;
case QMI_WLFW_MSA_READY_IND_V01:
icnss_pr_dbg("Received MSA Ready Indication msg_id 0x%x\n",
@@ -2336,6 +2414,7 @@ static int icnss_call_driver_probe(struct icnss_priv *priv)
out:
icnss_hw_power_off(priv);
+ penv->ops = NULL;
return ret;
}
@@ -2381,8 +2460,6 @@ static int icnss_driver_event_fw_ready_ind(void *data)
if (!penv)
return -ENODEV;
- __pm_stay_awake(&penv->ws);
-
set_bit(ICNSS_FW_READY, &penv->state);
icnss_pr_info("WLAN FW is ready: 0x%lx\n", penv->state);
@@ -2400,10 +2477,7 @@ static int icnss_driver_event_fw_ready_ind(void *data)
else
ret = icnss_call_driver_probe(penv);
- __pm_relax(&penv->ws);
-
out:
- __pm_relax(&penv->ws);
return ret;
}
@@ -2414,8 +2488,6 @@ static int icnss_driver_event_register_driver(void *data)
if (penv->ops)
return -EEXIST;
- __pm_stay_awake(&penv->ws);
-
penv->ops = data;
if (test_bit(SKIP_QMI, &quirks))
@@ -2441,21 +2513,17 @@ static int icnss_driver_event_register_driver(void *data)
set_bit(ICNSS_DRIVER_PROBED, &penv->state);
- __pm_relax(&penv->ws);
-
return 0;
power_off:
icnss_hw_power_off(penv);
+ penv->ops = NULL;
out:
- __pm_relax(&penv->ws);
return ret;
}
static int icnss_driver_event_unregister_driver(void *data)
{
- __pm_stay_awake(&penv->ws);
-
if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state)) {
penv->ops = NULL;
goto out;
@@ -2471,7 +2539,6 @@ static int icnss_driver_event_unregister_driver(void *data)
icnss_hw_power_off(penv);
out:
- __pm_relax(&penv->ws);
return 0;
}
@@ -2549,6 +2616,8 @@ static void icnss_driver_event_work(struct work_struct *work)
unsigned long flags;
int ret;
+ icnss_pm_stay_awake(penv);
+
spin_lock_irqsave(&penv->event_lock, flags);
while (!list_empty(&penv->event_list)) {
@@ -2608,6 +2677,8 @@ static void icnss_driver_event_work(struct work_struct *work)
spin_lock_irqsave(&penv->event_lock, flags);
}
spin_unlock_irqrestore(&penv->event_lock, flags);
+
+ icnss_pm_relax(penv);
}
static int icnss_qmi_wlfw_clnt_svc_event_notify(struct notifier_block *this,
@@ -2624,12 +2695,12 @@ static int icnss_qmi_wlfw_clnt_svc_event_notify(struct notifier_block *this,
switch (code) {
case QMI_SERVER_ARRIVE:
ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_SERVER_ARRIVE,
- false, NULL);
+ 0, NULL);
break;
case QMI_SERVER_EXIT:
ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_SERVER_EXIT,
- false, NULL);
+ 0, NULL);
break;
default:
icnss_pr_dbg("Invalid code: %ld", code);
@@ -2666,7 +2737,7 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb,
event_data->crashed = notif->crashed;
icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
- true, event_data);
+ ICNSS_EVENT_SYNC, event_data);
return NOTIFY_OK;
}
@@ -2741,7 +2812,7 @@ static int icnss_service_notifier_notify(struct notifier_block *nb,
event_data->crashed = true;
icnss_driver_event_post(ICNSS_DRIVER_EVENT_PD_SERVICE_DOWN,
- true, event_data);
+ ICNSS_EVENT_SYNC, event_data);
break;
case SERVREG_NOTIF_SERVICE_STATE_UP_V01:
icnss_pr_dbg("Service up, state: 0x%lx\n", priv->state);
@@ -2913,9 +2984,9 @@ int icnss_register_driver(struct icnss_driver_ops *ops)
}
ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_REGISTER_DRIVER,
- true, ops);
+ ICNSS_EVENT_SYNC, ops);
- if (ret == -ERESTARTSYS)
+ if (ret == -EINTR)
ret = 0;
out:
@@ -2941,7 +3012,7 @@ int icnss_unregister_driver(struct icnss_driver_ops *ops)
}
ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
- true, NULL);
+ ICNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
out:
return ret;
}
@@ -3269,6 +3340,12 @@ int icnss_wlan_disable(enum icnss_driver_mode mode)
}
EXPORT_SYMBOL(icnss_wlan_disable);
+bool icnss_is_qmi_disable(void)
+{
+ return test_bit(SKIP_QMI, &quirks) ? true : false;
+}
+EXPORT_SYMBOL(icnss_is_qmi_disable);
+
int icnss_get_ce_id(int irq)
{
int i;
@@ -3369,66 +3446,9 @@ unsigned int icnss_socinfo_get_serial_number(struct device *dev)
}
EXPORT_SYMBOL(icnss_socinfo_get_serial_number);
-static int icnss_bw_vote(struct icnss_priv *priv, int index)
-{
- int ret = 0;
-
- icnss_pr_dbg("Vote %d for msm_bus, state 0x%lx\n",
- index, priv->state);
- ret = msm_bus_scale_client_update_request(priv->bus_client, index);
- if (ret)
- icnss_pr_err("Fail to vote %d: ret %d, state 0x%lx\n",
- index, ret, priv->state);
-
- return ret;
-}
-
-static int icnss_bw_init(struct icnss_priv *priv)
-{
- int ret = 0;
-
- priv->bus_scale_table = msm_bus_cl_get_pdata(priv->pdev);
- if (!priv->bus_scale_table) {
- icnss_pr_err("Missing entry for msm_bus scale table\n");
- return -EINVAL;
- }
-
- priv->bus_client = msm_bus_scale_register_client(priv->bus_scale_table);
- if (!priv->bus_client) {
- icnss_pr_err("Fail to register with bus_scale client\n");
- ret = -EINVAL;
- goto out;
- }
-
- ret = icnss_bw_vote(priv, 1);
- if (ret)
- goto out;
-
- return 0;
-
-out:
- msm_bus_cl_clear_pdata(priv->bus_scale_table);
- return ret;
-}
-
-static void icnss_bw_deinit(struct icnss_priv *priv)
-{
- if (!priv)
- return;
-
- if (priv->bus_client) {
- icnss_bw_vote(priv, 0);
- msm_bus_scale_unregister_client(priv->bus_client);
- }
-
- if (priv->bus_scale_table)
- msm_bus_cl_clear_pdata(priv->bus_scale_table);
-}
-
static int icnss_smmu_init(struct icnss_priv *priv)
{
struct dma_iommu_mapping *mapping;
- int disable_htw = 1;
int atomic_ctx = 1;
int s1_bypass = 1;
int ret = 0;
@@ -3445,15 +3465,6 @@ static int icnss_smmu_init(struct icnss_priv *priv)
}
ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret < 0) {
- icnss_pr_err("Set disable_htw attribute failed, err = %d\n",
- ret);
- goto set_attr_fail;
- }
-
- ret = iommu_domain_set_attr(mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
if (ret < 0) {
@@ -3839,6 +3850,7 @@ static int icnss_stats_show_state(struct seq_file *s, struct icnss_priv *priv)
seq_puts(s, "MSA0 ASSIGNED");
continue;
case ICNSS_WLFW_EXISTS:
+ seq_puts(s, "WLAN FW EXISTS");
continue;
}
@@ -3943,6 +3955,8 @@ static int icnss_stats_show(struct seq_file *s, void *data)
ICNSS_STATS_DUMP(s, priv, pm_suspend_noirq_err);
ICNSS_STATS_DUMP(s, priv, pm_resume_noirq);
ICNSS_STATS_DUMP(s, priv, pm_resume_noirq_err);
+ ICNSS_STATS_DUMP(s, priv, pm_stay_awake);
+ ICNSS_STATS_DUMP(s, priv, pm_relax);
icnss_stats_show_irqs(s, priv);
@@ -4385,22 +4399,16 @@ static int icnss_probe(struct platform_device *pdev)
priv->smmu_iova_len);
goto out;
}
-
- ret = icnss_bw_init(priv);
- if (ret)
- goto out_smmu_deinit;
}
spin_lock_init(&priv->event_lock);
spin_lock_init(&priv->on_off_lock);
- wakeup_source_init(&priv->ws, "icnss_ws");
-
priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
if (!priv->event_wq) {
icnss_pr_err("Workqueue creation failed\n");
ret = -EFAULT;
- goto out_bw_deinit;
+ goto out_smmu_deinit;
}
INIT_WORK(&priv->event_work, icnss_driver_event_work);
@@ -4428,8 +4436,6 @@ static int icnss_probe(struct platform_device *pdev)
out_destroy_wq:
destroy_workqueue(priv->event_wq);
-out_bw_deinit:
- icnss_bw_deinit(priv);
out_smmu_deinit:
icnss_smmu_deinit(priv);
out:
@@ -4455,10 +4461,6 @@ static int icnss_remove(struct platform_device *pdev)
if (penv->event_wq)
destroy_workqueue(penv->event_wq);
- icnss_bw_deinit(penv);
-
- wakeup_source_trash(&penv->ws);
-
icnss_hw_power_off(penv);
dev_set_drvdata(&pdev->dev, NULL);
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
index 881359d444fc..cd3d387645fd 100644
--- a/drivers/soc/qcom/msm_smem.c
+++ b/drivers/soc/qcom/msm_smem.c
@@ -79,6 +79,7 @@ static int spinlocks_initialized;
static void *smem_ramdump_dev;
static DEFINE_MUTEX(spinlock_init_lock);
static DEFINE_SPINLOCK(smem_init_check_lock);
+static struct device *smem_dev;
static int smem_module_inited;
static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
static DEFINE_MUTEX(smem_module_init_notifier_lock);
@@ -1047,7 +1048,8 @@ static __init int modem_restart_late_init(void)
void *handle;
struct restart_notifier_block *nb;
- smem_ramdump_dev = create_ramdump_device("smem", NULL);
+ if (smem_dev)
+ smem_ramdump_dev = create_ramdump_device("smem", smem_dev);
if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
LOG_ERR("%s: Unable to create smem ramdump device.\n",
__func__);
@@ -1444,7 +1446,7 @@ smem_targ_info_done:
SMEM_INFO("smem security enabled\n");
smem_init_security();
}
-
+ smem_dev = &pdev->dev;
probe_done = true;
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
index dfd6b448a65f..e8969a5e533b 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
@@ -324,6 +324,7 @@ struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest, uint32_t dl,
pr_err("%s: glink_open failed %s\n", __func__,
svc_names[dest][clnt]);
apr_ch->handle = NULL;
+ rc = -EINVAL;
goto unlock;
}
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
index 9b44fb03cf94..83e3775ed533 100644
--- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
@@ -741,7 +741,6 @@ static int msm_audio_smmu_init(struct device *dev)
{
struct dma_iommu_mapping *mapping;
int ret;
- int disable_htw = 1;
mapping = arm_iommu_create_mapping(
msm_iommu_get_bus(dev),
@@ -750,10 +749,6 @@ static int msm_audio_smmu_init(struct device *dev)
if (IS_ERR(mapping))
return PTR_ERR(mapping);
- iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
-
ret = arm_iommu_attach_device(dev, mapping);
if (ret) {
dev_err(dev, "%s: Attach failed, err = %d\n",
diff --git a/drivers/soc/qcom/qdsp6v2/voice_svc.c b/drivers/soc/qcom/qdsp6v2/voice_svc.c
index 67c58d1e6d4c..50dd9256b270 100644
--- a/drivers/soc/qcom/qdsp6v2/voice_svc.c
+++ b/drivers/soc/qcom/qdsp6v2/voice_svc.c
@@ -223,8 +223,8 @@ static int voice_svc_send_req(struct voice_svc_cmd_request *apr_request,
} else if (!strcmp(apr_request->svc_name, VOICE_SVC_MVM_STR)) {
apr_handle = prtd->apr_q6_mvm;
} else {
- pr_err("%s: Invalid service %s\n", __func__,
- apr_request->svc_name);
+ pr_err("%s: Invalid service %.*s\n", __func__,
+ MAX_APR_SERVICE_NAME_LEN, apr_request->svc_name);
ret = -EINVAL;
goto done;
@@ -338,8 +338,8 @@ static int process_reg_cmd(struct voice_svc_register *apr_reg_svc,
svc = VOICE_SVC_CVS_STR;
handle = &prtd->apr_q6_cvs;
} else {
- pr_err("%s: Invalid Service: %s\n", __func__,
- apr_reg_svc->svc_name);
+ pr_err("%s: Invalid Service: %.*s\n", __func__,
+ MAX_APR_SERVICE_NAME_LEN, apr_reg_svc->svc_name);
ret = -EINVAL;
goto done;
}
@@ -365,7 +365,17 @@ static ssize_t voice_svc_write(struct file *file, const char __user *buf,
pr_debug("%s\n", __func__);
- data = kmalloc(count, GFP_KERNEL);
+ /*
+ * Check if enough memory is allocated to parse the message type.
+ * Will check there is enough to hold the payload later.
+ */
+ if (count >= sizeof(struct voice_svc_write_msg)) {
+ data = kmalloc(count, GFP_KERNEL);
+ } else {
+ pr_debug("%s: invalid data size\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
if (data == NULL) {
pr_err("%s: data kmalloc failed.\n", __func__);
@@ -383,7 +393,7 @@ static ssize_t voice_svc_write(struct file *file, const char __user *buf,
}
cmd = data->msg_type;
- prtd = (struct voice_svc_prvt *)file->private_data;
+ prtd = (struct voice_svc_prvt *) file->private_data;
if (prtd == NULL) {
pr_err("%s: prtd is NULL\n", __func__);
@@ -393,9 +403,13 @@ static ssize_t voice_svc_write(struct file *file, const char __user *buf,
switch (cmd) {
case MSG_REGISTER:
- if (count >=
- (sizeof(struct voice_svc_register) +
- sizeof(*data))) {
+ /*
+ * Check that count reflects the expected size to ensure
+ * sufficient memory was allocated. Since voice_svc_register
+ * has a static size, this should be exact.
+ */
+ if (count == (sizeof(struct voice_svc_write_msg) +
+ sizeof(struct voice_svc_register))) {
ret = process_reg_cmd(
(struct voice_svc_register *)data->payload, prtd);
if (!ret)
@@ -407,8 +421,13 @@ static ssize_t voice_svc_write(struct file *file, const char __user *buf,
}
break;
case MSG_REQUEST:
- if (count >= (sizeof(struct voice_svc_cmd_request) +
- sizeof(*data))) {
+ /*
+ * Check that count reflects the expected size to ensure
+ * sufficient memory was allocated. Since voice_svc_cmd_request
+ * has a variable size, check the minimum value count must be.
+ */
+ if (count >= (sizeof(struct voice_svc_write_msg) +
+ sizeof(struct voice_svc_cmd_request))) {
ret = voice_svc_send_req(
(struct voice_svc_cmd_request *)data->payload, prtd);
if (!ret)
diff --git a/drivers/soc/qcom/qsee_ipc_irq_bridge.c b/drivers/soc/qcom/qsee_ipc_irq_bridge.c
index ab43bbb7e86a..eee42d7ba314 100644
--- a/drivers/soc/qcom/qsee_ipc_irq_bridge.c
+++ b/drivers/soc/qcom/qsee_ipc_irq_bridge.c
@@ -115,10 +115,8 @@ static struct qiib_driver_data *qiib_info;
static int qiib_driver_data_init(void)
{
qiib_info = kzalloc(sizeof(*qiib_info), GFP_KERNEL);
- if (!qiib_info) {
- QIIB_ERR("Unable to allocate info pointer\n");
+ if (!qiib_info)
return -ENOMEM;
- }
INIT_LIST_HEAD(&qiib_info->list);
mutex_init(&qiib_info->list_lock);
@@ -356,6 +354,7 @@ static int qiib_parse_node(struct device_node *node, struct qiib_dev *devp)
const char *dev_name;
uint32_t irqtype;
uint32_t irq_clear[2];
+ struct irq_data *irqtype_data;
int ret = -ENODEV;
key = "qcom,dev-name";
@@ -374,7 +373,12 @@ static int qiib_parse_node(struct device_node *node, struct qiib_dev *devp)
}
QIIB_DBG("%s: %s = %d\n", __func__, key, devp->irq_line);
- irqtype = irqd_get_trigger_type(irq_get_irq_data(devp->irq_line));
+ irqtype_data = irq_get_irq_data(devp->irq_line);
+ if (!irqtype_data) {
+ QIIB_ERR("%s: get irqdata fail:%d\n", __func__, devp->irq_line);
+ goto missing_key;
+ }
+ irqtype = irqd_get_trigger_type(irqtype_data);
QIIB_DBG("%s: irqtype = %d\n", __func__, irqtype);
key = "label";
diff --git a/drivers/soc/qcom/rpm-smd-debug.c b/drivers/soc/qcom/rpm-smd-debug.c
index c08668149636..4e406f7cd379 100644
--- a/drivers/soc/qcom/rpm-smd-debug.c
+++ b/drivers/soc/qcom/rpm-smd-debug.c
@@ -104,8 +104,6 @@ static ssize_t rsc_ops_write(struct file *fp, const char __user *user_buffer,
if (msm_rpm_wait_for_ack(msm_rpm_send_request(req)))
pr_err("Sending the RPM message failed\n");
- else
- pr_info("RPM message sent succesfully\n");
err_request:
msm_rpm_free_request(req);
diff --git a/drivers/soc/qcom/rpm-smd.c b/drivers/soc/qcom/rpm-smd.c
index 03a1591e5b09..242071f52811 100644
--- a/drivers/soc/qcom/rpm-smd.c
+++ b/drivers/soc/qcom/rpm-smd.c
@@ -967,8 +967,10 @@ static struct msm_rpm_request *msm_rpm_create_request_common(
cdata->client_buf = kzalloc(buf_size, GFP_FLAG(noirq));
- if (!cdata->client_buf)
- goto cdata_alloc_fail;
+ if (!cdata->client_buf) {
+ pr_warn("Cannot allocate memory for client_buf\n");
+ goto client_buf_alloc_fail;
+ }
set_set_type(cdata->client_buf, set);
set_rsc_type(cdata->client_buf, rsc_type);
@@ -997,6 +999,8 @@ static struct msm_rpm_request *msm_rpm_create_request_common(
buf_alloc_fail:
kfree(cdata->kvp);
kvp_alloc_fail:
+ kfree(cdata->client_buf);
+client_buf_alloc_fail:
kfree(cdata);
cdata_alloc_fail:
return NULL;
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index 504a3263253c..8cba88742cb8 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -462,7 +462,7 @@ static int ssr_event_notify(struct notifier_block *this,
struct qmi_client_info, ssr_notifier);
struct notif_data *notif = data;
switch (code) {
- case SUBSYS_AFTER_SHUTDOWN:
+ case SUBSYS_BEFORE_SHUTDOWN:
pr_debug("Root PD DOWN(SSR notification), crashed?%d\n",
notif->crashed);
if (notif->crashed)
@@ -605,8 +605,8 @@ void *service_notif_register_notifier(const char *service_path, int instance_id,
if (!service_path || !instance_id || !nb)
return ERR_PTR(-EINVAL);
- service_notif = _find_service_info(service_path);
mutex_lock(&notif_add_lock);
+ service_notif = _find_service_info(service_path);
if (!service_notif) {
service_notif = (struct service_notif_info *)add_service_notif(
service_path,
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 8f58eaa537b1..470ecfdd9f5e 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -701,7 +701,7 @@ static void init_watchdog_data(struct msm_watchdog_data *wdog_dd)
wdog_dd->user_pet_complete = true;
wdog_dd->user_pet_enabled = false;
wake_up_process(wdog_dd->watchdog_task);
- init_timer(&wdog_dd->pet_timer);
+ init_timer_deferrable(&wdog_dd->pet_timer);
wdog_dd->pet_timer.data = (unsigned long)wdog_dd;
wdog_dd->pet_timer.function = pet_task_wakeup;
wdog_dd->pet_timer.expires = jiffies + delay_time;
diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c
index 310903b10a98..97d922fa5724 100644
--- a/drivers/soc/qcom/wcd-dsp-glink.c
+++ b/drivers/soc/qcom/wcd-dsp-glink.c
@@ -32,6 +32,7 @@
#define WDSP_EDGE "wdsp"
#define RESP_QUEUE_SIZE 3
#define QOS_PKT_SIZE 1024
+#define TIMEOUT_MS 1000
struct wdsp_glink_dev {
struct class *cls;
@@ -71,7 +72,19 @@ struct wdsp_glink_ch {
/* To free up the channel memory */
bool free_mem;
- /* Glink channel configuration */
+ /* Glink local channel open work */
+ struct work_struct lcl_ch_open_wrk;
+
+ /* Glink local channel close work */
+ struct work_struct lcl_ch_cls_wrk;
+
+ /* Wait for ch connect state before sending any command */
+ wait_queue_head_t ch_connect_wait;
+
+ /*
+ * Glink channel configuration. This has to be the last
+	 * member of the structure as it has variable size
+ */
struct wdsp_glink_ch_cfg ch_cfg;
};
@@ -89,12 +102,15 @@ struct wdsp_glink_priv {
struct mutex rsp_mutex;
/* Glink channel related */
+ struct mutex glink_mutex;
struct wdsp_glink_state glink_state;
struct wdsp_glink_ch **ch;
u8 no_of_channels;
struct work_struct ch_open_cls_wrk;
struct workqueue_struct *work_queue;
+ wait_queue_head_t link_state_wait;
+
struct device *dev;
};
@@ -200,9 +216,9 @@ static bool wdsp_glink_notify_rx_intent_req(void *handle, const void *priv,
mutex_lock(&ch->mutex);
rc = glink_queue_rx_intent(ch->handle, ch, req_size);
- if (IS_ERR_VALUE(ret)) {
- dev_err(wpriv->dev, "%s: Failed to queue rx intent\n",
- __func__);
+ if (IS_ERR_VALUE(rc)) {
+ dev_err(wpriv->dev, "%s: Failed to queue rx intent, rc = %d\n",
+ __func__, rc);
mutex_unlock(&ch->mutex);
goto done;
}
@@ -214,6 +230,36 @@ done:
}
/*
+ * wdsp_glink_lcl_ch_open_wrk - Work function to open channel again
+ * when local disconnect event happens
+ * work: Work structure
+ */
+static void wdsp_glink_lcl_ch_open_wrk(struct work_struct *work)
+{
+ struct wdsp_glink_ch *ch;
+
+ ch = container_of(work, struct wdsp_glink_ch,
+ lcl_ch_open_wrk);
+
+ wdsp_glink_open_ch(ch);
+}
+
+/*
+ * wdsp_glink_lcl_ch_cls_wrk - Work function to close channel locally
+ * when remote disconnect event happens
+ * work: Work structure
+ */
+static void wdsp_glink_lcl_ch_cls_wrk(struct work_struct *work)
+{
+ struct wdsp_glink_ch *ch;
+
+ ch = container_of(work, struct wdsp_glink_ch,
+ lcl_ch_cls_wrk);
+
+ wdsp_glink_close_ch(ch);
+}
+
+/*
* wdsp_glink_notify_state - Glink channel state information event callback
* handle: Opaque Channel handle returned by GLink
* priv: Private pointer to the channel
@@ -258,6 +304,7 @@ static void wdsp_glink_notify_state(void *handle, const void *priv,
__func__, ch->ch_cfg.latency_in_us,
ch->ch_cfg.name);
+ wake_up(&ch->ch_connect_wait);
mutex_unlock(&ch->mutex);
} else if (event == GLINK_LOCAL_DISCONNECTED) {
/*
@@ -271,6 +318,9 @@ static void wdsp_glink_notify_state(void *handle, const void *priv,
if (ch->free_mem) {
kfree(ch);
ch = NULL;
+ } else {
+ /* Open the glink channel again */
+ queue_work(wpriv->work_queue, &ch->lcl_ch_open_wrk);
}
} else if (event == GLINK_REMOTE_DISCONNECTED) {
dev_dbg(wpriv->dev, "%s: remote channel: %s disconnected remotely\n",
@@ -278,10 +328,10 @@ static void wdsp_glink_notify_state(void *handle, const void *priv,
mutex_unlock(&ch->mutex);
/*
* If remote disconnect happens, local side also has
- * to close the channel and reopen again as per glink
+ * to close the channel as per glink design in a
+ * separate work_queue.
*/
- if (!wdsp_glink_close_ch(ch))
- wdsp_glink_open_ch(ch);
+ queue_work(wpriv->work_queue, &ch->lcl_ch_cls_wrk);
}
}
@@ -294,16 +344,23 @@ static int wdsp_glink_close_ch(struct wdsp_glink_ch *ch)
struct wdsp_glink_priv *wpriv = ch->wpriv;
int ret = 0;
+ mutex_lock(&wpriv->glink_mutex);
+ if (ch->handle) {
+ ret = glink_close(ch->handle);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(wpriv->dev, "%s: glink_close is failed, ret = %d\n",
+ __func__, ret);
+ } else {
+ ch->handle = NULL;
+ dev_dbg(wpriv->dev, "%s: ch %s is closed\n", __func__,
+ ch->ch_cfg.name);
+ }
+ } else {
+ dev_dbg(wpriv->dev, "%s: ch %s is already closed\n", __func__,
+ ch->ch_cfg.name);
+ }
+ mutex_unlock(&wpriv->glink_mutex);
- mutex_lock(&ch->mutex);
-
- dev_dbg(wpriv->dev, "%s: ch %s closing\n", __func__, ch->ch_cfg.name);
- ret = glink_close(ch->handle);
- if (IS_ERR_VALUE(ret))
- dev_err(wpriv->dev, "%s: glink_close is failed, ret = %d\n",
- __func__, ret);
-
- mutex_unlock(&ch->mutex);
return ret;
}
@@ -318,29 +375,34 @@ static int wdsp_glink_open_ch(struct wdsp_glink_ch *ch)
struct glink_open_config open_cfg;
int ret = 0;
- memset(&open_cfg, 0, sizeof(open_cfg));
- open_cfg.options = GLINK_OPT_INITIAL_XPORT;
- open_cfg.edge = WDSP_EDGE;
- open_cfg.notify_rx = wdsp_glink_notify_rx;
- open_cfg.notify_tx_done = wdsp_glink_notify_tx_done;
- open_cfg.notify_state = wdsp_glink_notify_state;
- open_cfg.notify_rx_intent_req = wdsp_glink_notify_rx_intent_req;
- open_cfg.priv = ch;
- open_cfg.name = ch->ch_cfg.name;
-
- dev_dbg(wpriv->dev, "%s: ch->ch_cfg.name = %s, latency_in_us = %d, intents = %d\n",
- __func__, ch->ch_cfg.name, ch->ch_cfg.latency_in_us,
- ch->ch_cfg.no_of_intents);
-
- mutex_lock(&ch->mutex);
- ch->handle = glink_open(&open_cfg);
- if (IS_ERR_OR_NULL(ch->handle)) {
- dev_err(wpriv->dev, "%s: glink_open failed %s\n",
- __func__, ch->ch_cfg.name);
- ch->handle = NULL;
- ret = -EINVAL;
+ mutex_lock(&wpriv->glink_mutex);
+ if (!ch->handle) {
+ memset(&open_cfg, 0, sizeof(open_cfg));
+ open_cfg.options = GLINK_OPT_INITIAL_XPORT;
+ open_cfg.edge = WDSP_EDGE;
+ open_cfg.notify_rx = wdsp_glink_notify_rx;
+ open_cfg.notify_tx_done = wdsp_glink_notify_tx_done;
+ open_cfg.notify_state = wdsp_glink_notify_state;
+ open_cfg.notify_rx_intent_req = wdsp_glink_notify_rx_intent_req;
+ open_cfg.priv = ch;
+ open_cfg.name = ch->ch_cfg.name;
+
+ dev_dbg(wpriv->dev, "%s: ch->ch_cfg.name = %s, latency_in_us = %d, intents = %d\n",
+ __func__, ch->ch_cfg.name, ch->ch_cfg.latency_in_us,
+ ch->ch_cfg.no_of_intents);
+
+ ch->handle = glink_open(&open_cfg);
+ if (IS_ERR_OR_NULL(ch->handle)) {
+ dev_err(wpriv->dev, "%s: glink_open failed for ch %s\n",
+ __func__, ch->ch_cfg.name);
+ ch->handle = NULL;
+ ret = -EINVAL;
+ }
+ } else {
+ dev_err(wpriv->dev, "%s: ch %s is already opened\n", __func__,
+ ch->ch_cfg.name);
}
- mutex_unlock(&ch->mutex);
+ mutex_unlock(&wpriv->glink_mutex);
return ret;
}
@@ -354,7 +416,7 @@ static void wdsp_glink_close_all_ch(struct wdsp_glink_priv *wpriv)
int i;
for (i = 0; i < wpriv->no_of_channels; i++)
- if (wpriv->ch[i])
+ if (wpriv->ch && wpriv->ch[i])
wdsp_glink_close_ch(wpriv->ch[i]);
}
@@ -425,7 +487,12 @@ static void wdsp_glink_link_state_cb(struct glink_link_state_cb_info *cb_info,
}
wpriv = (struct wdsp_glink_priv *)priv;
+
+ mutex_lock(&wpriv->glink_mutex);
wpriv->glink_state.link_state = cb_info->link_state;
+ wake_up(&wpriv->link_state_wait);
+ mutex_unlock(&wpriv->glink_mutex);
+
queue_work(wpriv->work_queue, &wpriv->ch_open_cls_wrk);
}
@@ -477,6 +544,9 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv,
mutex_init(&ch[i]->mutex);
ch[i]->wpriv = wpriv;
+ INIT_WORK(&ch[i]->lcl_ch_open_wrk, wdsp_glink_lcl_ch_open_wrk);
+ INIT_WORK(&ch[i]->lcl_ch_cls_wrk, wdsp_glink_lcl_ch_cls_wrk);
+ init_waitqueue_head(&ch[i]->ch_connect_wait);
}
wpriv->ch = ch;
wpriv->no_of_channels = no_of_channels;
@@ -540,15 +610,26 @@ static void wdsp_glink_tx_buf_work(struct work_struct *work)
ret = glink_tx(ch->handle, tx_buf,
cpkt->payload, cpkt->payload_size,
GLINK_TX_REQ_INTENT);
- if (IS_ERR_VALUE(ret))
+ if (IS_ERR_VALUE(ret)) {
dev_err(wpriv->dev, "%s: glink tx failed, ret = %d\n",
__func__, ret);
+ /*
+ * If glink_tx() is failed then free tx_buf here as
+ * there won't be any tx_done notification to
+ * free the buffer.
+ */
+ kfree(tx_buf);
+ }
} else {
dev_err(wpriv->dev, "%s: channel %s is not in connected state\n",
__func__, ch->ch_cfg.name);
+ /*
+ * Free tx_buf here as there won't be any tx_done
+ * notification in this case also.
+ */
+ kfree(tx_buf);
}
mutex_unlock(&tx_buf->ch->mutex);
-
}
/*
@@ -578,10 +659,13 @@ static ssize_t wdsp_glink_read(struct file *file, char __user *buf,
count = WDSP_MAX_READ_SIZE;
}
/*
- * This is unblocked only from glink rx notification callback
- * or from flush API.
+ * Complete signal has given from glink rx notification callback
+ * or from flush API. Also use interruptible wait_for_completion API
+ * to allow the system to go in suspend.
*/
- wait_for_completion(&wpriv->rsp_complete);
+ ret = wait_for_completion_interruptible(&wpriv->rsp_complete);
+ if (ret)
+ goto done;
mutex_lock(&wpriv->rsp_mutex);
if (wpriv->rsp_cnt) {
@@ -678,7 +762,32 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf,
__func__, ret);
kfree(tx_buf);
break;
+ case WDSP_READY_PKT:
+ ret = wait_event_timeout(wpriv->link_state_wait,
+ (wpriv->glink_state.link_state ==
+ GLINK_LINK_STATE_UP),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ dev_err(wpriv->dev, "%s: Link state wait timeout\n",
+ __func__);
+ ret = -ETIMEDOUT;
+ goto free_buf;
+ }
+ ret = 0;
+ kfree(tx_buf);
+ break;
case WDSP_CMD_PKT:
+ mutex_lock(&wpriv->glink_mutex);
+ if (wpriv->glink_state.link_state == GLINK_LINK_STATE_DOWN) {
+ mutex_unlock(&wpriv->glink_mutex);
+ dev_err(wpriv->dev, "%s: Link state is Down\n",
+ __func__);
+
+ ret = -ENETRESET;
+ goto free_buf;
+ }
+ mutex_unlock(&wpriv->glink_mutex);
+
cpkt = (struct wdsp_cmd_pkt *)wpkt->payload;
dev_dbg(wpriv->dev, "%s: requested ch_name: %s\n", __func__,
cpkt->ch_name);
@@ -696,6 +805,20 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf,
ret = -EINVAL;
goto free_buf;
}
+
+ ret = wait_event_timeout(tx_buf->ch->ch_connect_wait,
+ (tx_buf->ch->channel_state ==
+ GLINK_CONNECTED),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ dev_err(wpriv->dev, "%s: glink channel %s is not in connected state %d\n",
+ __func__, tx_buf->ch->ch_cfg.name,
+ tx_buf->ch->channel_state);
+ ret = -ETIMEDOUT;
+ goto free_buf;
+ }
+ ret = 0;
+
INIT_WORK(&tx_buf->tx_work, wdsp_glink_tx_buf_work);
queue_work(wpriv->work_queue, &tx_buf->tx_work);
break;
@@ -747,7 +870,9 @@ static int wdsp_glink_open(struct inode *inode, struct file *file)
}
init_completion(&wpriv->rsp_complete);
+ init_waitqueue_head(&wpriv->link_state_wait);
mutex_init(&wpriv->rsp_mutex);
+ mutex_init(&wpriv->glink_mutex);
file->private_data = wpriv;
goto done;
@@ -801,28 +926,39 @@ static int wdsp_glink_release(struct inode *inode, struct file *file)
goto done;
}
+ if (wpriv->glink_state.handle)
+ glink_unregister_link_state_cb(wpriv->glink_state.handle);
+
flush_workqueue(wpriv->work_queue);
+ destroy_workqueue(wpriv->work_queue);
+
/*
* Clean up glink channel memory in channel state
* callback only if close channels are called from here.
*/
if (wpriv->ch) {
- for (i = 0; i < wpriv->no_of_channels; i++)
- if (wpriv->ch[i])
+ for (i = 0; i < wpriv->no_of_channels; i++) {
+ if (wpriv->ch[i]) {
wpriv->ch[i]->free_mem = true;
+ /*
+ * Channel handle NULL means channel is already
+ * closed. Free the channel memory here itself.
+ */
+ if (!wpriv->ch[i]->handle) {
+ kfree(wpriv->ch[i]);
+ wpriv->ch[i] = NULL;
+ } else {
+ wdsp_glink_close_ch(wpriv->ch[i]);
+ }
+ }
+ }
- wdsp_glink_close_all_ch(wpriv);
kfree(wpriv->ch);
wpriv->ch = NULL;
}
- if (wpriv->glink_state.handle)
- glink_unregister_link_state_cb(wpriv->glink_state.handle);
-
+ mutex_destroy(&wpriv->glink_mutex);
mutex_destroy(&wpriv->rsp_mutex);
- if (wpriv->work_queue)
- destroy_workqueue(wpriv->work_queue);
-
kfree(wpriv);
file->private_data = NULL;
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6ceac4f2d4b2..560c5c72daeb 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -103,6 +103,7 @@ struct cpufreq_cooling_device {
int dyn_power_table_entries;
struct device *cpu_dev;
get_static_t plat_get_static_power;
+ struct cpu_cooling_ops *plat_ops;
};
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
@@ -504,8 +505,13 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+ unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
- *state = cpufreq_device->cpufreq_state;
+ if (cpufreq_device->plat_ops
+ && cpufreq_device->plat_ops->get_cur_state)
+ cpufreq_device->plat_ops->get_cur_state(cpu, state);
+ else
+ *state = cpufreq_device->cpufreq_state;
return 0;
}
@@ -539,7 +545,17 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
cpufreq_device->cpufreq_state = state;
cpufreq_device->clipped_freq = clip_freq;
- cpufreq_update_policy(cpu);
+ /* Check if the device has a platform mitigation function that
+ * can handle the CPU freq mitigation, if not, notify cpufreq
+ * framework.
+ */
+ if (cpufreq_device->plat_ops) {
+ if (cpufreq_device->plat_ops->ceil_limit)
+ cpufreq_device->plat_ops->ceil_limit(cpu,
+ clip_freq);
+ } else {
+ cpufreq_update_policy(cpu);
+ }
return 0;
}
@@ -773,6 +789,9 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
* @capacitance: dynamic power coefficient for these cpus
* @plat_static_func: function to calculate the static power consumed by these
* cpus (optional)
+ * @plat_mitig_func: function that does the mitigation by changing the
+ * frequencies (Optional). By default, cpufreq framweork will
+ * be notified of the new limits.
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -785,7 +804,8 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table,
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus, u32 capacitance,
- get_static_t plat_static_func)
+ get_static_t plat_static_func,
+ struct cpu_cooling_ops *plat_ops)
{
struct thermal_cooling_device *cool_dev;
struct cpufreq_cooling_device *cpufreq_dev;
@@ -851,6 +871,8 @@ __cpufreq_cooling_register(struct device_node *np,
}
}
+ cpufreq_dev->plat_ops = plat_ops;
+
ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
if (ret) {
cool_dev = ERR_PTR(ret);
@@ -924,7 +946,7 @@ free_cdev:
struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
- return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL);
+ return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
@@ -948,7 +970,7 @@ of_cpufreq_cooling_register(struct device_node *np,
if (!np)
return ERR_PTR(-EINVAL);
- return __cpufreq_cooling_register(np, clip_cpus, 0, NULL);
+ return __cpufreq_cooling_register(np, clip_cpus, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
@@ -978,11 +1000,31 @@ cpufreq_power_cooling_register(const struct cpumask *clip_cpus, u32 capacitance,
get_static_t plat_static_func)
{
return __cpufreq_cooling_register(NULL, clip_cpus, capacitance,
- plat_static_func);
+ plat_static_func, NULL);
}
EXPORT_SYMBOL(cpufreq_power_cooling_register);
/**
+ * cpufreq_platform_cooling_register() - create cpufreq cooling device with
+ * additional platform specific mitigation function.
+ *
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ * @plat_ops: the platform mitigation functions that will be called insted of
+ * cpufreq, if provided.
+ *
+ * Return: a valid struct thermal_cooling_device pointer on success,
+ * on failure, it returns a corresponding ERR_PTR().
+ */
+struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+ struct cpu_cooling_ops *plat_ops)
+{
+ return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL,
+ plat_ops);
+}
+EXPORT_SYMBOL(cpufreq_platform_cooling_register);
+
+/**
* of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions
* @np: a valid struct device_node to the cooling device device tree node
* @clip_cpus: cpumask of cpus where the frequency constraints will happen
@@ -1015,7 +1057,7 @@ of_cpufreq_power_cooling_register(struct device_node *np,
return ERR_PTR(-EINVAL);
return __cpufreq_cooling_register(np, clip_cpus, capacitance,
- plat_static_func);
+ plat_static_func, NULL);
}
EXPORT_SYMBOL(of_cpufreq_power_cooling_register);
diff --git a/drivers/thermal/lmh_lite.c b/drivers/thermal/lmh_lite.c
index 32a573d22270..44ceb723d34c 100644
--- a/drivers/thermal/lmh_lite.c
+++ b/drivers/thermal/lmh_lite.c
@@ -70,8 +70,8 @@
int idx = 0; \
desc_arg.args[cmd_idx] = cmd_buf.list_start = next; \
trace_lmh_event_call("GET_TYPE enter"); \
- dmac_flush_range(payload, payload + sizeof(uint32_t) * \
- LMH_SCM_PAYLOAD_SIZE); \
+ dmac_flush_range(payload, (void *)payload + \
+ sizeof(uint32_t) * LMH_SCM_PAYLOAD_SIZE);\
if (!is_scm_armv8()) { \
ret = scm_call(SCM_SVC_LMH, cmd_id, \
(void *) &cmd_buf, SCM_BUFFER_SIZE(cmd_buf), \
@@ -321,7 +321,8 @@ static void lmh_read_and_update(struct lmh_driver_data *lmh_dat)
= SCM_BUFFER_SIZE(struct lmh_sensor_packet);
desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
trace_lmh_event_call("GET_INTENSITY enter");
- dmac_flush_range(&payload, &payload + sizeof(struct lmh_sensor_packet));
+ dmac_flush_range(&payload, (void *)&payload +
+ sizeof(struct lmh_sensor_packet));
if (!is_scm_armv8())
ret = scm_call(SCM_SVC_LMH, LMH_GET_INTENSITY,
(void *) &cmd_buf, SCM_BUFFER_SIZE(cmd_buf), NULL, 0);
@@ -664,7 +665,7 @@ static int lmh_get_sensor_list(void)
lmh_sensor_packet);
desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
trace_lmh_event_call("GET_SENSORS enter");
- dmac_flush_range(payload, payload + buf_size);
+ dmac_flush_range(payload, (void *)payload + buf_size);
if (!is_scm_armv8())
ret = scm_call(SCM_SVC_LMH, LMH_GET_SENSORS,
(void *) &cmd_buf,
@@ -898,7 +899,7 @@ static int lmh_debug_read(struct lmh_debug_ops *ops, uint32_t **buf)
desc_arg.args[1] = cmd_buf.buf_size = curr_size;
desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
trace_lmh_event_call("GET_DEBUG_READ enter");
- dmac_flush_range(payload, payload + curr_size);
+ dmac_flush_range(payload, (void *)payload + curr_size);
if (!is_scm_armv8()) {
ret = scm_call(SCM_SVC_LMH, LMH_DEBUG_READ,
(void *) &cmd_buf, SCM_BUFFER_SIZE(cmd_buf),
@@ -968,7 +969,7 @@ static int lmh_debug_config_write(uint32_t cmd_id, uint32_t *buf, int size)
desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL, SCM_VAL,
SCM_VAL);
trace_lmh_event_call("CONFIG_DEBUG_WRITE enter");
- dmac_flush_range(payload, payload + size_bytes);
+ dmac_flush_range(payload, (void *)payload + size_bytes);
if (!is_scm_armv8())
ret = scm_call(SCM_SVC_LMH, cmd_id, (void *) &cmd_buf,
SCM_BUFFER_SIZE(cmd_buf), NULL, 0);
diff --git a/drivers/thermal/msm_lmh_dcvs.c b/drivers/thermal/msm_lmh_dcvs.c
index cd45eeccbfe7..3758e39a1c02 100644
--- a/drivers/thermal/msm_lmh_dcvs.c
+++ b/drivers/thermal/msm_lmh_dcvs.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pm_opp.h>
+#include <linux/cpu_cooling.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
@@ -40,6 +41,7 @@
#define MSM_LIMITS_NODE_DCVS 0x44435653
#define MSM_LIMITS_SUB_FN_THERMAL 0x54484D4C
+#define MSM_LIMITS_SUB_FN_GENERAL 0x47454E00
#define MSM_LIMITS_ALGO_MODE_ENABLE 0x454E424C
@@ -49,6 +51,8 @@
#define MSM_LIMITS_CLUSTER_0 0x6370302D
#define MSM_LIMITS_CLUSTER_1 0x6370312D
+#define MSM_LIMITS_DOMAIN_MAX 0x444D4158
+
#define MSM_LIMITS_HIGH_THRESHOLD_VAL 95000
#define MSM_LIMITS_ARM_THRESHOLD_VAL 65000
#define MSM_LIMITS_POLLING_DELAY_MS 10
@@ -77,8 +81,12 @@ struct msm_lmh_dcvs_hw {
cpumask_t core_map;
struct timer_list poll_timer;
uint32_t max_freq;
+ uint32_t hw_freq_limit;
+ struct list_head list;
};
+LIST_HEAD(lmh_dcvs_hw_list);
+
static void msm_lmh_dcvs_get_max_freq(uint32_t cpu, uint32_t *max_freq)
{
unsigned long freq_ceil = UINT_MAX;
@@ -99,12 +107,29 @@ static void msm_lmh_dcvs_get_max_freq(uint32_t cpu, uint32_t *max_freq)
static uint32_t msm_lmh_mitigation_notify(struct msm_lmh_dcvs_hw *hw)
{
uint32_t max_limit = 0, val = 0;
+ struct device *cpu_dev = NULL;
+ unsigned long freq_val;
val = readl_relaxed(hw->osm_hw_reg);
dcvsh_get_frequency(val, max_limit);
+ cpu_dev = get_cpu_device(cpumask_first(&hw->core_map));
+ if (!cpu_dev) {
+ pr_err("Error in get CPU%d device\n",
+ cpumask_first(&hw->core_map));
+ goto notify_exit;
+ }
+
+ freq_val = max_limit;
+ rcu_read_lock();
+ dev_pm_opp_find_freq_floor(cpu_dev, &freq_val);
+ rcu_read_unlock();
+ max_limit = freq_val;
+
sched_update_cpu_freq_min_max(&hw->core_map, 0, max_limit);
trace_lmh_dcvs_freq(cpumask_first(&hw->core_map), max_limit);
+notify_exit:
+ hw->hw_freq_limit = max_limit;
return max_limit;
}
@@ -164,7 +189,7 @@ static int msm_lmh_dcvs_write(uint32_t node_id, uint32_t fn,
desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL,
SCM_VAL, SCM_VAL);
- dmac_flush_range(payload, payload + 5 * (sizeof(uint32_t)));
+ dmac_flush_range(payload, (void *)payload + 5 * (sizeof(uint32_t)));
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, MSM_LIMITS_DCVSH), &desc_arg);
kfree(payload);
@@ -250,6 +275,45 @@ static int trip_notify(enum thermal_trip_type type, int temp, void *data)
return 0;
}
+static struct msm_lmh_dcvs_hw *get_dcvsh_hw_from_cpu(int cpu)
+{
+ struct msm_lmh_dcvs_hw *hw;
+
+ list_for_each_entry(hw, &lmh_dcvs_hw_list, list) {
+ if (cpumask_test_cpu(cpu, &hw->core_map))
+ return hw;
+ }
+
+ return NULL;
+}
+
+static int lmh_set_max_limit(int cpu, u32 freq)
+{
+ struct msm_lmh_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
+
+ if (!hw)
+ return -EINVAL;
+
+ return msm_lmh_dcvs_write(hw->affinity, MSM_LIMITS_SUB_FN_GENERAL,
+ MSM_LIMITS_DOMAIN_MAX, freq);
+}
+
+static int lmh_get_cur_limit(int cpu, unsigned long *freq)
+{
+ struct msm_lmh_dcvs_hw *hw = get_dcvsh_hw_from_cpu(cpu);
+
+ if (!hw)
+ return -EINVAL;
+ *freq = hw->hw_freq_limit;
+
+ return 0;
+}
+
+static struct cpu_cooling_ops cd_ops = {
+ .get_cur_state = lmh_get_cur_limit,
+ .ceil_limit = lmh_set_max_limit,
+};
+
static int msm_lmh_dcvs_probe(struct platform_device *pdev)
{
int ret;
@@ -257,6 +321,7 @@ static int msm_lmh_dcvs_probe(struct platform_device *pdev)
struct msm_lmh_dcvs_hw *hw;
char sensor_name[] = "limits_sensor-00";
struct thermal_zone_device *tzdev;
+ struct thermal_cooling_device *cdev;
struct device_node *dn = pdev->dev.of_node;
struct device_node *cpu_node, *lmh_node;
uint32_t id, max_freq, request_reg, clear_reg;
@@ -331,6 +396,10 @@ static int msm_lmh_dcvs_probe(struct platform_device *pdev)
if (IS_ERR_OR_NULL(tzdev))
return PTR_ERR(tzdev);
+ /* Setup cooling devices to request mitigation states */
+ cdev = cpufreq_platform_cooling_register(&hw->core_map, &cd_ops);
+ if (IS_ERR_OR_NULL(cdev))
+ return PTR_ERR(cdev);
/*
* Driver defaults to for low and hi thresholds.
* Since we make a check for hi > lo value, set the hi threshold
@@ -356,7 +425,7 @@ static int msm_lmh_dcvs_probe(struct platform_device *pdev)
return ret;
}
- hw->max_freq = max_freq;
+ hw->hw_freq_limit = hw->max_freq = max_freq;
switch (affinity) {
case 0:
@@ -399,6 +468,9 @@ static int msm_lmh_dcvs_probe(struct platform_device *pdev)
return ret;
}
+ INIT_LIST_HEAD(&hw->list);
+ list_add(&hw->list, &lmh_dcvs_hw_list);
+
return ret;
}
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index 17ecd61e9ee6..ced2f23addd4 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -1019,7 +1019,7 @@ static int msm_lmh_dcvs_write(uint32_t node_id, uint32_t fn, uint32_t setting,
desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL,
SCM_VAL, SCM_VAL);
- dmac_flush_range(payload, payload + 5 * (sizeof(uint32_t)));
+ dmac_flush_range(payload, (void *)payload + 5 * (sizeof(uint32_t)));
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, MSM_LIMITS_DCVSH), &desc_arg);
kfree(payload);
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index fa3c9e511663..f0d5c96ac2e0 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -1596,6 +1596,16 @@ static void flip_insert_work(struct work_struct *work)
struct tty_struct *tty = msm_uport->uport.state->port.tty;
spin_lock_irqsave(&msm_uport->uport.lock, flags);
+ if (!tty || msm_uport->rx.flush == FLUSH_SHUTDOWN) {
+ dev_err(msm_uport->uport.dev,
+ "%s:Invalid driver state flush %d\n",
+ __func__, msm_uport->rx.flush);
+ MSM_HS_ERR("%s:Invalid driver state flush %d\n",
+ __func__, msm_uport->rx.flush);
+ spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
+ return;
+ }
+
if (msm_uport->rx.buffer_pending == NONE_PENDING) {
MSM_HS_ERR("Error: No buffer pending in %s", __func__);
spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
@@ -1668,6 +1678,16 @@ static void msm_serial_hs_rx_work(struct kthread_work *work)
spin_lock_irqsave(&uport->lock, flags);
+ if (!tty || rx->flush == FLUSH_SHUTDOWN) {
+ dev_err(uport->dev, "%s:Invalid driver state flush %d\n",
+ __func__, rx->flush);
+ MSM_HS_ERR("%s:Invalid driver state flush %d\n",
+ __func__, rx->flush);
+ spin_unlock_irqrestore(&uport->lock, flags);
+ msm_hs_resource_unvote(msm_uport);
+ return;
+ }
+
/*
* Process all pending descs or if nothing is
* queued - called from termios
@@ -3555,6 +3575,7 @@ static int msm_hs_probe(struct platform_device *pdev)
}
msm_serial_debugfs_init(msm_uport, pdev->id);
+ msm_hs_unconfig_uart_gpios(uport);
uport->line = pdev->id;
if (pdata->userid && pdata->userid <= UARTDM_NR)
@@ -3663,12 +3684,12 @@ static void msm_hs_shutdown(struct uart_port *uport)
if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
/* disable and disconnect rx */
- msm_hs_disconnect_rx(uport);
ret = wait_event_timeout(msm_uport->rx.wait,
- msm_uport->rx.flush == FLUSH_SHUTDOWN, 500);
+ !msm_uport->rx.pending_flag, 500);
if (!ret)
MSM_HS_WARN("%s(): rx disconnect not complete",
__func__);
+ msm_hs_disconnect_rx(uport);
}
cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 3df80c73b74a..ac0eb0939ecf 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2990,6 +2990,9 @@ void usb_remove_hcd(struct usb_hcd *hcd)
cancel_work_sync(&hcd->wakeup_work);
#endif
+ /* handle any pending hub events before XHCI stops */
+ usb_flush_hub_wq();
+
mutex_lock(&usb_bus_list_lock);
usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_list_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 84df093639ac..269c1ee2da44 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -610,6 +610,12 @@ void usb_kick_hub_wq(struct usb_device *hdev)
kick_hub_wq(hub);
}
+void usb_flush_hub_wq(void)
+{
+ flush_workqueue(hub_wq);
+}
+EXPORT_SYMBOL(usb_flush_hub_wq);
+
/*
* Let the USB core know that a USB 3.0 device has sent a Function Wake Device
* Notification, which indicates it had initiated remote wakeup.
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4ad994972b19..805c5e1931e1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -421,7 +421,16 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
if (dep->endpoint.ep_type == EP_TYPE_GSI)
return;
- if (dep->trb_pool && dep->trb_pool_dma) {
+ /*
+ * Clean up ep ring to avoid getting xferInProgress due to stale trbs
+ * with HWO bit set from previous composition when update transfer cmd
+ * is issued.
+ */
+ if (dep->number > 1 && dep->trb_pool && dep->trb_pool_dma) {
+ memset(&dep->trb_pool[0], 0,
+ sizeof(struct dwc3_trb) * dep->num_trbs);
+ dbg_event(dep->number, "Clr_TRB", 0);
+
dma_free_coherent(dwc->dev,
sizeof(struct dwc3_trb) * DWC3_TRB_NUM, dep->trb_pool,
dep->trb_pool_dma);
@@ -723,17 +732,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
(dep->number & 1) ? "in" : "out");
}
- /*
- * Clean up ep ring of non-control endpoint to avoid getting xferInProgress
- * due to stale trbs with HWO bit set from previous composition when update
- * transfer cmd is issued.
- */
- if (dep->number > 1 && dep->trb_pool) {
- memset(&dep->trb_pool[0], 0,
- sizeof(struct dwc3_trb) * dep->num_trbs);
- dbg_event(dep->number, "Clr_TRB", 0);
- }
-
return 0;
}
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 8a0e7f988d25..98d5908c1e2f 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1111,7 +1111,7 @@ static struct usb_function_instance *f_midi_alloc_inst(void)
opts->func_inst.free_func_inst = f_midi_free_inst;
opts->index = SNDRV_DEFAULT_IDX1;
opts->id = SNDRV_DEFAULT_STR1;
- opts->buflen = 256;
+ opts->buflen = 1024;
opts->qlen = 32;
opts->in_ports = 1;
opts->out_ports = 1;
@@ -1139,6 +1139,7 @@ static void f_midi_free(struct usb_function *f)
mutex_lock(&opts->lock);
for (i = opts->in_ports - 1; i >= 0; --i)
kfree(midi->in_port[i]);
+ opts->func_inst.f = NULL;
kfree(midi);
--opts->refcnt;
mutex_unlock(&opts->lock);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 7ad798ace1e5..4e35ed9654b7 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -333,6 +333,77 @@ static struct usb_descriptor_header *ncm_hs_function[] = {
NULL,
};
+/* Super Speed Support */
+static struct usb_endpoint_descriptor ncm_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+ .bInterval = USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_notify_comp_desc = {
+ .bLength = sizeof(ncm_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ncm_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_in_comp_desc = {
+ .bLength = sizeof(ncm_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ncm_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_out_comp_desc = {
+ .bLength = sizeof(ncm_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ncm_ss_function[] = {
+ (struct usb_descriptor_header *) &ncm_iad_desc,
+ /* CDC NCM control descriptors */
+ (struct usb_descriptor_header *) &ncm_control_intf,
+ (struct usb_descriptor_header *) &ncm_header_desc,
+ (struct usb_descriptor_header *) &ncm_union_desc,
+ (struct usb_descriptor_header *) &ecm_desc,
+ (struct usb_descriptor_header *) &ncm_desc,
+ (struct usb_descriptor_header *) &ncm_ss_notify_desc,
+ (struct usb_descriptor_header *) &ncm_ss_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ncm_data_nop_intf,
+ (struct usb_descriptor_header *) &ncm_data_intf,
+ (struct usb_descriptor_header *) &ncm_ss_in_desc,
+ (struct usb_descriptor_header *) &ncm_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ncm_ss_out_desc,
+ (struct usb_descriptor_header *) &ncm_ss_out_comp_desc,
+ NULL,
+};
+
/* string descriptors: */
#define STRING_CTRL_IDX 0
@@ -1431,8 +1502,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
hs_ncm_notify_desc.bEndpointAddress =
fs_ncm_notify_desc.bEndpointAddress;
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ncm_ss_in_desc.bEndpointAddress =
+ fs_ncm_in_desc.bEndpointAddress;
+ ncm_ss_out_desc.bEndpointAddress =
+ fs_ncm_out_desc.bEndpointAddress;
+ ncm_ss_notify_desc.bEndpointAddress =
+ fs_ncm_notify_desc.bEndpointAddress;
+ }
+
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
- NULL);
+ ncm_ss_function);
if (status)
goto fail;
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
index 316967415aa9..eb306529981f 100644
--- a/drivers/usb/gadget/function/f_qc_rndis.c
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -1441,13 +1441,13 @@ bool rndis_qc_get_skip_ep_config(void)
return rndis_ipa_params.skip_ep_cfg;
}
-DECLARE_USB_FUNCTION_INIT(qcrndis, qcrndis_alloc_inst, qcrndis_alloc);
+DECLARE_USB_FUNCTION_INIT(rndis_bam, qcrndis_alloc_inst, qcrndis_alloc);
static int __init usb_qcrndis_init(void)
{
int ret;
- ret = usb_function_register(&qcrndisusb_func);
+ ret = usb_function_register(&rndis_bamusb_func);
if (ret) {
pr_err("%s: failed to register diag %d\n", __func__, ret);
return ret;
@@ -1457,7 +1457,7 @@ static int __init usb_qcrndis_init(void)
static void __exit usb_qcrndis_exit(void)
{
- usb_function_unregister(&qcrndisusb_func);
+ usb_function_unregister(&rndis_bamusb_func);
rndis_qc_cleanup();
}
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 12b98017beb2..2bc70d1cf6fa 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -59,6 +59,7 @@ enum usbpd_state {
PE_PRS_SRC_SNK_SEND_SWAP,
PE_PRS_SRC_SNK_TRANSITION_TO_OFF,
PE_PRS_SRC_SNK_WAIT_SOURCE_ON,
+ PE_VCS_WAIT_FOR_VCONN,
};
static const char * const usbpd_state_strings[] = {
@@ -94,6 +95,7 @@ static const char * const usbpd_state_strings[] = {
"PRS_SRC_SNK_Send_Swap",
"PRS_SRC_SNK_Transition_to_off",
"PRS_SRC_SNK_Wait_Source_on",
+ "VCS_Wait_for_VCONN",
};
enum usbpd_control_msg_type {
@@ -168,7 +170,12 @@ static void *usbpd_ipc_log;
#define PS_HARD_RESET_TIME 25
#define PS_SOURCE_ON 400
#define PS_SOURCE_OFF 750
+#define SWAP_SOURCE_START_TIME 20
#define VDM_BUSY_TIME 50
+#define VCONN_ON_TIME 100
+
+/* tPSHardReset + tSafe0V + tSrcRecover + tSrcTurnOn */
+#define SNK_HARD_RESET_RECOVER_TIME (35 + 650 + 1000 + 275)
#define PD_CAPS_COUNT 50
@@ -191,7 +198,7 @@ static void *usbpd_ipc_log;
#define PD_RDO_MISMATCH(rdo) ((rdo) >> 26 & 1)
#define PD_RDO_USB_COMM(rdo) ((rdo) >> 25 & 1)
#define PD_RDO_NO_USB_SUSP(rdo) ((rdo) >> 24 & 1)
-#define PD_RDO_FIXED_CURR(rdo) ((rdo) >> 19 & 0x3FF)
+#define PD_RDO_FIXED_CURR(rdo) ((rdo) >> 10 & 0x3FF)
#define PD_RDO_FIXED_CURR_MINMAX(rdo) ((rdo) & 0x3FF)
#define PD_SRC_PDO_TYPE(pdo) (((pdo) >> 30) & 3)
@@ -219,9 +226,10 @@ static void *usbpd_ipc_log;
/* VDM header is the first 32-bit object following the 16-bit PD header */
#define VDM_HDR_SVID(hdr) ((hdr) >> 16)
-#define VDM_HDR_TYPE(hdr) ((hdr) & 0x8000)
-#define VDM_HDR_CMD_TYPE(hdr) (((hdr) >> 6) & 0x3)
-#define VDM_HDR_CMD(hdr) ((hdr) & 0x1f)
+#define VDM_IS_SVDM(hdr) ((hdr) & 0x8000)
+#define SVDM_HDR_OBJ_POS(hdr) (((hdr) >> 8) & 0x7)
+#define SVDM_HDR_CMD_TYPE(hdr) (((hdr) >> 6) & 0x3)
+#define SVDM_HDR_CMD(hdr) ((hdr) & 0x1f)
#define SVDM_HDR(svid, ver, obj, cmd_type, cmd) \
(((svid) << 16) | (1 << 15) | ((ver) << 13) \
@@ -249,15 +257,11 @@ static bool ss_dev = true;
module_param(ss_dev, bool, S_IRUSR | S_IWUSR);
static const u32 default_src_caps[] = { 0x36019096 }; /* VSafe5V @ 1.5A */
-
-static const u32 default_snk_caps[] = { 0x2601905A, /* 5V @ 900mA */
- 0x0002D096, /* 9V @ 1.5A */
- 0x0003C064 }; /* 12V @ 1A */
+static const u32 default_snk_caps[] = { 0x2601905A }; /* 5V @ 900mA */
struct vdm_tx {
u32 data[7];
int size;
- struct list_head entry;
};
struct usbpd {
@@ -265,6 +269,7 @@ struct usbpd {
struct workqueue_struct *wq;
struct work_struct sm_work;
struct hrtimer timer;
+ bool sm_queued;
struct extcon_dev *extcon;
@@ -303,6 +308,7 @@ struct usbpd {
struct regulator *vbus;
struct regulator *vconn;
bool vconn_enabled;
+ bool vconn_is_external;
u8 tx_msgid;
u8 rx_msgid;
@@ -311,8 +317,9 @@ struct usbpd {
enum vdm_state vdm_state;
u16 *discovered_svids;
+ int num_svids;
+ struct vdm_tx *vdm_tx;
struct vdm_tx *vdm_tx_retry;
- struct list_head vdm_tx_queue;
struct list_head svid_handlers;
struct list_head instance;
@@ -436,6 +443,12 @@ static int pd_select_pdo(struct usbpd *pd, int pdo_pos)
}
pd->requested_voltage = PD_SRC_PDO_FIXED_VOLTAGE(pdo) * 50 * 1000;
+
+ /* Can't sink more than 5V if VCONN is sourced from the VBUS input */
+ if (pd->vconn_enabled && !pd->vconn_is_external &&
+ pd->requested_voltage > 5000000)
+ return -ENOTSUPP;
+
pd->requested_current = curr;
pd->requested_pdo = pdo_pos;
pd->rdo = PD_RDO_FIXED(pdo_pos, 0, mismatch, 1, 1, curr / 10,
@@ -446,6 +459,7 @@ static int pd_select_pdo(struct usbpd *pd, int pdo_pos)
static int pd_eval_src_caps(struct usbpd *pd, const u32 *src_caps)
{
+ union power_supply_propval val;
u32 first_pdo = src_caps[0];
/* save the PDOs so userspace can further evaluate */
@@ -461,6 +475,10 @@ static int pd_eval_src_caps(struct usbpd *pd, const u32 *src_caps)
pd->peer_pr_swap = PD_SRC_PDO_FIXED_PR_SWAP(first_pdo);
pd->peer_dr_swap = PD_SRC_PDO_FIXED_DR_SWAP(first_pdo);
+ val.intval = PD_SRC_PDO_FIXED_USB_SUSP(first_pdo);
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED, &val);
+
/* Select the first PDO (vSafe5V) immediately. */
pd_select_pdo(pd, 1);
@@ -479,6 +497,18 @@ static void pd_send_hard_reset(struct usbpd *pd)
ret = pd_phy_signal(HARD_RESET_SIG, 5); /* tHardResetComplete */
if (!ret)
pd->hard_reset = true;
+ pd->in_pr_swap = false;
+}
+
+static void kick_sm(struct usbpd *pd, int ms)
+{
+ pm_stay_awake(&pd->dev);
+ pd->sm_queued = true;
+
+ if (ms)
+ hrtimer_start(&pd->timer, ms_to_ktime(ms), HRTIMER_MODE_REL);
+ else
+ queue_work(pd->wq, &pd->sm_work);
}
static void phy_sig_received(struct usbpd *pd, enum pd_sig_type type)
@@ -493,7 +523,7 @@ static void phy_sig_received(struct usbpd *pd, enum pd_sig_type type)
/* Force CC logic to source/sink to keep Rp/Rd unchanged */
set_power_role(pd, pd->current_pr);
pd->hard_reset = true;
- queue_work(pd->wq, &pd->sm_work);
+ kick_sm(pd, 0);
}
static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type,
@@ -508,7 +538,7 @@ static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type,
}
if (len < 2) {
- usbpd_err(&pd->dev, "invalid message received, len=%ld\n", len);
+ usbpd_err(&pd->dev, "invalid message received, len=%zd\n", len);
return;
}
@@ -517,7 +547,7 @@ static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type,
len -= sizeof(u16);
if (len % 4 != 0) {
- usbpd_err(&pd->dev, "len=%ld not multiple of 4\n", len);
+ usbpd_err(&pd->dev, "len=%zd not multiple of 4\n", len);
return;
}
@@ -530,18 +560,29 @@ static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type,
pd->rx_msgid = PD_MSG_HDR_ID(header);
+ /* discard Pings */
+ if (PD_MSG_HDR_TYPE(header) == MSG_PING && !len)
+ return;
+
/* check header's count field to see if it matches len */
if (PD_MSG_HDR_COUNT(header) != (len / 4)) {
- usbpd_err(&pd->dev, "header count (%d) mismatch, len=%ld\n",
+ usbpd_err(&pd->dev, "header count (%d) mismatch, len=%zd\n",
PD_MSG_HDR_COUNT(header), len);
return;
}
+ /* block until previous message has been consumed by usbpd_sm */
+ if (pd->rx_msg_type)
+ flush_work(&pd->sm_work);
+
pd->rx_msg_type = PD_MSG_HDR_TYPE(header);
pd->rx_msg_len = PD_MSG_HDR_COUNT(header);
memcpy(&pd->rx_payload, buf, len);
- queue_work(pd->wq, &pd->sm_work);
+ usbpd_dbg(&pd->dev, "received message: type(%d) len(%d)\n",
+ pd->rx_msg_type, pd->rx_msg_len);
+
+ kick_sm(pd, 0);
}
static void phy_shutdown(struct usbpd *pd)
@@ -583,16 +624,24 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
pd->in_pr_swap = false;
set_power_role(pd, PR_NONE);
pd->typec_mode = POWER_SUPPLY_TYPEC_NONE;
- queue_work(pd->wq, &pd->sm_work);
+ kick_sm(pd, 0);
break;
/* Source states */
case PE_SRC_STARTUP:
if (pd->current_dr == DR_NONE) {
pd->current_dr = DR_DFP;
- /* Defer starting USB host mode until after PD */
+ /*
+ * Defer starting USB host mode until PE_SRC_READY or
+ * when PE_SRC_SEND_CAPABILITIES fails
+ */
}
+ /* Set CC back to DRP toggle for the next disconnect */
+ val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
+
pd->rx_msg_len = 0;
pd->rx_msg_type = 0;
pd->rx_msgid = -1;
@@ -618,22 +667,24 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
pd->pd_phy_opened = true;
}
- val.intval = 1;
- power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_PD_ACTIVE, &val);
-
- pd->in_pr_swap = false;
pd->current_state = PE_SRC_SEND_CAPABILITIES;
- usbpd_dbg(&pd->dev, "Enter %s\n",
- usbpd_state_strings[pd->current_state]);
+ if (pd->in_pr_swap) {
+ kick_sm(pd, SWAP_SOURCE_START_TIME);
+ break;
+ }
+
/* fall-through */
case PE_SRC_SEND_CAPABILITIES:
- queue_work(pd->wq, &pd->sm_work);
+ kick_sm(pd, 0);
break;
case PE_SRC_NEGOTIATE_CAPABILITY:
- if (PD_RDO_OBJ_POS(pd->rdo) != 1) {
+ if (PD_RDO_OBJ_POS(pd->rdo) != 1 ||
+ PD_RDO_FIXED_CURR(pd->rdo) >
+ PD_SRC_PDO_FIXED_MAX_CURR(*default_src_caps) ||
+ PD_RDO_FIXED_CURR_MINMAX(pd->rdo) >
+ PD_SRC_PDO_FIXED_MAX_CURR(*default_src_caps)) {
/* send Reject */
ret = pd_send_msg(pd, MSG_REJECT, NULL, 0, SOP_MSG);
if (ret) {
@@ -703,6 +754,8 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
break;
case PE_SRC_TRANSITION_TO_DEFAULT:
+ pd->hard_reset = false;
+
if (pd->vconn_enabled)
regulator_disable(pd->vconn);
regulator_disable(pd->vbus);
@@ -727,13 +780,17 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
}
}
+ val.intval = 0;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
usbpd_set_state(pd, PE_SRC_STARTUP);
break;
case PE_SRC_HARD_RESET:
case PE_SNK_HARD_RESET:
/* hard reset may sleep; handle it in the workqueue */
- queue_work(pd->wq, &pd->sm_work);
+ kick_sm(pd, 0);
break;
case PE_SRC_SEND_SOFT_RESET:
@@ -751,8 +808,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
}
/* wait for ACCEPT */
- hrtimer_start(&pd->timer, ms_to_ktime(SENDER_RESPONSE_TIME),
- HRTIMER_MODE_REL);
+ kick_sm(pd, SENDER_RESPONSE_TIME);
break;
/* Sink states */
@@ -772,9 +828,22 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
}
}
+ ret = power_supply_get_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_ALLOWED, &val);
+ if (ret) {
+ usbpd_err(&pd->dev, "Unable to read USB PROP_PD_ALLOWED: %d\n",
+ ret);
+ break;
+ }
+
+ if (!val.intval)
+ break;
+
+ /* Reset protocol layer */
+ pd->tx_msgid = 0;
+ pd->rx_msgid = -1;
pd->rx_msg_len = 0;
pd->rx_msg_type = 0;
- pd->rx_msgid = -1;
if (!pd->in_pr_swap) {
if (pd->pd_phy_opened) {
@@ -797,31 +866,16 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
pd->pd_phy_opened = true;
}
- pd->in_pr_swap = false;
pd->current_voltage = 5000000;
- if (!pd->vbus_present) {
- /* can get here during a hard reset and we lost vbus */
- pd->current_state = PE_SNK_DISCOVERY;
- hrtimer_start(&pd->timer, ms_to_ktime(2000),
- HRTIMER_MODE_REL);
- break;
- }
-
- /*
- * If VBUS is already present go and skip ahead to
- * PE_SNK_WAIT_FOR_CAPABILITIES.
- */
pd->current_state = PE_SNK_WAIT_FOR_CAPABILITIES;
/* fall-through */
case PE_SNK_WAIT_FOR_CAPABILITIES:
if (pd->rx_msg_len && pd->rx_msg_type)
- queue_work(pd->wq, &pd->sm_work);
+ kick_sm(pd, 0);
else
- hrtimer_start(&pd->timer,
- ms_to_ktime(SINK_WAIT_CAP_TIME),
- HRTIMER_MODE_REL);
+ kick_sm(pd, SINK_WAIT_CAP_TIME);
break;
case PE_SNK_EVALUATE_CAPABILITY:
@@ -839,18 +893,19 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
case PE_SNK_SELECT_CAPABILITY:
ret = pd_send_msg(pd, MSG_REQUEST, &pd->rdo, 1, SOP_MSG);
- if (ret)
+ if (ret) {
usbpd_err(&pd->dev, "Error sending Request\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
/* wait for ACCEPT */
- hrtimer_start(&pd->timer, ms_to_ktime(SENDER_RESPONSE_TIME),
- HRTIMER_MODE_REL);
+ kick_sm(pd, SENDER_RESPONSE_TIME);
break;
case PE_SNK_TRANSITION_SINK:
/* wait for PS_RDY */
- hrtimer_start(&pd->timer, ms_to_ktime(PS_TRANSITION_TIME),
- HRTIMER_MODE_REL);
+ kick_sm(pd, PS_TRANSITION_TIME);
break;
case PE_SNK_READY:
@@ -875,15 +930,30 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
pd->vconn_enabled = false;
}
- pd->tx_msgid = 0;
-
val.intval = pd->requested_voltage; /* set range back to 5V */
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_VOLTAGE_MAX, &val);
pd->current_voltage = pd->requested_voltage;
- /* recursive call; go back to beginning state */
- usbpd_set_state(pd, PE_SNK_STARTUP);
+ /* max time for hard reset to toggle vbus off/on */
+ kick_sm(pd, SNK_HARD_RESET_RECOVER_TIME);
+ break;
+
+ case PE_PRS_SNK_SRC_TRANSITION_TO_OFF:
+ val.intval = pd->requested_current = 0; /* suspend charging */
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &val);
+
+ pd->in_explicit_contract = false;
+
+ /*
+ * need to update PR bit in message header so that
+ * proper GoodCRC is sent when receiving next PS_RDY
+ */
+ pd_phy_update_roles(pd->current_dr, PR_SRC);
+
+ /* wait for PS_RDY */
+ kick_sm(pd, PS_SOURCE_OFF);
break;
default:
@@ -907,10 +977,10 @@ int usbpd_register_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr)
/* already connected with this SVID discovered? */
if (pd->vdm_state >= DISCOVERED_SVIDS) {
- u16 *psvid;
+ int i;
- for (psvid = pd->discovered_svids; *psvid; psvid++) {
- if (*psvid == hdlr->svid) {
+ for (i = 0; i < pd->num_svids; i++) {
+ if (pd->discovered_svids[i] == hdlr->svid) {
if (hdlr->connect)
hdlr->connect(hdlr);
break;
@@ -932,7 +1002,7 @@ int usbpd_send_vdm(struct usbpd *pd, u32 vdm_hdr, const u32 *vdos, int num_vdos)
{
struct vdm_tx *vdm_tx;
- if (!pd->in_explicit_contract)
+ if (!pd->in_explicit_contract || pd->vdm_tx)
return -EBUSY;
vdm_tx = kzalloc(sizeof(*vdm_tx), GFP_KERNEL);
@@ -945,8 +1015,10 @@ int usbpd_send_vdm(struct usbpd *pd, u32 vdm_hdr, const u32 *vdos, int num_vdos)
vdm_tx->size = num_vdos + 1; /* include the header */
/* VDM will get sent in PE_SRC/SNK_READY state handling */
- list_add_tail(&vdm_tx->entry, &pd->vdm_tx_queue);
- queue_work(pd->wq, &pd->sm_work);
+ pd->vdm_tx = vdm_tx;
+
+ /* slight delay before queuing to prioritize handling of incoming VDM */
+ kick_sm(pd, 5);
return 0;
}
@@ -972,8 +1044,8 @@ static void handle_vdm_rx(struct usbpd *pd)
u16 svid = VDM_HDR_SVID(vdm_hdr);
u16 *psvid;
u8 i, num_vdos = pd->rx_msg_len - 1; /* num objects minus header */
- u8 cmd = VDM_HDR_CMD(vdm_hdr);
- u8 cmd_type = VDM_HDR_CMD_TYPE(vdm_hdr);
+ u8 cmd = SVDM_HDR_CMD(vdm_hdr);
+ u8 cmd_type = SVDM_HDR_CMD_TYPE(vdm_hdr);
struct usbpd_svid_handler *handler;
usbpd_dbg(&pd->dev, "VDM rx: svid:%x cmd:%x cmd_type:%x vdm_hdr:%x\n",
@@ -983,7 +1055,7 @@ static void handle_vdm_rx(struct usbpd *pd)
handler = find_svid_handler(pd, svid);
/* Unstructured VDM */
- if (!VDM_HDR_TYPE(vdm_hdr)) {
+ if (!VDM_IS_SVDM(vdm_hdr)) {
if (handler && handler->vdm_received)
handler->vdm_received(handler, vdm_hdr, vdos, num_vdos);
return;
@@ -994,7 +1066,19 @@ static void handle_vdm_rx(struct usbpd *pd)
switch (cmd_type) {
case SVDM_CMD_TYPE_INITIATOR:
- if (cmd == USBPD_SVDM_DISCOVER_IDENTITY) {
+ /*
+ * if this interrupts a previous exchange, abort the previous
+ * outgoing response
+ */
+ if (pd->vdm_tx) {
+ usbpd_dbg(&pd->dev, "Discarding previously queued SVDM tx (SVID:0x%04x)\n",
+ VDM_HDR_SVID(pd->vdm_tx->data[0]));
+
+ kfree(pd->vdm_tx);
+ pd->vdm_tx = NULL;
+ }
+
+ if (svid == USBPD_SID && cmd == USBPD_SVDM_DISCOVER_IDENTITY) {
u32 tx_vdos[3] = {
ID_HDR_USB_HOST | ID_HDR_USB_DEVICE |
ID_HDR_PRODUCT_PER_MASK | ID_HDR_VID,
@@ -1005,9 +1089,9 @@ static void handle_vdm_rx(struct usbpd *pd)
usbpd_send_svdm(pd, USBPD_SID, cmd,
SVDM_CMD_TYPE_RESP_ACK, 0, tx_vdos, 3);
- } else {
- usbpd_send_svdm(pd, USBPD_SID, cmd,
- SVDM_CMD_TYPE_RESP_NAK, 0, NULL, 0);
+ } else if (cmd != USBPD_SVDM_ATTENTION) {
+ usbpd_send_svdm(pd, svid, cmd, SVDM_CMD_TYPE_RESP_NAK,
+ SVDM_HDR_OBJ_POS(vdm_hdr), NULL, 0);
}
break;
@@ -1039,19 +1123,37 @@ static void handle_vdm_rx(struct usbpd *pd)
kfree(pd->vdm_tx_retry);
pd->vdm_tx_retry = NULL;
- kfree(pd->discovered_svids);
-
- /* TODO: handle > 12 SVIDs */
- pd->discovered_svids = kzalloc((2 * num_vdos + 1) *
- sizeof(u16),
- GFP_KERNEL);
if (!pd->discovered_svids) {
- usbpd_err(&pd->dev, "unable to allocate SVIDs\n");
- break;
+ pd->num_svids = 2 * num_vdos;
+ pd->discovered_svids = kcalloc(pd->num_svids,
+ sizeof(u16),
+ GFP_KERNEL);
+ if (!pd->discovered_svids) {
+ usbpd_err(&pd->dev, "unable to allocate SVIDs\n");
+ break;
+ }
+
+ psvid = pd->discovered_svids;
+ } else { /* handle > 12 SVIDs */
+ void *ptr;
+ size_t oldsize = pd->num_svids * sizeof(u16);
+ size_t newsize = oldsize +
+ (2 * num_vdos * sizeof(u16));
+
+ ptr = krealloc(pd->discovered_svids, newsize,
+ GFP_KERNEL);
+ if (!ptr) {
+ usbpd_err(&pd->dev, "unable to realloc SVIDs\n");
+ break;
+ }
+
+ pd->discovered_svids = ptr;
+ psvid = pd->discovered_svids + pd->num_svids;
+ memset(psvid, 0, (2 * num_vdos));
+ pd->num_svids += 2 * num_vdos;
}
/* convert 32-bit VDOs to list of 16-bit SVIDs */
- psvid = pd->discovered_svids;
for (i = 0; i < num_vdos * 2; i++) {
/*
* Within each 32-bit VDO,
@@ -1075,8 +1177,22 @@ static void handle_vdm_rx(struct usbpd *pd)
usbpd_dbg(&pd->dev, "Discovered SVID: 0x%04x\n",
svid);
*psvid++ = svid;
+ }
+ }
+
+ /* if more than 12 SVIDs, resend the request */
+ if (num_vdos == 6 && vdos[5] != 0) {
+ usbpd_send_svdm(pd, USBPD_SID,
+ USBPD_SVDM_DISCOVER_SVIDS,
+ SVDM_CMD_TYPE_INITIATOR, 0,
+ NULL, 0);
+ break;
+ }
- /* if SVID supported notify handler */
+ /* now that all SVIDs are discovered, notify handlers */
+ for (i = 0; i < pd->num_svids; i++) {
+ svid = pd->discovered_svids[i];
+ if (svid) {
handler = find_svid_handler(pd, svid);
if (handler && handler->connect)
handler->connect(handler);
@@ -1126,10 +1242,9 @@ static void handle_vdm_rx(struct usbpd *pd)
}
/* wait tVDMBusy, then retry */
- list_move(&pd->vdm_tx_retry->entry, &pd->vdm_tx_queue);
+ pd->vdm_tx = pd->vdm_tx_retry;
pd->vdm_tx_retry = NULL;
- hrtimer_start(&pd->timer, ms_to_ktime(VDM_BUSY_TIME),
- HRTIMER_MODE_REL);
+ kick_sm(pd, VDM_BUSY_TIME);
break;
default:
break;
@@ -1143,16 +1258,14 @@ static void handle_vdm_tx(struct usbpd *pd)
int ret;
/* only send one VDM at a time */
- if (!list_empty(&pd->vdm_tx_queue)) {
- struct vdm_tx *vdm_tx = list_first_entry(&pd->vdm_tx_queue,
- struct vdm_tx, entry);
- u32 vdm_hdr = vdm_tx->data[0];
+ if (pd->vdm_tx) {
+ u32 vdm_hdr = pd->vdm_tx->data[0];
- ret = pd_send_msg(pd, MSG_VDM, vdm_tx->data, vdm_tx->size,
- SOP_MSG);
+ ret = pd_send_msg(pd, MSG_VDM, pd->vdm_tx->data,
+ pd->vdm_tx->size, SOP_MSG);
if (ret) {
usbpd_err(&pd->dev, "Error sending VDM command %d\n",
- VDM_HDR_CMD(vdm_tx->data[0]));
+ SVDM_HDR_CMD(pd->vdm_tx->data[0]));
usbpd_set_state(pd, pd->current_pr == PR_SRC ?
PE_SRC_SEND_SOFT_RESET :
PE_SNK_SEND_SOFT_RESET);
@@ -1161,24 +1274,25 @@ static void handle_vdm_tx(struct usbpd *pd)
return;
}
- list_del(&vdm_tx->entry);
-
/*
* special case: keep initiated Discover ID/SVIDs
* around in case we need to re-try when receiving BUSY
*/
- if (VDM_HDR_TYPE(vdm_hdr) &&
- VDM_HDR_CMD_TYPE(vdm_hdr) == SVDM_CMD_TYPE_INITIATOR &&
- VDM_HDR_CMD(vdm_hdr) <= USBPD_SVDM_DISCOVER_SVIDS) {
+ if (VDM_IS_SVDM(vdm_hdr) &&
+ SVDM_HDR_CMD_TYPE(vdm_hdr) == SVDM_CMD_TYPE_INITIATOR &&
+ SVDM_HDR_CMD(vdm_hdr) <= USBPD_SVDM_DISCOVER_SVIDS) {
if (pd->vdm_tx_retry) {
usbpd_err(&pd->dev, "Previous Discover VDM command %d not ACKed/NAKed\n",
- VDM_HDR_CMD(pd->vdm_tx_retry->data[0]));
+ SVDM_HDR_CMD(
+ pd->vdm_tx_retry->data[0]));
kfree(pd->vdm_tx_retry);
}
- pd->vdm_tx_retry = vdm_tx;
+ pd->vdm_tx_retry = pd->vdm_tx;
} else {
- kfree(vdm_tx);
+ kfree(pd->vdm_tx);
}
+
+ pd->vdm_tx = NULL;
}
}
@@ -1194,13 +1308,9 @@ static void reset_vdm_state(struct usbpd *pd)
pd->vdm_tx_retry = NULL;
kfree(pd->discovered_svids);
pd->discovered_svids = NULL;
- while (!list_empty(&pd->vdm_tx_queue)) {
- struct vdm_tx *vdm_tx =
- list_first_entry(&pd->vdm_tx_queue,
- struct vdm_tx, entry);
- list_del(&vdm_tx->entry);
- kfree(vdm_tx);
- }
+ pd->num_svids = 0;
+ kfree(pd->vdm_tx);
+ pd->vdm_tx = NULL;
}
static void dr_swap(struct usbpd *pd)
@@ -1230,6 +1340,34 @@ static void dr_swap(struct usbpd *pd)
pd_phy_update_roles(pd->current_dr, pd->current_pr);
}
+
+static void vconn_swap(struct usbpd *pd)
+{
+ int ret;
+
+ if (pd->vconn_enabled) {
+ pd->current_state = PE_VCS_WAIT_FOR_VCONN;
+ kick_sm(pd, VCONN_ON_TIME);
+ } else {
+ ret = regulator_enable(pd->vconn);
+ if (ret) {
+ usbpd_err(&pd->dev, "Unable to enable vconn\n");
+ return;
+ }
+
+ pd->vconn_enabled = true;
+
+ ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error sending PS_RDY\n");
+ usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+ PE_SRC_SEND_SOFT_RESET :
+ PE_SNK_SEND_SOFT_RESET);
+ return;
+ }
+ }
+}
+
/* Handles current state and determines transitions */
static void usbpd_sm(struct work_struct *w)
{
@@ -1243,6 +1381,7 @@ static void usbpd_sm(struct work_struct *w)
usbpd_state_strings[pd->current_state]);
hrtimer_cancel(&pd->timer);
+ pd->sm_queued = false;
if (pd->rx_msg_len)
data_recvd = pd->rx_msg_type;
@@ -1250,11 +1389,11 @@ static void usbpd_sm(struct work_struct *w)
ctrl_recvd = pd->rx_msg_type;
/* Disconnect? */
- if (pd->typec_mode == POWER_SUPPLY_TYPEC_NONE) {
+ if (pd->typec_mode == POWER_SUPPLY_TYPEC_NONE && !pd->in_pr_swap) {
if (pd->current_state == PE_UNKNOWN)
- return;
+ goto sm_done;
- usbpd_info(&pd->dev, "USB PD disconnect\n");
+ usbpd_info(&pd->dev, "USB Type-C disconnect\n");
if (pd->pd_phy_opened) {
pd_phy_close();
@@ -1274,14 +1413,21 @@ static void usbpd_sm(struct work_struct *w)
val.intval = 0;
power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED,
+ &val);
+
+ power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_ACTIVE, &val);
- if (pd->current_pr == PR_SRC) {
+ if (pd->current_pr == PR_SRC)
regulator_disable(pd->vbus);
- if (pd->vconn_enabled) {
- regulator_disable(pd->vconn);
- pd->vconn_enabled = false;
- }
+
+ if (pd->vconn_enabled) {
+ regulator_disable(pd->vconn);
+ pd->vconn_enabled = false;
}
if (pd->current_dr == DR_UFP)
@@ -1306,18 +1452,24 @@ static void usbpd_sm(struct work_struct *w)
pd->current_state = PE_UNKNOWN;
- return;
+ goto sm_done;
}
/* Hard reset? */
if (pd->hard_reset) {
+ val.intval = 1;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+ pd->in_pr_swap = false;
reset_vdm_state(pd);
if (pd->current_pr == PR_SINK)
usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
else
usbpd_set_state(pd, PE_SRC_TRANSITION_TO_DEFAULT);
- pd->hard_reset = false;
+
+ goto sm_done;
}
/* Soft reset? */
@@ -1353,6 +1505,10 @@ static void usbpd_sm(struct work_struct *w)
}
break;
+ case PE_SRC_STARTUP:
+ usbpd_set_state(pd, PE_SRC_STARTUP);
+ break;
+
case PE_SRC_SEND_CAPABILITIES:
ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES, default_src_caps,
ARRAY_SIZE(default_src_caps), SOP_MSG);
@@ -1378,30 +1534,31 @@ static void usbpd_sm(struct work_struct *w)
break;
}
- hrtimer_start(&pd->timer, ms_to_ktime(SRC_CAP_TIME),
- HRTIMER_MODE_REL);
+ kick_sm(pd, SRC_CAP_TIME);
break;
}
+ val.intval = 1;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+
/* transmit was successful if GoodCRC was received */
pd->caps_count = 0;
pd->hard_reset_count = 0;
pd->pd_connected = true; /* we know peer is PD capable */
- val.intval = POWER_SUPPLY_TYPE_USB_PD;
- power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_TYPE, &val);
-
/* wait for REQUEST */
pd->current_state = PE_SRC_SEND_CAPABILITIES_WAIT;
- hrtimer_start(&pd->timer, ms_to_ktime(SENDER_RESPONSE_TIME),
- HRTIMER_MODE_REL);
+ kick_sm(pd, SENDER_RESPONSE_TIME);
break;
case PE_SRC_SEND_CAPABILITIES_WAIT:
if (data_recvd == MSG_REQUEST) {
pd->rdo = pd->rx_payload[0];
usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
+ } else if (data_recvd || ctrl_recvd) {
+ usbpd_err(&pd->dev, "Unexpected message received\n");
+ usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
} else {
usbpd_set_state(pd, PE_SRC_HARD_RESET);
}
@@ -1417,6 +1574,14 @@ static void usbpd_sm(struct work_struct *w)
usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
break;
}
+ } else if (ctrl_recvd == MSG_GET_SINK_CAP) {
+ ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
+ default_snk_caps,
+ ARRAY_SIZE(default_snk_caps), SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error sending Sink Caps\n");
+ usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+ }
} else if (data_recvd == MSG_REQUEST) {
pd->rdo = pd->rx_payload[0];
usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
@@ -1436,6 +1601,9 @@ static void usbpd_sm(struct work_struct *w)
dr_swap(pd);
kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
} else if (ctrl_recvd == MSG_PR_SWAP) {
+ /* lock in current mode */
+ set_power_role(pd, pd->current_pr);
+
/* we'll happily accept Src->Sink requests anytime */
ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
if (ret) {
@@ -1445,8 +1613,17 @@ static void usbpd_sm(struct work_struct *w)
}
pd->current_state = PE_PRS_SRC_SNK_TRANSITION_TO_OFF;
- queue_work(pd->wq, &pd->sm_work);
+ kick_sm(pd, SRC_TRANSITION_TIME);
break;
+ } else if (ctrl_recvd == MSG_VCONN_SWAP) {
+ ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error sending Accept\n");
+ usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+ break;
+ }
+
+ vconn_swap(pd);
} else {
if (data_recvd == MSG_VDM)
handle_vdm_rx(pd);
@@ -1456,6 +1633,10 @@ static void usbpd_sm(struct work_struct *w)
break;
case PE_SRC_HARD_RESET:
+ val.intval = 1;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
pd_send_hard_reset(pd);
pd->in_explicit_contract = false;
reset_vdm_state(pd);
@@ -1465,39 +1646,41 @@ static void usbpd_sm(struct work_struct *w)
usbpd_set_state(pd, PE_SRC_TRANSITION_TO_DEFAULT);
break;
- case PE_SNK_DISCOVERY:
- if (!pd->vbus_present) {
- /* Hard reset and VBUS didn't come back? */
- power_supply_get_property(pd->usb_psy,
- POWER_SUPPLY_PROP_TYPE, &val);
- if (val.intval == POWER_SUPPLY_TYPEC_NONE) {
- pd->typec_mode = POWER_SUPPLY_TYPEC_NONE;
- queue_work(pd->wq, &pd->sm_work);
- }
- break;
- }
-
- usbpd_set_state(pd, PE_SNK_WAIT_FOR_CAPABILITIES);
+ case PE_SNK_STARTUP:
+ usbpd_set_state(pd, PE_SNK_STARTUP);
break;
case PE_SNK_WAIT_FOR_CAPABILITIES:
if (data_recvd == MSG_SOURCE_CAPABILITIES) {
- val.intval = 1;
+ val.intval = 0;
power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
+ &val);
- val.intval = POWER_SUPPLY_TYPE_USB_PD;
+ val.intval = 1;
power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_TYPE, &val);
+ POWER_SUPPLY_PROP_PD_ACTIVE, &val);
usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
} else if (pd->hard_reset_count < 3) {
usbpd_set_state(pd, PE_SNK_HARD_RESET);
} else if (pd->pd_connected) {
usbpd_info(&pd->dev, "Sink hard reset count exceeded, forcing reconnect\n");
+
+ val.intval = 0;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
+ &val);
+
usbpd_set_state(pd, PE_ERROR_RECOVERY);
} else {
usbpd_dbg(&pd->dev, "Sink hard reset count exceeded, disabling PD\n");
+
+ val.intval = 0;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
+ &val);
+
val.intval = 0;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_ACTIVE, &val);
@@ -1514,9 +1697,14 @@ static void usbpd_sm(struct work_struct *w)
POWER_SUPPLY_PROP_VOLTAGE_MIN,
&val);
- val.intval = 0; /* suspend charging */
+ /*
+ * disable charging; technically we are allowed to
+ * charge up to pSnkStdby (2.5 W) during this
+ * transition, but disable it just for simplicity.
+ */
+ val.intval = 0;
power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_CURRENT_MAX, &val);
+ POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
pd->selected_pdo = pd->requested_pdo;
usbpd_set_state(pd, PE_SNK_TRANSITION_SINK);
@@ -1526,6 +1714,9 @@ static void usbpd_sm(struct work_struct *w)
else
usbpd_set_state(pd,
PE_SNK_WAIT_FOR_CAPABILITIES);
+ } else if (pd->rx_msg_type) {
+ usbpd_err(&pd->dev, "Invalid response to sink request\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
} else {
/* timed out; go to hard reset */
usbpd_set_state(pd, PE_SNK_HARD_RESET);
@@ -1544,7 +1735,7 @@ static void usbpd_sm(struct work_struct *w)
/* resume charging */
val.intval = pd->requested_current * 1000; /* mA->uA */
power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_CURRENT_MAX, &val);
+ POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
usbpd_set_state(pd, PE_SNK_READY);
} else {
@@ -1554,9 +1745,9 @@ static void usbpd_sm(struct work_struct *w)
break;
case PE_SNK_READY:
- if (data_recvd == MSG_SOURCE_CAPABILITIES)
+ if (data_recvd == MSG_SOURCE_CAPABILITIES) {
usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
- else if (ctrl_recvd == MSG_GET_SINK_CAP) {
+ } else if (ctrl_recvd == MSG_GET_SINK_CAP) {
ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
default_snk_caps,
ARRAY_SIZE(default_snk_caps), SOP_MSG);
@@ -1564,6 +1755,15 @@ static void usbpd_sm(struct work_struct *w)
usbpd_err(&pd->dev, "Error sending Sink Caps\n");
usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
}
+ } else if (ctrl_recvd == MSG_GET_SOURCE_CAP) {
+ ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES,
+ default_src_caps,
+ ARRAY_SIZE(default_src_caps), SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error sending SRC CAPs\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
} else if (ctrl_recvd == MSG_DR_SWAP) {
if (pd->vdm_state == MODE_ENTERED) {
usbpd_set_state(pd, PE_SNK_HARD_RESET);
@@ -1580,6 +1780,9 @@ static void usbpd_sm(struct work_struct *w)
dr_swap(pd);
kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
} else if (ctrl_recvd == MSG_PR_SWAP) {
+ /* lock in current mode */
+ set_power_role(pd, pd->current_pr);
+
/* TODO: should we Reject in certain circumstances? */
ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
if (ret) {
@@ -1589,20 +1792,34 @@ static void usbpd_sm(struct work_struct *w)
}
pd->in_pr_swap = true;
- pd->current_state = PE_PRS_SNK_SRC_TRANSITION_TO_OFF;
- /* turn off sink */
- pd->in_explicit_contract = false;
-
+ usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
+ break;
+ } else if (ctrl_recvd == MSG_VCONN_SWAP) {
/*
- * need to update PR bit in message header so that
- * proper GoodCRC is sent when receiving next PS_RDY
+ * if VCONN is connected to VBUS, make sure we are
+ * not in high voltage contract, otherwise reject.
*/
- pd->current_pr = PR_SRC;
- pd_phy_update_roles(pd->current_dr, pd->current_pr);
+ if (!pd->vconn_is_external &&
+ (pd->requested_voltage > 5000000)) {
+ ret = pd_send_msg(pd, MSG_REJECT, NULL, 0,
+ SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error sending Reject\n");
+ usbpd_set_state(pd,
+ PE_SNK_SEND_SOFT_RESET);
+ }
- hrtimer_start(&pd->timer, ms_to_ktime(PS_SOURCE_OFF),
- HRTIMER_MODE_REL);
- break;
+ break;
+ }
+
+ ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error sending Accept\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+
+ vconn_swap(pd);
} else {
if (data_recvd == MSG_VDM)
handle_vdm_rx(pd);
@@ -1611,6 +1828,26 @@ static void usbpd_sm(struct work_struct *w)
}
break;
+ case PE_SNK_TRANSITION_TO_DEFAULT:
+ pd->hard_reset = false;
+
+ val.intval = 0;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+ if (pd->vbus_present) {
+ usbpd_set_state(pd, PE_SNK_STARTUP);
+ } else {
+ /* Hard reset and VBUS didn't come back? */
+ power_supply_get_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_TYPEC_MODE, &val);
+ if (val.intval == POWER_SUPPLY_TYPEC_NONE) {
+ pd->typec_mode = POWER_SUPPLY_TYPEC_NONE;
+ kick_sm(pd, 0);
+ }
+ }
+ break;
+
case PE_SRC_SOFT_RESET:
case PE_SNK_SOFT_RESET:
/* Reset protocol layer */
@@ -1649,14 +1886,14 @@ static void usbpd_sm(struct work_struct *w)
/* prepare charger for VBUS change */
val.intval = 1;
power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
pd->requested_voltage = 5000000;
if (pd->requested_current) {
val.intval = pd->requested_current = 0;
power_supply_set_property(pd->usb_psy,
- POWER_SUPPLY_PROP_CURRENT_MAX, &val);
+ POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
}
val.intval = pd->requested_voltage;
@@ -1684,16 +1921,23 @@ static void usbpd_sm(struct work_struct *w)
}
pd->current_state = PE_PRS_SRC_SNK_TRANSITION_TO_OFF;
- /* fall-through */
+ kick_sm(pd, SRC_TRANSITION_TIME);
+ break;
+
case PE_PRS_SRC_SNK_TRANSITION_TO_OFF:
pd->in_pr_swap = true;
pd->in_explicit_contract = false;
regulator_disable(pd->vbus);
- set_power_role(pd, PR_SINK); /* switch Rp->Rd */
+
+ /* PE_PRS_SRC_SNK_Assert_Rd */
pd->current_pr = PR_SINK;
+ set_power_role(pd, pd->current_pr);
pd_phy_update_roles(pd->current_dr, pd->current_pr);
+ /* allow time for Vbus discharge, must be < tSrcSwapStdby */
+ msleep(500);
+
ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
if (ret) {
usbpd_err(&pd->dev, "Error sending PS_RDY\n");
@@ -1702,8 +1946,7 @@ static void usbpd_sm(struct work_struct *w)
}
pd->current_state = PE_PRS_SRC_SNK_WAIT_SOURCE_ON;
- hrtimer_start(&pd->timer, ms_to_ktime(PS_SOURCE_ON),
- HRTIMER_MODE_REL);
+ kick_sm(pd, PS_SOURCE_ON);
break;
case PE_PRS_SRC_SNK_WAIT_SOURCE_ON:
@@ -1720,19 +1963,7 @@ static void usbpd_sm(struct work_struct *w)
}
pd->in_pr_swap = true;
- pd->current_state = PE_PRS_SNK_SRC_TRANSITION_TO_OFF;
- /* turn off sink */
- pd->in_explicit_contract = false;
-
- /*
- * need to update PR bit in message header so that
- * proper GoodCRC is sent when receiving next PS_RDY
- */
- pd->current_pr = PR_SRC;
- pd_phy_update_roles(pd->current_dr, pd->current_pr);
-
- hrtimer_start(&pd->timer, ms_to_ktime(PS_SOURCE_OFF),
- HRTIMER_MODE_REL);
+ usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
break;
case PE_PRS_SNK_SRC_TRANSITION_TO_OFF:
@@ -1741,14 +1972,20 @@ static void usbpd_sm(struct work_struct *w)
break;
}
+ /* PE_PRS_SNK_SRC_Assert_Rp */
+ pd->current_pr = PR_SRC;
+ set_power_role(pd, pd->current_pr);
pd->current_state = PE_PRS_SNK_SRC_SOURCE_ON;
+
/* fall-through */
+
case PE_PRS_SNK_SRC_SOURCE_ON:
- set_power_role(pd, PR_SRC);
ret = regulator_enable(pd->vbus);
if (ret)
usbpd_err(&pd->dev, "Unable to enable vbus\n");
+ msleep(200); /* allow time VBUS ramp-up, must be < tNewSrc */
+
ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
if (ret) {
usbpd_err(&pd->dev, "Error sending PS_RDY\n");
@@ -1759,6 +1996,26 @@ static void usbpd_sm(struct work_struct *w)
usbpd_set_state(pd, PE_SRC_STARTUP);
break;
+ case PE_VCS_WAIT_FOR_VCONN:
+ if (ctrl_recvd == MSG_PS_RDY) {
+ /*
+ * hopefully redundant check but in case not enabled
+ * avoids unbalanced regulator disable count
+ */
+ if (pd->vconn_enabled)
+ regulator_disable(pd->vconn);
+ pd->vconn_enabled = false;
+
+ pd->current_state = pd->current_pr == PR_SRC ?
+ PE_SRC_READY : PE_SNK_READY;
+ } else {
+ /* timed out; go to hard reset */
+ usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+ PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
+ }
+
+ break;
+
default:
usbpd_err(&pd->dev, "Unhandled state %s\n",
usbpd_state_strings[pd->current_state]);
@@ -1767,6 +2024,10 @@ static void usbpd_sm(struct work_struct *w)
/* Rx message should have been consumed now */
pd->rx_msg_type = pd->rx_msg_len = 0;
+
+sm_done:
+ if (!pd->sm_queued)
+ pm_relax(&pd->dev);
}
static inline const char *src_current(enum power_supply_typec_mode typec_mode)
@@ -1787,72 +2048,42 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
{
struct usbpd *pd = container_of(nb, struct usbpd, psy_nb);
union power_supply_propval val;
- bool pd_allowed;
enum power_supply_typec_mode typec_mode;
- enum power_supply_type psy_type;
int ret;
if (ptr != pd->usb_psy || evt != PSY_EVENT_PROP_CHANGED)
return 0;
ret = power_supply_get_property(pd->usb_psy,
- POWER_SUPPLY_PROP_PD_ALLOWED, &val);
+ POWER_SUPPLY_PROP_TYPEC_MODE, &val);
if (ret) {
- usbpd_err(&pd->dev, "Unable to read USB PROP_PD_ALLOWED: %d\n",
- ret);
+ usbpd_err(&pd->dev, "Unable to read USB TYPEC_MODE: %d\n", ret);
return ret;
}
- pd_allowed = val.intval;
+ typec_mode = val.intval;
ret = power_supply_get_property(pd->usb_psy,
- POWER_SUPPLY_PROP_PRESENT, &val);
+ POWER_SUPPLY_PROP_PE_START, &val);
if (ret) {
- usbpd_err(&pd->dev, "Unable to read USB PRESENT: %d\n", ret);
+ usbpd_err(&pd->dev, "Unable to read USB PROP_PE_START: %d\n",
+ ret);
return ret;
}
- pd->vbus_present = val.intval;
+ /* Don't proceed if PE_START=0 as other props may still change */
+ if (!val.intval && !pd->pd_connected &&
+ typec_mode != POWER_SUPPLY_TYPEC_NONE)
+ return 0;
ret = power_supply_get_property(pd->usb_psy,
- POWER_SUPPLY_PROP_TYPEC_MODE, &val);
+ POWER_SUPPLY_PROP_PRESENT, &val);
if (ret) {
- usbpd_err(&pd->dev, "Unable to read USB TYPEC_MODE: %d\n", ret);
+ usbpd_err(&pd->dev, "Unable to read USB PRESENT: %d\n", ret);
return ret;
}
- typec_mode = val.intval;
-
- /*
- * Don't proceed if cable is connected but PD_ALLOWED is false.
- * It means the PMIC may still be in the middle of performing
- * charger type detection.
- */
- if (!pd_allowed && typec_mode != POWER_SUPPLY_TYPEC_NONE)
- return 0;
-
- /*
- * Workaround for PMIC HW bug.
- *
- * During hard reset or PR swap (sink to source) when VBUS goes to 0
- * the CC logic will report this as a disconnection. In those cases it
- * can be ignored, however the downside is that pd->hard_reset can be
- * momentarily true even when a non-PD capable source is attached, and
- * can't be distinguished from a physical disconnect. In that case,
- * allow for the common case of disconnecting from an SDP.
- *
- * The less common case is a PD-capable SDP which will result in a
- * hard reset getting treated like a disconnect. We can live with this
- * until the HW bug is fixed: in which disconnection won't be reported
- * on VBUS loss alone unless pullup is also removed from CC.
- */
- if ((pd->hard_reset || pd->in_pr_swap) &&
- typec_mode == POWER_SUPPLY_TYPEC_NONE &&
- pd->psy_type != POWER_SUPPLY_TYPE_USB) {
- usbpd_dbg(&pd->dev, "Ignoring disconnect due to %s\n",
- pd->hard_reset ? "hard reset" : "PR swap");
- return 0;
- }
+ pd->vbus_present = val.intval;
ret = power_supply_get_property(pd->usb_psy,
POWER_SUPPLY_PROP_TYPE, &val);
@@ -1861,22 +2092,51 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
return ret;
}
- psy_type = val.intval;
-
- usbpd_dbg(&pd->dev, "typec mode:%d present:%d type:%d\n", typec_mode,
- pd->vbus_present, psy_type);
+ pd->psy_type = val.intval;
- /* any change? */
- if (pd->typec_mode == typec_mode && pd->psy_type == psy_type)
+ if (pd->typec_mode == typec_mode)
return 0;
pd->typec_mode = typec_mode;
- pd->psy_type = psy_type;
+
+ usbpd_dbg(&pd->dev, "typec mode:%d present:%d type:%d orientation:%d\n",
+ typec_mode, pd->vbus_present, pd->psy_type,
+ usbpd_get_plug_orientation(pd));
switch (typec_mode) {
/* Disconnect */
case POWER_SUPPLY_TYPEC_NONE:
- queue_work(pd->wq, &pd->sm_work);
+ if (pd->in_pr_swap) {
+ usbpd_dbg(&pd->dev, "Ignoring disconnect due to PR swap\n");
+ return 0;
+ }
+
+ /*
+ * Workaround for PMIC HW bug.
+ *
+ * During hard reset when VBUS goes to 0 the CC logic
+ * will report this as a disconnection. In those cases
+ * it can be ignored, however the downside is that
+ * pd->hard_reset can be momentarily true even when a
+ * non-PD capable source is attached, and can't be
+ * distinguished from a physical disconnect. In that
+ * case, allow for the common case of disconnecting
+ * from an SDP.
+ *
+ * The less common case is a PD-capable SDP which will
+ * result in a hard reset getting treated like a
+ * disconnect. We can live with this until the HW bug
+ * is fixed: in which disconnection won't be reported
+ * on VBUS loss alone unless pullup is also removed
+ * from CC.
+ */
+ if (pd->psy_type != POWER_SUPPLY_TYPE_USB &&
+ pd->current_state ==
+ PE_SNK_TRANSITION_TO_DEFAULT) {
+ usbpd_dbg(&pd->dev, "Ignoring disconnect due to hard reset\n");
+ return 0;
+ }
+
break;
/* Sink states */
@@ -1885,10 +2145,8 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
usbpd_info(&pd->dev, "Type-C Source (%s) connected\n",
src_current(typec_mode));
- if (pd->current_pr != PR_SINK) {
- pd->current_pr = PR_SINK;
- queue_work(pd->wq, &pd->sm_work);
- }
+ pd->current_pr = PR_SINK;
+ pd->in_pr_swap = false;
break;
/* Source states */
@@ -1897,10 +2155,8 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
usbpd_info(&pd->dev, "Type-C Sink%s connected\n",
typec_mode == POWER_SUPPLY_TYPEC_SINK ?
"" : " (powered)");
- if (pd->current_pr != PR_SRC) {
- pd->current_pr = PR_SRC;
- queue_work(pd->wq, &pd->sm_work);
- }
+ pd->current_pr = PR_SRC;
+ pd->in_pr_swap = false;
break;
case POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY:
@@ -1910,10 +2166,13 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
usbpd_info(&pd->dev, "Type-C Analog Audio Adapter connected\n");
break;
default:
- usbpd_warn(&pd->dev, "Unsupported typec mode:%d\n", typec_mode);
+ usbpd_warn(&pd->dev, "Unsupported typec mode:%d\n",
+ typec_mode);
break;
}
+ /* queue state machine due to CC state change */
+ kick_sm(pd, 0);
return 0;
}
@@ -2337,6 +2596,10 @@ struct usbpd *usbpd_create(struct device *parent)
if (ret)
goto free_pd;
+ ret = device_init_wakeup(&pd->dev, true);
+ if (ret)
+ goto free_pd;
+
ret = device_add(&pd->dev);
if (ret)
goto free_pd;
@@ -2392,11 +2655,13 @@ struct usbpd *usbpd_create(struct device *parent)
goto unreg_psy;
}
+ pd->vconn_is_external = device_property_present(parent,
+ "qcom,vconn-uses-external-source");
+
pd->current_pr = PR_NONE;
pd->current_dr = DR_NONE;
list_add_tail(&pd->instance, &_usbpd);
- INIT_LIST_HEAD(&pd->vdm_tx_queue);
INIT_LIST_HEAD(&pd->svid_handlers);
/* force read initial power_supply values */
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
index 5b5e6210a1bb..0b9b60c3ca45 100644
--- a/drivers/usb/pd/qpnp-pdphy.c
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -23,6 +23,8 @@
#include <linux/of_irq.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
#include "usbpd.h"
#define USB_PDPHY_MAX_DATA_OBJ_LEN 28
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index 6bf80e43cac5..021755f7f32d 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -187,8 +187,8 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
{
- int tooff = 0, fromoff = 0;
- int size;
+ u32 tooff = 0, fromoff = 0;
+ u32 size;
if (to->start > from->start)
fromoff = to->start - from->start;
@@ -198,10 +198,10 @@ int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
return -EINVAL;
size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
+ if (size > (from->len - fromoff))
size = from->len - fromoff;
size *= sizeof(u16);
- if (!size)
+ if (size == 0)
return -EINVAL;
if (copy_to_user(to->red+tooff, from->red+fromoff, size))
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index a5368cdf2254..1e93a5b2e9ba 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -193,6 +193,7 @@ enum mdss_qos_settings {
MDSS_QOS_REMAPPER,
MDSS_QOS_IB_NOCR,
MDSS_QOS_WB2_WRITE_GATHER_EN,
+ MDSS_QOS_WB_QOS,
MDSS_QOS_MAX,
};
@@ -459,6 +460,7 @@ struct mdss_data_type {
u32 nmax_concurrent_ad_hw;
struct workqueue_struct *ad_calc_wq;
u32 ad_debugen;
+ bool mem_retain;
struct mdss_intr hist_intr;
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 57e18a7dc5e1..b246204f3181 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -45,6 +45,18 @@
#define VDDA_UA_ON_LOAD 100000 /* uA units */
#define VDDA_UA_OFF_LOAD 100 /* uA units */
+#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
+static u32 supported_modes[] = {
+ HDMI_VFRMT_640x480p60_4_3,
+ HDMI_VFRMT_720x480p60_4_3, HDMI_VFRMT_720x480p60_16_9,
+ HDMI_VFRMT_1280x720p60_16_9,
+ HDMI_VFRMT_1920x1080p60_16_9,
+ HDMI_VFRMT_3840x2160p24_16_9, HDMI_VFRMT_3840x2160p30_16_9,
+ HDMI_VFRMT_3840x2160p60_16_9,
+ HDMI_VFRMT_4096x2160p24_256_135, HDMI_VFRMT_4096x2160p30_256_135,
+ HDMI_VFRMT_4096x2160p60_256_135, HDMI_EVFRMT_4096x2160p24_16_9
+};
+
static void mdss_dp_put_dt_clk_data(struct device *dev,
struct dss_module_power *module_power)
{
@@ -789,17 +801,34 @@ void mdss_dp_config_ctrl(struct mdss_dp_drv_pdata *dp)
cap = &dp->dpcd;
- data = dp->lane_cnt - 1;
- data <<= 4;
+ data |= (2 << 13); /* Default-> LSCLK DIV: 1/4 LCLK */
+
+ /* Color Format */
+ switch (dp->panel_data.panel_info.out_format) {
+ case MDP_Y_CBCR_H2V2:
+ data |= (1 << 11); /* YUV420 */
+ break;
+ case MDP_Y_CBCR_H2V1:
+ data |= (2 << 11); /* YUV422 */
+ break;
+ default:
+ data |= (0 << 11); /* RGB */
+ break;
+ }
+
+ /* Scrambler reset enable */
+ if (cap->scrambler_reset)
+ data |= (1 << 10);
+
+ if (dp->edid.color_depth != 6)
+ data |= 0x100; /* Default: 8 bits */
+
+ /* Num of Lanes */
+ data |= ((dp->lane_cnt - 1) << 4);
if (cap->enhanced_frame)
data |= 0x40;
- if (dp->edid.color_depth == 8) {
- /* 0 == 6 bits, 1 == 8 bits */
- data |= 0x100; /* bit 8 */
- }
-
if (!timing->interlaced) /* progressive */
data |= 0x04;
@@ -863,6 +892,8 @@ static int dp_audio_info_setup(struct platform_device *pdev,
mdss_dp_set_safe_to_exit_level(&dp_ctrl->ctrl_io, dp_ctrl->lane_cnt);
mdss_dp_audio_enable(&dp_ctrl->ctrl_io, true);
+ dp_ctrl->wait_for_audio_comp = true;
+
return rc;
} /* dp_audio_info_setup */
@@ -885,6 +916,20 @@ static int dp_get_audio_edid_blk(struct platform_device *pdev,
return rc;
} /* dp_get_audio_edid_blk */
+static void dp_audio_codec_teardown_done(struct platform_device *pdev)
+{
+	struct mdss_dp_drv_pdata *dp = platform_get_drvdata(pdev);
+
+	/* must bail out here: dp is dereferenced below */
+	if (!dp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	pr_debug("audio codec teardown done\n");
+	complete_all(&dp->audio_comp);
+}
+
static int mdss_dp_init_ext_disp(struct mdss_dp_drv_pdata *dp)
{
int ret = 0;
@@ -906,6 +948,8 @@ static int mdss_dp_init_ext_disp(struct mdss_dp_drv_pdata *dp)
dp_get_audio_edid_blk;
dp->ext_audio_data.codec_ops.cable_status =
dp_get_cable_status;
+ dp->ext_audio_data.codec_ops.teardown_done =
+ dp_audio_codec_teardown_done;
if (!dp->pdev->dev.of_node) {
pr_err("%s cannot find dp dev.of_node\n", __func__);
@@ -936,8 +980,6 @@ end:
return ret;
}
-#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
-
static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
{
struct mdss_panel_info *pinfo;
@@ -949,7 +991,6 @@ static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
return -EINVAL;
}
- dp_drv->ds_data.ds_registered = false;
ret = hdmi_get_supported_mode(&timing, &dp_drv->ds_data, vic);
pinfo = &dp_drv->panel_data.panel_info;
@@ -981,12 +1022,21 @@ static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
pinfo->lcdc.hsync_skew = 0;
pinfo->is_pluggable = true;
+ dp_drv->bpp = pinfo->bpp;
+
pr_debug("update res. vic= %d, pclk_rate = %llu\n",
dp_drv->vic, pinfo->clk_rate);
return 0;
} /* dp_init_panel_info */
+static inline void mdss_dp_set_audio_switch_node(
+ struct mdss_dp_drv_pdata *dp, int val)
+{
+ if (dp && dp->ext_audio_data.intf_ops.notify)
+ dp->ext_audio_data.intf_ops.notify(dp->ext_pdev,
+ val);
+}
int mdss_dp_on(struct mdss_panel_data *pdata)
{
@@ -1054,6 +1104,9 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
goto exit;
}
+ mdss_dp_phy_share_lane_config(&dp_drv->phy_io,
+ orientation, dp_drv->dpcd.max_lane_count);
+
pr_debug("link_rate = 0x%x\n", dp_drv->link_rate);
dp_drv->power_data[DP_CTRL_PM].clk_config[0].rate =
@@ -1096,6 +1149,7 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
pr_debug("mainlink ready\n");
dp_drv->power_on = true;
+ mdss_dp_set_audio_switch_node(dp_drv, true);
pr_debug("End-\n");
exit:
@@ -1103,35 +1157,66 @@ exit:
return ret;
}
-int mdss_dp_off(struct mdss_panel_data *pdata)
+static void mdss_dp_mainlink_off(struct mdss_panel_data *pdata)
{
struct mdss_dp_drv_pdata *dp_drv = NULL;
+ const int idle_pattern_completion_timeout_ms = 3 * HZ / 100;
dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
panel_data);
if (!dp_drv) {
pr_err("Invalid input data\n");
- return -EINVAL;
+ return;
}
- pr_debug("Entered++, cont_splash=%d\n", dp_drv->cont_splash);
+ pr_debug("Entered++\n");
/* wait until link training is completed */
mutex_lock(&dp_drv->train_mutex);
reinit_completion(&dp_drv->idle_comp);
+ mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE);
+ if (!wait_for_completion_timeout(&dp_drv->idle_comp,
+ idle_pattern_completion_timeout_ms))
+ pr_warn("PUSH_IDLE pattern timedout\n");
+
+ mutex_unlock(&dp_drv->train_mutex);
+ pr_debug("mainlink off done\n");
+}
+
+int mdss_dp_off(struct mdss_panel_data *pdata)
+{
+ struct mdss_dp_drv_pdata *dp_drv = NULL;
+
+ dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
+ panel_data);
+ if (!dp_drv) {
+ pr_err("Invalid input data\n");
+ return -EINVAL;
+ }
+ pr_debug("Entered++, cont_splash=%d\n", dp_drv->cont_splash);
- mdss_dp_state_ctrl(&dp_drv->ctrl_io, 0);
+ /* wait until link training is completed */
+ mutex_lock(&dp_drv->train_mutex);
if (dp_drv->link_clks_on)
mdss_dp_mainlink_ctrl(&dp_drv->ctrl_io, false);
mdss_dp_aux_ctrl(&dp_drv->ctrl_io, false);
+ mdss_dp_audio_enable(&dp_drv->ctrl_io, false);
+
mdss_dp_irq_disable(dp_drv);
mdss_dp_config_gpios(dp_drv, false);
mdss_dp_pinctrl_set_state(dp_drv, false);
+ /*
+ * The global reset will need DP link related clocks to be
+ * running. Add the global reset just before disabling the
+ * link clocks and core clocks.
+ */
+ mdss_dp_ctrl_reset(&dp_drv->ctrl_io);
+
/* Make sure DP is disabled before clk disable */
wmb();
mdss_dp_clk_ctrl(dp_drv, DP_CTRL_PM, false);
@@ -1147,14 +1232,6 @@ int mdss_dp_off(struct mdss_panel_data *pdata)
return 0;
}
-static inline void mdss_dp_set_audio_switch_node(
- struct mdss_dp_drv_pdata *dp, int val)
-{
- if (dp && dp->ext_audio_data.intf_ops.notify)
- dp->ext_audio_data.intf_ops.notify(dp->ext_pdev,
- val);
-}
-
static void mdss_dp_send_cable_notification(
struct mdss_dp_drv_pdata *dp, int val)
{
@@ -1169,6 +1246,38 @@ static void mdss_dp_send_cable_notification(
dp->ext_audio_data.type, val);
}
+static void mdss_dp_audio_codec_wait(struct mdss_dp_drv_pdata *dp)
+{
+	const int audio_completion_timeout = HZ * 3; /* jiffies, i.e. 3 s */
+	int ret = 0;
+
+	if (!dp->wait_for_audio_comp)
+		return;
+
+	reinit_completion(&dp->audio_comp);
+	ret = wait_for_completion_timeout(&dp->audio_comp,
+			audio_completion_timeout);
+	if (ret <= 0)
+		pr_warn("audio codec teardown timed out\n");
+
+	dp->wait_for_audio_comp = false;
+}
+
+static void mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp, bool enable)
+{
+ if (enable) {
+ mdss_dp_send_cable_notification(dp, enable);
+ } else {
+ mdss_dp_set_audio_switch_node(dp, enable);
+ mdss_dp_audio_codec_wait(dp);
+ mdss_dp_send_cable_notification(dp, enable);
+ }
+
+ pr_debug("notify state %s done\n",
+ enable ? "ENABLE" : "DISABLE");
+}
+
+
static int mdss_dp_edid_init(struct mdss_panel_data *pdata)
{
struct mdss_dp_drv_pdata *dp_drv = NULL;
@@ -1183,6 +1292,10 @@ static int mdss_dp_edid_init(struct mdss_panel_data *pdata)
dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
panel_data);
+ dp_drv->ds_data.ds_registered = true;
+ dp_drv->ds_data.modes_num = ARRAY_SIZE(supported_modes);
+ dp_drv->ds_data.modes = supported_modes;
+
dp_drv->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
edid_init_data.kobj = dp_drv->kobj;
edid_init_data.ds_data = dp_drv->ds_data;
@@ -1236,15 +1349,19 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
mdss_dp_aux_init(dp_drv);
+ mdss_dp_phy_initialize(dp_drv);
+ mdss_dp_ctrl_reset(&dp_drv->ctrl_io);
mdss_dp_phy_reset(&dp_drv->ctrl_io);
mdss_dp_aux_reset(&dp_drv->ctrl_io);
- mdss_dp_phy_initialize(dp_drv);
mdss_dp_aux_ctrl(&dp_drv->ctrl_io, true);
pr_debug("Ctrl_hw_rev =0x%x, phy hw_rev =0x%x\n",
mdss_dp_get_ctrl_hw_version(&dp_drv->ctrl_io),
mdss_dp_get_phy_hw_version(&dp_drv->phy_io));
+ pr_debug("plug Orientation = %d\n",
+ usbpd_get_plug_orientation(dp_drv->pd));
+
mdss_dp_phy_aux_setup(&dp_drv->phy_io);
mdss_dp_irq_enable(dp_drv);
@@ -1264,8 +1381,7 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
goto edid_error;
}
- mdss_dp_send_cable_notification(dp_drv, true);
- mdss_dp_set_audio_switch_node(dp_drv, true);
+ mdss_dp_notify_clients(dp_drv, true);
dp_drv->dp_initialized = true;
return ret;
@@ -1274,6 +1390,7 @@ edid_error:
mdss_dp_clk_ctrl(dp_drv, DP_CORE_PM, false);
clk_error:
mdss_dp_regulator_ctrl(dp_drv, false);
+ mdss_dp_config_gpios(dp_drv, false);
vreg_error:
return ret;
}
@@ -1530,7 +1647,7 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
return -EINVAL;
}
- pr_debug("event=%d\n", event);
+ pr_debug("event=%s\n", mdss_panel_intf_event_to_string(event));
dp = container_of(pdata, struct mdss_dp_drv_pdata,
panel_data);
@@ -1553,6 +1670,7 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
case MDSS_EVENT_BLANK:
if (ops && ops->off)
ops->off(dp->hdcp_data);
+ mdss_dp_mainlink_off(pdata);
break;
case MDSS_EVENT_FB_REGISTERED:
fbi = (struct fb_info *)arg;
@@ -1702,23 +1820,22 @@ static void mdss_dp_do_link_train(struct mdss_dp_drv_pdata *dp)
static void mdss_dp_event_work(struct work_struct *work)
{
struct mdss_dp_drv_pdata *dp = NULL;
- struct delayed_work *dw = to_delayed_work(work);
unsigned long flag;
- u32 todo = 0, dp_config_pkt[2];
+ u32 todo = 0, config;
- if (!dw) {
+ if (!work) {
pr_err("invalid work structure\n");
return;
}
- dp = container_of(dw, struct mdss_dp_drv_pdata, dwork);
+ dp = container_of(work, struct mdss_dp_drv_pdata, work);
spin_lock_irqsave(&dp->event_lock, flag);
todo = dp->current_event;
dp->current_event = 0;
spin_unlock_irqrestore(&dp->event_lock, flag);
- pr_debug("todo=%x\n", todo);
+ pr_debug("todo=%s\n", mdss_dp_ev_event_to_string(todo));
switch (todo) {
case EV_EDID_READ:
@@ -1756,11 +1873,9 @@ static void mdss_dp_event_work(struct work_struct *work)
SVDM_CMD_TYPE_INITIATOR, 0x1, 0x0, 0x0);
break;
case EV_USBPD_DP_CONFIGURE:
- dp_config_pkt[0] = SVDM_HDR(USB_C_DP_SID, VDM_VERSION, 0x1,
- SVDM_CMD_TYPE_INITIATOR, DP_VDM_CONFIGURE);
- dp_config_pkt[1] = mdss_dp_usbpd_gen_config_pkt(dp);
+ config = mdss_dp_usbpd_gen_config_pkt(dp);
usbpd_send_svdm(dp->pd, USB_C_DP_SID, DP_VDM_CONFIGURE,
- SVDM_CMD_TYPE_INITIATOR, 0x1, dp_config_pkt, 0x2);
+ SVDM_CMD_TYPE_INITIATOR, 0x1, &config, 0x1);
break;
default:
pr_err("Unknown event:%d\n", todo);
@@ -1771,8 +1886,7 @@ static void dp_send_events(struct mdss_dp_drv_pdata *dp, u32 events)
{
spin_lock(&dp->event_lock);
dp->current_event = events;
- queue_delayed_work(dp->workq,
- &dp->dwork, HZ);
+ queue_work(dp->workq, &dp->work);
spin_unlock(&dp->event_lock);
}
@@ -1848,7 +1962,7 @@ static int mdss_dp_event_setup(struct mdss_dp_drv_pdata *dp)
return -EPERM;
}
- INIT_DELAYED_WORK(&dp->dwork, mdss_dp_event_work);
+ INIT_WORK(&dp->work, mdss_dp_event_work);
return 0;
}
@@ -1882,9 +1996,48 @@ static void usbpd_disconnect_callback(struct usbpd_svid_handler *hdlr)
pr_debug("cable disconnected\n");
mutex_lock(&dp_drv->pd_msg_mutex);
dp_drv->cable_connected = false;
+ dp_drv->alt_mode.current_state = UNKNOWN_STATE;
mutex_unlock(&dp_drv->pd_msg_mutex);
- mdss_dp_send_cable_notification(dp_drv, false);
- mdss_dp_set_audio_switch_node(dp_drv, false);
+ mdss_dp_notify_clients(dp_drv, false);
+}
+
+static int mdss_dp_validate_callback(u8 cmd,
+ enum usbpd_svdm_cmd_type cmd_type, int num_vdos)
+{
+ int ret = 0;
+
+ if (cmd_type == SVDM_CMD_TYPE_RESP_NAK) {
+ pr_err("error: NACK\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (cmd_type == SVDM_CMD_TYPE_RESP_BUSY) {
+ pr_err("error: BUSY\n");
+ ret = -EBUSY;
+ goto end;
+ }
+
+ if (cmd == USBPD_SVDM_ATTENTION) {
+ if (cmd_type != SVDM_CMD_TYPE_INITIATOR) {
+ pr_err("error: invalid cmd type for attention\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (!num_vdos) {
+ pr_err("error: no vdo provided\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ } else {
+ if (cmd_type != SVDM_CMD_TYPE_RESP_ACK) {
+ pr_err("error: invalid cmd type\n");
+ ret = -EINVAL;
+ }
+ }
+end:
+ return ret;
}
static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
@@ -1902,81 +2055,51 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
pr_debug("callback -> cmd: 0x%x, *vdos = 0x%x, num_vdos = %d\n",
cmd, *vdos, num_vdos);
+ if (mdss_dp_validate_callback(cmd, cmd_type, num_vdos))
+ return;
+
switch (cmd) {
case USBPD_SVDM_DISCOVER_MODES:
- if (cmd_type == SVDM_CMD_TYPE_RESP_ACK) {
- dp_drv->alt_mode.dp_cap.response = *vdos;
- mdss_dp_usbpd_ext_capabilities
- (&dp_drv->alt_mode.dp_cap);
- dp_drv->alt_mode.current_state = DISCOVER_MODES_DONE;
- dp_send_events(dp_drv, EV_USBPD_ENTER_MODE);
- } else {
- pr_err("unknown response: %d for Discover_modes\n",
- cmd_type);
- }
+ dp_drv->alt_mode.dp_cap.response = *vdos;
+ mdss_dp_usbpd_ext_capabilities(&dp_drv->alt_mode.dp_cap);
+ dp_drv->alt_mode.current_state |= DISCOVER_MODES_DONE;
+ dp_send_events(dp_drv, EV_USBPD_ENTER_MODE);
break;
case USBPD_SVDM_ENTER_MODE:
- if (cmd_type == SVDM_CMD_TYPE_RESP_ACK) {
- dp_drv->alt_mode.current_state = ENTER_MODE_DONE;
- dp_send_events(dp_drv, EV_USBPD_DP_STATUS);
- } else {
- pr_err("unknown response: %d for Enter_mode\n",
- cmd_type);
- }
+ dp_drv->alt_mode.current_state |= ENTER_MODE_DONE;
+ dp_send_events(dp_drv, EV_USBPD_DP_STATUS);
break;
case USBPD_SVDM_ATTENTION:
- if (cmd_type == SVDM_CMD_TYPE_INITIATOR) {
- pr_debug("Attention. cmd_type=%d\n",
- cmd_type);
- if (!(dp_drv->alt_mode.current_state
- == ENTER_MODE_DONE)) {
- pr_debug("sending discover_mode\n");
- dp_send_events(dp_drv, EV_USBPD_DISCOVER_MODES);
- break;
- }
- if (num_vdos == 1) {
- dp_drv->alt_mode.dp_status.response = *vdos;
- mdss_dp_usbpd_ext_dp_status
- (&dp_drv->alt_mode.dp_status);
- if (dp_drv->alt_mode.dp_status.hpd_high) {
- pr_debug("HPD high\n");
- dp_drv->alt_mode.current_state =
- DP_STATUS_DONE;
- dp_send_events
- (dp_drv, EV_USBPD_DP_CONFIGURE);
- }
- }
- } else {
- pr_debug("unknown response: %d for Attention\n",
- cmd_type);
- }
+ dp_drv->alt_mode.dp_status.response = *vdos;
+ mdss_dp_usbpd_ext_dp_status(&dp_drv->alt_mode.dp_status);
+
+ if (!dp_drv->alt_mode.dp_status.hpd_high)
+ return;
+
+ pr_debug("HPD high\n");
+
+ dp_drv->alt_mode.current_state |= DP_STATUS_DONE;
+
+ if (dp_drv->alt_mode.current_state & DP_CONFIGURE_DONE)
+ mdss_dp_host_init(&dp_drv->panel_data);
+ else
+ dp_send_events(dp_drv, EV_USBPD_DP_CONFIGURE);
break;
case DP_VDM_STATUS:
- if (cmd_type == SVDM_CMD_TYPE_RESP_ACK) {
- dp_drv->alt_mode.dp_status.response = *vdos;
- mdss_dp_usbpd_ext_dp_status
- (&dp_drv->alt_mode.dp_status);
- if (dp_drv->alt_mode.dp_status.hpd_high) {
- pr_debug("HDP high\n");
- dp_drv->alt_mode.current_state =
- DP_STATUS_DONE;
- dp_send_events(dp_drv, EV_USBPD_DP_CONFIGURE);
- }
- } else {
- pr_err("unknown response: %d for DP_Status\n",
- cmd_type);
+ dp_drv->alt_mode.dp_status.response = *vdos;
+ mdss_dp_usbpd_ext_dp_status(&dp_drv->alt_mode.dp_status);
+
+ if (!(dp_drv->alt_mode.current_state & DP_CONFIGURE_DONE)) {
+ dp_drv->alt_mode.current_state |= DP_STATUS_DONE;
+ dp_send_events(dp_drv, EV_USBPD_DP_CONFIGURE);
}
break;
case DP_VDM_CONFIGURE:
- if ((dp_drv->cable_connected == true)
- || (cmd_type == SVDM_CMD_TYPE_RESP_ACK)) {
- dp_drv->alt_mode.current_state = DP_CONFIGURE_DONE;
- pr_debug("config USBPD to DP done\n");
+ dp_drv->alt_mode.current_state |= DP_CONFIGURE_DONE;
+ pr_debug("config USBPD to DP done\n");
+
+ if (dp_drv->alt_mode.dp_status.hpd_high)
mdss_dp_host_init(&dp_drv->panel_data);
- } else {
- pr_err("unknown response: %d for DP_Configure\n",
- cmd_type);
- }
break;
default:
pr_err("unknown cmd: %d\n", cmd);
@@ -2135,6 +2258,8 @@ static int mdss_dp_probe(struct platform_device *pdev)
mdss_dp_device_register(dp_drv);
dp_drv->inited = true;
+ dp_drv->wait_for_audio_comp = false;
+ init_completion(&dp_drv->audio_comp);
pr_debug("done\n");
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index 4710cf7a98e2..8d5af4dc5bf3 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -105,6 +105,7 @@
EDP_INTR_FRAME_END | EDP_INTR_CRC_UPDATED)
#define EDP_INTR_MASK2 (EDP_INTR_STATUS2 << 2)
+#define EV_EVENT_STR(x) #x
struct edp_buf {
char *start; /* buffer start addr */
@@ -170,12 +171,12 @@ struct usbpd_dp_status {
};
enum dp_alt_mode_state {
- ALT_MODE_INIT_STATE = 0,
- DISCOVER_MODES_DONE,
- ENTER_MODE_DONE,
- DP_STATUS_DONE,
- DP_CONFIGURE_DONE,
- UNKNOWN_STATE,
+ UNKNOWN_STATE = 0,
+ ALT_MODE_INIT_STATE = BIT(0),
+ DISCOVER_MODES_DONE = BIT(1),
+ ENTER_MODE_DONE = BIT(2),
+ DP_STATUS_DONE = BIT(3),
+ DP_CONFIGURE_DONE = BIT(4),
};
struct dp_alt_mode {
@@ -399,6 +400,7 @@ struct mdss_dp_drv_pdata {
struct completion train_comp;
struct completion idle_comp;
struct completion video_comp;
+ struct completion audio_comp;
struct mutex aux_mutex;
struct mutex train_mutex;
struct mutex pd_msg_mutex;
@@ -423,10 +425,11 @@ struct mdss_dp_drv_pdata {
char delay_start;
u32 bpp;
struct dp_statistic dp_stat;
+ bool wait_for_audio_comp;
/* event */
struct workqueue_struct *workq;
- struct delayed_work dwork;
+ struct work_struct work;
u32 current_event;
spinlock_t event_lock;
spinlock_t lock;
@@ -463,6 +466,28 @@ static inline const char *__mdss_dp_pm_supply_node_name(
}
}
+static inline char *mdss_dp_ev_event_to_string(int event)
+{
+ switch (event) {
+ case EV_EDP_AUX_SETUP:
+ return EV_EVENT_STR(EV_EDP_AUX_SETUP);
+ case EV_EDID_READ:
+ return EV_EVENT_STR(EV_EDID_READ);
+ case EV_DPCD_CAP_READ:
+ return EV_EVENT_STR(EV_DPCD_CAP_READ);
+ case EV_DPCD_STATUS_READ:
+ return EV_EVENT_STR(EV_DPCD_STATUS_READ);
+ case EV_LINK_TRAIN:
+ return EV_EVENT_STR(EV_LINK_TRAIN);
+ case EV_IDLE_PATTERNS_SENT:
+ return EV_EVENT_STR(EV_IDLE_PATTERNS_SENT);
+ case EV_VIDEO_READY:
+ return EV_EVENT_STR(EV_VIDEO_READY);
+ default:
+ return "unknown";
+ }
+}
+
void mdss_dp_phy_initialize(struct mdss_dp_drv_pdata *dp);
void mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *dp);
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index d9297a7af764..3c525b0dac4f 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -374,7 +374,19 @@ static int dp_aux_read_buf(struct mdss_dp_drv_pdata *ep, u32 addr,
/*
* edid standard header bytes
*/
-static char edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
+static u8 edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
+
+static bool dp_edid_is_valid_header(u8 *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edid_hdr); i++) {
+ if (buf[i] != edid_hdr[i])
+ return false;
+ }
+
+ return true;
+}
int dp_edid_buf_error(char *buf, int len)
{
@@ -396,11 +408,6 @@ int dp_edid_buf_error(char *buf, int len)
return -EINVAL;
}
- if (strncmp(buf, edid_hdr, strlen(edid_hdr))) {
- pr_err("Error: header\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -510,11 +517,20 @@ char mdss_dp_gen_link_clk(struct mdss_panel_info *pinfo, char lane_cnt)
pr_debug("clk_rate=%llu, bpp= %d, lane_cnt=%d\n",
pinfo->clk_rate, pinfo->bpp, lane_cnt);
- min_link_rate = (u32)div_u64((pinfo->clk_rate * 10),
- (lane_cnt * encoding_factx10));
- min_link_rate = (min_link_rate * pinfo->bpp)
- / (DP_LINK_RATE_MULTIPLIER);
+
+ /*
+ * The max pixel clock supported is 675Mhz. The
+ * current calculations below will make sure
+ * the min_link_rate is within 32 bit limits.
+ * Any changes in the section of code should
+ * consider this limitation.
+ */
+ min_link_rate = pinfo->clk_rate
+ / (lane_cnt * encoding_factx10);
min_link_rate /= ln_to_link_ratio;
+ min_link_rate = (min_link_rate * pinfo->bpp);
+ min_link_rate = (u32)div_u64(min_link_rate * 10,
+ DP_LINK_RATE_MULTIPLIER);
pr_debug("min_link_rate = %d\n", min_link_rate);
@@ -699,10 +715,11 @@ static int dp_aux_chan_ready(struct mdss_dp_drv_pdata *ep)
int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
{
- struct edp_buf *rp;
- int cnt, rlen;
- int ret = 0;
- int blk_num = 0;
+ struct edp_buf *rp = &dp->rxp;
+ int rlen, ret = 0;
+ int edid_blk = 0, blk_num = 0, retries = 10;
+ bool edid_parsing_done = false;
+ const u8 cea_tag = 0x02;
ret = dp_aux_chan_ready(dp);
if (ret) {
@@ -710,70 +727,56 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
return ret;
}
- for (cnt = 5; cnt; cnt--) {
- rlen = dp_aux_read_buf
- (dp, EDID_START_ADDRESS, EDID_BLOCK_SIZE, 1);
- if (rlen > 0) {
- pr_debug("cnt=%d, block=%d, rlen=%d\n",
- cnt, blk_num, rlen);
-
- rp = &dp->rxp;
- if (!dp_edid_buf_error(rp->data, rp->len))
- break;
+ do {
+ rlen = dp_aux_read_buf(dp, EDID_START_ADDRESS +
+ (blk_num * EDID_BLOCK_SIZE),
+ EDID_BLOCK_SIZE, 1);
+ if (rlen != EDID_BLOCK_SIZE) {
+ pr_err("Read failed. rlen=%d\n", rlen);
+ continue;
}
- }
- if ((cnt <= 0) && (rlen != EDID_BLOCK_SIZE)) {
- pr_err("Read failed. rlen=%d\n", rlen);
- return -EINVAL;
- }
+ pr_debug("blk_num=%d, rlen=%d\n", blk_num, rlen);
- rp = &dp->rxp;
+ if (dp_edid_is_valid_header(rp->data)) {
+ if (dp_edid_buf_error(rp->data, rp->len))
+ continue;
- dp_extract_edid_manufacturer(&dp->edid, rp->data);
- dp_extract_edid_product(&dp->edid, rp->data);
- dp_extract_edid_version(&dp->edid, rp->data);
- dp_extract_edid_ext_block_cnt(&dp->edid, rp->data);
- dp_extract_edid_video_support(&dp->edid, rp->data);
- dp_extract_edid_feature(&dp->edid, rp->data);
- dp_extract_edid_detailed_timing_description(&dp->edid, rp->data);
- /* for the first block initialize the edid buffer size */
- dp->edid_buf_size = 0;
+ if (edid_parsing_done) {
+ blk_num++;
+ continue;
+ }
- pr_debug("edid extension = %d\n",
- dp->edid.ext_block_cnt);
+ dp_extract_edid_manufacturer(&dp->edid, rp->data);
+ dp_extract_edid_product(&dp->edid, rp->data);
+ dp_extract_edid_version(&dp->edid, rp->data);
+ dp_extract_edid_ext_block_cnt(&dp->edid, rp->data);
+ dp_extract_edid_video_support(&dp->edid, rp->data);
+ dp_extract_edid_feature(&dp->edid, rp->data);
+ dp_extract_edid_detailed_timing_description(&dp->edid,
+ rp->data);
- memcpy(dp->edid_buf, rp->data, EDID_BLOCK_SIZE);
- dp->edid_buf_size += EDID_BLOCK_SIZE;
+ edid_parsing_done = true;
+ } else {
+ edid_blk++;
+ blk_num++;
- if (!dp->edid.ext_block_cnt)
- return 0;
+ /* fix dongle byte shift issue */
+ if (edid_blk == 1 && rp->data[0] != cea_tag) {
+ u8 tmp[EDID_BLOCK_SIZE - 1];
- for (blk_num = 1; blk_num <= dp->edid.ext_block_cnt;
- blk_num++) {
- for (cnt = 5; cnt; cnt--) {
- rlen = dp_aux_read_buf
- (dp, EDID_START_ADDRESS +
- (blk_num * EDID_BLOCK_SIZE),
- EDID_BLOCK_SIZE, 1);
- if (rlen > 0) {
- pr_debug("cnt=%d, blk_num=%d, rlen=%d\n",
- cnt, blk_num, rlen);
- rp = &dp->rxp;
- if (!dp_edid_buf_error(rp->data, rp->len))
- break;
+ memcpy(tmp, rp->data, EDID_BLOCK_SIZE - 1);
+ rp->data[0] = cea_tag;
+ memcpy(rp->data + 1, tmp, EDID_BLOCK_SIZE - 1);
}
}
- if ((cnt <= 0) && (rlen != EDID_BLOCK_SIZE)) {
- pr_err("Read failed. rlen=%d\n", rlen);
- return -EINVAL;
- }
+ memcpy(dp->edid_buf + (edid_blk * EDID_BLOCK_SIZE),
+ rp->data, EDID_BLOCK_SIZE);
- memcpy(dp->edid_buf + (blk_num * EDID_BLOCK_SIZE),
- rp->data, EDID_BLOCK_SIZE);
- dp->edid_buf_size += EDID_BLOCK_SIZE;
- }
+ if (edid_blk == dp->edid.ext_block_cnt)
+ return 0;
+ } while (retries--);
return 0;
}
@@ -1113,17 +1116,17 @@ static void dp_host_train_set(struct mdss_dp_drv_pdata *ep, int train)
}
char vm_pre_emphasis[4][4] = {
- {0x00, 0x06, 0x09, 0x0C}, /* pe0, 0 db */
- {0x00, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */
- {0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 db */
- {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
+ {0x00, 0x09, 0x11, 0x0C}, /* pe0, 0 db */
+ {0x00, 0x0A, 0x10, 0xFF}, /* pe1, 3.5 db */
+ {0x00, 0x0C, 0xFF, 0xFF}, /* pe2, 6.0 db */
+ {0x00, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
};
/* voltage swing, 0.2v and 1.0v are not support */
char vm_voltage_swing[4][4] = {
- {0x0a, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */
- {0x07, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
- {0x1A, 0x1E, 0xFF, 0xFF}, /* sw1, 0.8 v */
+ {0x07, 0x0f, 0x12, 0x1E}, /* sw0, 0.4v */
+ {0x11, 0x1D, 0x1F, 0xFF}, /* sw1, 0.6 v */
+ {0x18, 0x1F, 0xFF, 0xFF}, /* sw1, 0.8 v */
{0x1E, 0xFF, 0xFF, 0xFF} /* sw1, 1.2 v, optional */
};
@@ -1171,8 +1174,9 @@ static int dp_start_link_train_1(struct mdss_dp_drv_pdata *ep)
pr_debug("Entered++");
dp_host_train_set(ep, 0x01); /* train_1 */
- dp_voltage_pre_emphasise_set(ep);
+ dp_cap_lane_rate_set(ep);
dp_train_pattern_set_write(ep, 0x21); /* train_1 */
+ dp_voltage_pre_emphasise_set(ep);
tries = 0;
old_v_level = ep->v_level;
@@ -1211,7 +1215,7 @@ static int dp_start_link_train_1(struct mdss_dp_drv_pdata *ep)
static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
{
- int tries;
+ int tries = 0;
int ret = 0;
int usleep_time;
char pattern;
@@ -1223,12 +1227,12 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
else
pattern = 0x02;
- dp_host_train_set(ep, pattern); /* train_2 */
- dp_voltage_pre_emphasise_set(ep);
dp_train_pattern_set_write(ep, pattern | 0x20);/* train_2 */
- tries = 0;
- while (1) {
+ do {
+ dp_voltage_pre_emphasise_set(ep);
+ dp_host_train_set(ep, pattern);
+
usleep_time = ep->dpcd.training_read_interval;
usleep_range(usleep_time, usleep_time);
@@ -1240,14 +1244,13 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
}
tries++;
- if (tries > 5) {
+ if (tries > 4) {
ret = -1;
break;
}
dp_sink_train_set_adjust(ep);
- dp_voltage_pre_emphasise_set(ep);
- }
+ } while (1);
return ret;
}
@@ -1319,7 +1322,6 @@ static void dp_clear_training_pattern(struct mdss_dp_drv_pdata *ep)
int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp)
{
int ret = 0;
- int usleep_time;
ret = dp_aux_chan_ready(dp);
if (ret) {
@@ -1335,13 +1337,10 @@ int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp)
train_start:
dp->v_level = 0; /* start from default level */
dp->p_level = 0;
- dp_cap_lane_rate_set(dp);
mdss_dp_config_ctrl(dp);
mdss_dp_state_ctrl(&dp->ctrl_io, 0);
dp_clear_training_pattern(dp);
- usleep_time = dp->dpcd.training_read_interval;
- usleep_range(usleep_time, usleep_time);
ret = dp_start_link_train_1(dp);
if (ret < 0) {
@@ -1356,8 +1355,6 @@ train_start:
pr_debug("Training 1 completed successfully\n");
- mdss_dp_state_ctrl(&dp->ctrl_io, 0);
- dp_clear_training_pattern(dp);
ret = dp_start_link_train_2(dp);
if (ret < 0) {
if (dp_link_rate_down_shift(dp) == 0) {
@@ -1375,7 +1372,8 @@ train_start:
clear:
dp_clear_training_pattern(dp);
if (ret != -1) {
- mdss_dp_setup_tr_unit(&dp->ctrl_io);
+ mdss_dp_setup_tr_unit(&dp->ctrl_io, dp->link_rate,
+ dp->lane_cnt, dp->vic);
mdss_dp_state_ctrl(&dp->ctrl_io, ST_SEND_VIDEO);
pr_debug("State_ctrl set to SEND_VIDEO\n");
}
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.c b/drivers/video/fbdev/msm/mdss_dp_util.c
index bdf5d92f7053..b1eb8e0c9579 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.c
+++ b/drivers/video/fbdev/msm/mdss_dp_util.c
@@ -32,6 +32,29 @@
#define AUDIO_FREQ_48 48000
#define DP_AUDIO_FREQ_COUNT 3
+enum mdss_dp_pin_assignment {
+ PIN_ASSIGNMENT_A,
+ PIN_ASSIGNMENT_B,
+ PIN_ASSIGNMENT_C,
+ PIN_ASSIGNMENT_D,
+ PIN_ASSIGNMENT_E,
+ PIN_ASSIGNMENT_F,
+ PIN_ASSIGNMENT_MAX,
+};
+
+static const char *mdss_dp_pin_name(u8 pin)
+{
+ switch (pin) {
+ case PIN_ASSIGNMENT_A: return "PIN_ASSIGNMENT_A";
+ case PIN_ASSIGNMENT_B: return "PIN_ASSIGNMENT_B";
+ case PIN_ASSIGNMENT_C: return "PIN_ASSIGNMENT_C";
+ case PIN_ASSIGNMENT_D: return "PIN_ASSIGNMENT_D";
+ case PIN_ASSIGNMENT_E: return "PIN_ASSIGNMENT_E";
+ case PIN_ASSIGNMENT_F: return "PIN_ASSIGNMENT_F";
+ default: return "UNKNOWN";
+ }
+}
+
static const uint32_t naud_value[DP_AUDIO_FREQ_COUNT][DP_AUDIO_FREQ_COUNT] = {
{ 10125, 16875, 33750 },
{ 5625, 9375, 18750 },
@@ -143,6 +166,18 @@ void mdss_dp_aux_reset(struct dss_io_data *ctrl_io)
writel_relaxed(aux_ctrl, ctrl_io->base + DP_AUX_CTRL);
}
+/* reset DP controller */
+void mdss_dp_ctrl_reset(struct dss_io_data *ctrl_io)
+{
+ u32 sw_reset = readl_relaxed(ctrl_io->base + DP_SW_RESET);
+
+ sw_reset |= BIT(0);
+ writel_relaxed(sw_reset, ctrl_io->base + DP_SW_RESET);
+ udelay(1000);
+ sw_reset &= ~BIT(0);
+ writel_relaxed(sw_reset, ctrl_io->base + DP_SW_RESET);
+}
+
/* reset DP Mainlink */
void mdss_dp_mainlink_reset(struct dss_io_data *ctrl_io)
{
@@ -284,13 +319,47 @@ void mdss_dp_sw_mvid_nvid(struct dss_io_data *ctrl_io)
writel_relaxed(0x3c, ctrl_io->base + DP_SOFTWARE_NVID);
}
-void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io)
+void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
+ u8 ln_cnt, u32 res)
{
- /* Current Tr unit configuration supports only 1080p */
+ u32 dp_tu = 0x0;
+ u32 valid_boundary = 0x0;
+ u32 valid_boundary2 = 0x0;
+ struct dp_vc_tu_mapping_table const *tu_entry = tu_table;
+
writel_relaxed(0x21, ctrl_io->base + DP_MISC1_MISC0);
- writel_relaxed(0x0f0016, ctrl_io->base + DP_VALID_BOUNDARY);
- writel_relaxed(0x1f, ctrl_io->base + DP_TU);
- writel_relaxed(0x0, ctrl_io->base + DP_VALID_BOUNDARY_2);
+
+ for (; tu_entry != tu_table + ARRAY_SIZE(tu_table); ++tu_entry) {
+ if ((tu_entry->vic == res) &&
+ (tu_entry->lanes == ln_cnt) &&
+ (tu_entry->lrate == link_rate))
+ break;
+ }
+
+ if (tu_entry == tu_table + ARRAY_SIZE(tu_table)) {
+ pr_err("requested ln_cnt=%d, lrate=0x%x not supported\n",
+ ln_cnt, link_rate);
+ return;
+ }
+
+ dp_tu |= tu_entry->tu_size_minus1;
+ valid_boundary |= tu_entry->valid_boundary_link;
+ valid_boundary |= (tu_entry->delay_start_link << 16);
+
+ valid_boundary2 |= (tu_entry->valid_lower_boundary_link << 1);
+ valid_boundary2 |= (tu_entry->upper_boundary_count << 16);
+ valid_boundary2 |= (tu_entry->lower_boundary_count << 20);
+
+ if (tu_entry->boundary_moderation_en)
+ valid_boundary2 |= BIT(0);
+
+ writel_relaxed(valid_boundary, ctrl_io->base + DP_VALID_BOUNDARY);
+ writel_relaxed(dp_tu, ctrl_io->base + DP_TU);
+ writel_relaxed(valid_boundary2, ctrl_io->base + DP_VALID_BOUNDARY_2);
+
+ pr_debug("valid_boundary=0x%x, valid_boundary2=0x%x\n",
+ valid_boundary, valid_boundary2);
+ pr_debug("dp_tu=0x%x\n", dp_tu);
}
void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io,
@@ -426,14 +495,36 @@ void mdss_dp_usbpd_ext_dp_status(struct usbpd_dp_status *dp_status)
dp_status->hpd_irq =
(buf & BIT(8)) ? true : false;
+ pr_debug("low_pow_st = %d, adaptor_dp_en = %d, multi_func = %d\n",
+ dp_status->low_pow_st, dp_status->adaptor_dp_en,
+ dp_status->multi_func);
+ pr_debug("switch_to_usb_config = %d, exit_dp_mode = %d, hpd_high =%d\n",
+ dp_status->switch_to_usb_config,
+ dp_status->exit_dp_mode, dp_status->hpd_high);
+ pr_debug("hpd_irq = %d\n", dp_status->hpd_irq);
+
mdss_dp_initialize_s_port(&dp_status->c_port, port);
}
u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp)
{
+ u8 pin_cfg, pin;
u32 config = 0;
- config |= (dp->alt_mode.dp_cap.dlink_pin_config << 8);
+ pin_cfg = dp->alt_mode.dp_cap.dlink_pin_config;
+
+ for (pin = PIN_ASSIGNMENT_A; pin < PIN_ASSIGNMENT_MAX; pin++) {
+ if (pin_cfg & BIT(pin))
+ break;
+ }
+
+ if (pin == PIN_ASSIGNMENT_MAX)
+ pin = PIN_ASSIGNMENT_C;
+
+ pr_debug("pin assignment: %s\n", mdss_dp_pin_name(pin));
+
+ config |= BIT(pin) << 8;
+
config |= (0x1 << 2); /* configure for DPv1.3 */
config |= 0x2; /* Configuring for UFP_D */
@@ -441,6 +532,17 @@ u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp)
return config;
}
+void mdss_dp_phy_share_lane_config(struct dss_io_data *phy_io,
+ u8 orientation, u8 ln_cnt)
+{
+ u32 info = 0x0;
+
+ info |= (ln_cnt & 0x0F);
+ info |= ((orientation & 0x0F) << 4);
+ pr_debug("Shared Info = 0x%x\n", info);
+ writel_relaxed(info, phy_io->base + DP_PHY_SPARE0);
+}
+
void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io, char link_rate)
{
u32 acr_ctrl = 0;
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.h b/drivers/video/fbdev/msm/mdss_dp_util.h
index 5eb9d092476f..cf2286f9b58a 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.h
+++ b/drivers/video/fbdev/msm/mdss_dp_util.h
@@ -150,6 +150,8 @@
#define DP_PHY_AUX_INTERRUPT_MASK (0x00000044)
#define DP_PHY_AUX_INTERRUPT_CLEAR (0x00000048)
+#define DP_PHY_SPARE0 0x00A8
+
#define QSERDES_TX0_OFFSET 0x0400
#define QSERDES_TX1_OFFSET 0x0800
@@ -200,17 +202,72 @@ struct edp_cmd {
char next; /* next command */
};
+struct dp_vc_tu_mapping_table {
+ u32 vic;
+ u8 lanes;
+ u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20) */
+ u8 bpp;
+ u8 valid_boundary_link;
+ u16 delay_start_link;
+ bool boundary_moderation_en;
+ u8 valid_lower_boundary_link;
+ u8 upper_boundary_count;
+ u8 lower_boundary_count;
+ u8 tu_size_minus1;
+};
+
+static const struct dp_vc_tu_mapping_table tu_table[] = {
+ {HDMI_VFRMT_640x480p60_4_3, 4, 06, 24,
+ 0x07, 0x0056, false, 0x00, 0x00, 0x00, 0x3b},
+ {HDMI_VFRMT_640x480p60_4_3, 2, 06, 24,
+ 0x0e, 0x004f, false, 0x00, 0x00, 0x00, 0x3b},
+ {HDMI_VFRMT_640x480p60_4_3, 1, 06, 24,
+ 0x15, 0x0039, false, 0x00, 0x00, 0x00, 0x2c},
+ {HDMI_VFRMT_720x480p60_4_3, 1, 06, 24,
+ 0x13, 0x0038, true, 0x12, 0x0c, 0x0b, 0x24},
+ {HDMI_VFRMT_720x480p60_16_9, 1, 06, 24,
+ 0x13, 0x0038, true, 0x12, 0x0c, 0x0b, 0x24},
+ {HDMI_VFRMT_1280x720p60_16_9, 4, 06, 24,
+ 0x0c, 0x0020, false, 0x00, 0x00, 0x00, 0x1f},
+ {HDMI_VFRMT_1280x720p60_16_9, 2, 06, 24,
+ 0x16, 0x0015, false, 0x00, 0x00, 0x00, 0x1f},
+ {HDMI_VFRMT_1280x720p60_16_9, 1, 10, 24,
+ 0x21, 0x001a, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_1920x1080p60_16_9, 4, 06, 24,
+ 0x16, 0x000f, false, 0x00, 0x00, 0x00, 0x1f},
+ {HDMI_VFRMT_1920x1080p60_16_9, 2, 10, 24,
+ 0x21, 0x0011, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_1920x1080p60_16_9, 1, 20, 24,
+ 0x21, 0x001a, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_3840x2160p24_16_9, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_3840x2160p30_16_9, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_3840x2160p60_16_9, 4, 20, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_4096x2160p24_256_135, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_4096x2160p30_256_135, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_4096x2160p60_256_135, 4, 20, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_EVFRMT_4096x2160p24_16_9, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+};
+
int dp_aux_read(void *ep, struct edp_cmd *cmds);
int dp_aux_write(void *ep, struct edp_cmd *cmd);
void mdss_dp_state_ctrl(struct dss_io_data *ctrl_io, u32 data);
u32 mdss_dp_get_ctrl_hw_version(struct dss_io_data *ctrl_io);
u32 mdss_dp_get_phy_hw_version(struct dss_io_data *phy_io);
+void mdss_dp_ctrl_reset(struct dss_io_data *ctrl_io);
void mdss_dp_aux_reset(struct dss_io_data *ctrl_io);
void mdss_dp_mainlink_reset(struct dss_io_data *ctrl_io);
void mdss_dp_phy_reset(struct dss_io_data *ctrl_io);
void mdss_dp_switch_usb3_phy_to_dp_mode(struct dss_io_data *tcsr_reg_io);
void mdss_dp_assert_phy_reset(struct dss_io_data *ctrl_io, bool assert);
-void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io);
+void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
+ u8 ln_cnt, u32 res);
void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io);
void mdss_dp_hpd_configure(struct dss_io_data *ctrl_io, bool enable);
void mdss_dp_aux_ctrl(struct dss_io_data *ctrl_io, bool enable);
@@ -231,6 +288,8 @@ void mdss_dp_usbpd_ext_dp_status(struct usbpd_dp_status *dp_status);
u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp);
void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io,
struct lane_mapping l_map);
+void mdss_dp_phy_share_lane_config(struct dss_io_data *phy_io,
+ u8 orientation, u8 ln_cnt);
void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io,
char link_rate);
void mdss_dp_audio_setup_sdps(struct dss_io_data *ctrl_io);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index 8ffba091e2b2..01fc01425a3a 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -856,6 +856,48 @@ static int mdss_dsi_panel_low_power_config(struct mdss_panel_data *pdata,
return 0;
}
+static void mdss_dsi_parse_mdp_kickoff_threshold(struct device_node *np,
+ struct mdss_panel_info *pinfo)
+{
+ int len, rc;
+ const u32 *src;
+ u32 tmp;
+ u32 max_delay_us;
+
+ pinfo->mdp_koff_thshold = false;
+ src = of_get_property(np, "qcom,mdss-mdp-kickoff-threshold", &len);
+ if (!src || (len == 0))
+ return;
+
+ rc = of_property_read_u32(np, "qcom,mdss-mdp-kickoff-delay", &tmp);
+ if (!rc)
+ pinfo->mdp_koff_delay = tmp;
+ else
+ return;
+
+ if (pinfo->mipi.frame_rate == 0) {
+ pr_err("cannot enable guard window, unexpected panel fps\n");
+ return;
+ }
+
+ pinfo->mdp_koff_thshold_low = be32_to_cpu(src[0]);
+ pinfo->mdp_koff_thshold_high = be32_to_cpu(src[1]);
+ max_delay_us = 1000000 / pinfo->mipi.frame_rate;
+
+ /* enable the feature if threshold is valid */
+ if ((pinfo->mdp_koff_thshold_low < pinfo->mdp_koff_thshold_high) &&
+ ((pinfo->mdp_koff_delay > 0) ||
+ (pinfo->mdp_koff_delay < max_delay_us)))
+ pinfo->mdp_koff_thshold = true;
+
+ pr_debug("panel kickoff thshold:[%d, %d] delay:%d (max:%d) enable:%d\n",
+ pinfo->mdp_koff_thshold_low,
+ pinfo->mdp_koff_thshold_high,
+ pinfo->mdp_koff_delay,
+ max_delay_us,
+ pinfo->mdp_koff_thshold);
+}
+
static void mdss_dsi_parse_trigger(struct device_node *np, char *trigger,
char *trigger_key)
{
@@ -2497,6 +2539,8 @@ static int mdss_panel_parse_dt(struct device_node *np,
rc = of_property_read_u32(np, "qcom,mdss-mdp-transfer-time-us", &tmp);
pinfo->mdp_transfer_time_us = (!rc ? tmp : DEFAULT_MDP_TRANSFER_TIME);
+ mdss_dsi_parse_mdp_kickoff_threshold(np, pinfo);
+
pinfo->mipi.lp11_init = of_property_read_bool(np,
"qcom,mdss-dsi-lp11-init");
rc = of_property_read_u32(np, "qcom,mdss-dsi-init-delay-us", &tmp);
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
index 4f1435d006b2..2047a047b537 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -698,7 +698,6 @@ static ssize_t hdmi_edid_sysfs_rda_3d_modes(struct device *dev,
}
}
- DEV_DBG("%s: '%s'\n", __func__, buf);
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
return ret;
@@ -1567,7 +1566,9 @@ static void hdmi_edid_detail_desc(struct hdmi_edid_ctrl *edid_ctrl,
frame_data = (active_h + blank_h) * (active_v + blank_v);
if (frame_data) {
- int refresh_rate_khz = (pixel_clk * khz_to_hz) / frame_data;
+ u64 refresh_rate = (u64)pixel_clk * khz_to_hz * khz_to_hz;
+
+ do_div(refresh_rate, frame_data);
timing.active_h = active_h;
timing.front_porch_h = front_porch_h;
@@ -1582,19 +1583,24 @@ static void hdmi_edid_detail_desc(struct hdmi_edid_ctrl *edid_ctrl,
(front_porch_v + pulse_width_v);
timing.active_low_v = active_low_v;
timing.pixel_freq = pixel_clk;
- timing.refresh_rate = refresh_rate_khz * khz_to_hz;
+ timing.refresh_rate = refresh_rate;
timing.interlaced = interlaced;
timing.supported = true;
timing.ar = aspect_ratio_4_3 ? HDMI_RES_AR_4_3 :
(aspect_ratio_5_4 ? HDMI_RES_AR_5_4 :
HDMI_RES_AR_16_9);
- DEV_DBG("%s: new res: %dx%d%s@%dHz\n", __func__,
+ DEV_DBG("%s: new res: %dx%d%s@%d.%d%d%dHz\n", __func__,
timing.active_h, timing.active_v,
interlaced ? "i" : "p",
- timing.refresh_rate / khz_to_hz);
-
- rc = hdmi_set_resv_timing_info(&timing);
+ timing.refresh_rate / khz_to_hz,
+ (timing.refresh_rate % khz_to_hz) / 100,
+ (timing.refresh_rate % 100) / 10,
+ timing.refresh_rate % 10);
+
+ rc = hdmi_get_video_id_code(&timing, NULL);
+ if (rc < 0)
+ rc = hdmi_set_resv_timing_info(&timing);
} else {
DEV_ERR("%s: Invalid frame data\n", __func__);
rc = -EINVAL;
@@ -1642,6 +1648,7 @@ static void hdmi_edid_add_sink_video_format(struct hdmi_edid_ctrl *edid_ctrl,
u32 supported = hdmi_edid_is_mode_supported(edid_ctrl, &timing);
struct hdmi_edid_sink_data *sink_data = &edid_ctrl->sink_data;
struct disp_mode_info *disp_mode_list = sink_data->disp_mode_list;
+ u32 i = 0;
if (video_format >= HDMI_VFRMT_MAX) {
DEV_ERR("%s: video format: %s is not supported\n", __func__,
@@ -1653,6 +1660,15 @@ static void hdmi_edid_add_sink_video_format(struct hdmi_edid_ctrl *edid_ctrl,
video_format, msm_hdmi_mode_2string(video_format),
supported ? "Supported" : "Not-Supported");
+ for (i = 0; i < sink_data->num_of_elements; i++) {
+ u32 vic = disp_mode_list[i].video_format;
+
+ if (vic == video_format) {
+ DEV_DBG("%s: vic %d already added\n", __func__, vic);
+ return;
+ }
+ }
+
if (!ret && supported) {
/* todo: MHL */
disp_mode_list[sink_data->num_of_elements].video_format =
@@ -1970,6 +1986,7 @@ static void hdmi_edid_get_display_mode(struct hdmi_edid_ctrl *edid_ctrl)
const u8 *svd = NULL;
u32 has60hz_mode = false;
u32 has50hz_mode = false;
+ u32 desc_offset = 0;
bool read_block0_res = false;
struct hdmi_edid_sink_data *sink_data = NULL;
@@ -2033,103 +2050,66 @@ static void hdmi_edid_get_display_mode(struct hdmi_edid_ctrl *edid_ctrl)
if (video_format == HDMI_VFRMT_640x480p60_4_3)
has480p = true;
}
- } else if (!num_of_cea_blocks || read_block0_res) {
- /* Detailed timing descriptors */
- u32 desc_offset = 0;
- /*
- * * Maximum 4 timing descriptor in block 0 - No CEA
- * extension in this case
- * * EDID_FIRST_TIMING_DESC[0x36] - 1st detailed timing
- * descriptor
- * * EDID_DETAIL_TIMING_DESC_BLCK_SZ[0x12] - Each detailed
- * timing descriptor has block size of 18
- */
- while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
- hdmi_edid_detail_desc(edid_ctrl,
- edid_blk0+0x36+desc_offset,
- &video_format);
-
- DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
- __func__, __LINE__,
- msm_hdmi_mode_2string(video_format));
-
- hdmi_edid_add_sink_video_format(edid_ctrl,
- video_format);
-
- if (video_format == HDMI_VFRMT_640x480p60_4_3)
- has480p = true;
-
- /* Make a note of the preferred video format */
- if (i == 0) {
- sink_data->preferred_video_format =
- video_format;
- }
- desc_offset += 0x12;
- ++i;
- }
- } else if (1 == num_of_cea_blocks) {
- u32 desc_offset = 0;
-
- /*
- * Read from both block 0 and block 1
- * Read EDID block[0] as above
- */
- while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
- hdmi_edid_detail_desc(edid_ctrl,
- edid_blk0+0x36+desc_offset,
- &video_format);
+ }
- DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
- __func__, __LINE__,
- msm_hdmi_mode_2string(video_format));
+ i = 0;
+ /* Read DTD resolutions from block0 */
+ while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
+ hdmi_edid_detail_desc(edid_ctrl,
+ edid_blk0+0x36+desc_offset,
+ &video_format);
+
+ DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
+ __func__, __LINE__,
+ msm_hdmi_mode_2string(video_format));
- hdmi_edid_add_sink_video_format(edid_ctrl,
- video_format);
+ hdmi_edid_add_sink_video_format(edid_ctrl,
+ video_format);
- if (video_format == HDMI_VFRMT_640x480p60_4_3)
- has480p = true;
+ if (video_format == HDMI_VFRMT_640x480p60_4_3)
+ has480p = true;
- /* Make a note of the preferred video format */
- if (i == 0) {
- sink_data->preferred_video_format =
- video_format;
- }
- desc_offset += 0x12;
- ++i;
+ /* Make a note of the preferred video format */
+ if (i == 0) {
+ sink_data->preferred_video_format =
+ video_format;
}
+ desc_offset += 0x12;
+ ++i;
+ }
- /*
- * * Parse block 1 - CEA extension byte offset of first
- * detailed timing generation - offset is relevant to
- * the offset of block 1
- * * EDID_CEA_EXTENSION_FIRST_DESC[0x82]: Offset to CEA
- * extension first timing desc - indicate the offset of
- * the first detailed timing descriptor
- * * EDID_BLOCK_SIZE = 0x80 Each page size in the EDID ROM
- */
- desc_offset = edid_blk1[0x02];
- while (0 != edid_blk1[desc_offset]) {
- hdmi_edid_detail_desc(edid_ctrl,
- edid_blk1+desc_offset,
- &video_format);
-
- DEV_DBG("[%s:%d] Block-1 Adding vid fmt = [%s]\n",
- __func__, __LINE__,
- msm_hdmi_mode_2string(video_format));
+ /*
+ * * Parse block 1 - CEA extension byte offset of first
+ * detailed timing generation - offset is relevant to
+ * the offset of block 1
+ * * EDID_CEA_EXTENSION_FIRST_DESC[0x82]: Offset to CEA
+ * extension first timing desc - indicate the offset of
+ * the first detailed timing descriptor
+ * * EDID_BLOCK_SIZE = 0x80 Each page size in the EDID ROM
+ */
+ desc_offset = edid_blk1[0x02];
+ i = 0;
+ while (!edid_blk1[desc_offset]) {
+ hdmi_edid_detail_desc(edid_ctrl,
+ edid_blk1+desc_offset,
+ &video_format);
- hdmi_edid_add_sink_video_format(edid_ctrl,
- video_format);
- if (video_format == HDMI_VFRMT_640x480p60_4_3)
- has480p = true;
+ DEV_DBG("[%s:%d] Block-1 Adding vid fmt = [%s]\n",
+ __func__, __LINE__,
+ msm_hdmi_mode_2string(video_format));
- /* Make a note of the preferred video format */
- if (i == 0) {
- sink_data->preferred_video_format =
- video_format;
- }
- desc_offset += 0x12;
- ++i;
+ hdmi_edid_add_sink_video_format(edid_ctrl,
+ video_format);
+ if (video_format == HDMI_VFRMT_640x480p60_4_3)
+ has480p = true;
+
+ /* Make a note of the preferred video format */
+ if (i == 0) {
+ sink_data->preferred_video_format =
+ video_format;
}
+ desc_offset += 0x12;
+ ++i;
}
std_blk = 0;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index ace796163fa4..94cc2f2dc370 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -3566,54 +3566,6 @@ static int hdmi_tx_hdcp_off(struct hdmi_tx_ctrl *hdmi_ctrl)
return rc;
}
-static char *hdmi_tx_get_event_name(int event)
-{
- switch (event) {
- case MDSS_EVENT_RESET:
- return HDMI_TX_EVT_STR(MDSS_EVENT_RESET);
- case MDSS_EVENT_LINK_READY:
- return HDMI_TX_EVT_STR(MDSS_EVENT_LINK_READY);
- case MDSS_EVENT_UNBLANK:
- return HDMI_TX_EVT_STR(MDSS_EVENT_UNBLANK);
- case MDSS_EVENT_PANEL_ON:
- return HDMI_TX_EVT_STR(MDSS_EVENT_PANEL_ON);
- case MDSS_EVENT_BLANK:
- return HDMI_TX_EVT_STR(MDSS_EVENT_BLANK);
- case MDSS_EVENT_PANEL_OFF:
- return HDMI_TX_EVT_STR(MDSS_EVENT_PANEL_OFF);
- case MDSS_EVENT_CLOSE:
- return HDMI_TX_EVT_STR(MDSS_EVENT_CLOSE);
- case MDSS_EVENT_SUSPEND:
- return HDMI_TX_EVT_STR(MDSS_EVENT_SUSPEND);
- case MDSS_EVENT_RESUME:
- return HDMI_TX_EVT_STR(MDSS_EVENT_RESUME);
- case MDSS_EVENT_CHECK_PARAMS:
- return HDMI_TX_EVT_STR(MDSS_EVENT_CHECK_PARAMS);
- case MDSS_EVENT_CONT_SPLASH_BEGIN:
- return HDMI_TX_EVT_STR(MDSS_EVENT_CONT_SPLASH_BEGIN);
- case MDSS_EVENT_CONT_SPLASH_FINISH:
- return HDMI_TX_EVT_STR(MDSS_EVENT_CONT_SPLASH_FINISH);
- case MDSS_EVENT_PANEL_UPDATE_FPS:
- return HDMI_TX_EVT_STR(MDSS_EVENT_PANEL_UPDATE_FPS);
- case MDSS_EVENT_FB_REGISTERED:
- return HDMI_TX_EVT_STR(MDSS_EVENT_FB_REGISTERED);
- case MDSS_EVENT_PANEL_CLK_CTRL:
- return HDMI_TX_EVT_STR(MDSS_EVENT_PANEL_CLK_CTRL);
- case MDSS_EVENT_DSI_CMDLIST_KOFF:
- return HDMI_TX_EVT_STR(MDSS_EVENT_DSI_CMDLIST_KOFF);
- case MDSS_EVENT_ENABLE_PARTIAL_ROI:
- return HDMI_TX_EVT_STR(MDSS_EVENT_ENABLE_PARTIAL_ROI);
- case MDSS_EVENT_DSI_STREAM_SIZE:
- return HDMI_TX_EVT_STR(MDSS_EVENT_DSI_STREAM_SIZE);
- case MDSS_EVENT_DSI_DYNAMIC_SWITCH:
- return HDMI_TX_EVT_STR(MDSS_EVENT_DSI_DYNAMIC_SWITCH);
- case MDSS_EVENT_REGISTER_RECOVERY_HANDLER:
- return HDMI_TX_EVT_STR(MDSS_EVENT_REGISTER_RECOVERY_HANDLER);
- default:
- return "unknown";
- }
-}
-
static void hdmi_tx_update_fps(struct hdmi_tx_ctrl *hdmi_ctrl)
{
void *pdata = pdata = hdmi_tx_get_fd(HDMI_TX_FEAT_PANEL);
@@ -3918,7 +3870,8 @@ static int hdmi_tx_event_handler(struct mdss_panel_data *panel_data,
hdmi_ctrl->evt_arg = arg;
DEV_DBG("%s: event = %s suspend=%d, hpd_feature=%d\n", __func__,
- hdmi_tx_get_event_name(event), hdmi_ctrl->panel_suspend,
+ mdss_panel_intf_event_to_string(event),
+ hdmi_ctrl->panel_suspend,
hdmi_ctrl->hpd_feature_on);
handler = hdmi_ctrl->evt_handler[event];
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c
index 9ed909e9a387..c9fc8ba8bfdb 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c
@@ -560,7 +560,7 @@ int msm_hdmi_get_timing_info(
int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info,
struct hdmi_util_ds_data *ds_data, u32 mode)
{
- int ret;
+ int ret, i = 0;
if (!info)
return -EINVAL;
@@ -570,9 +570,23 @@ int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info,
ret = msm_hdmi_get_timing_info(info, mode);
- if (!ret && ds_data && ds_data->ds_registered && ds_data->ds_max_clk) {
- if (info->pixel_freq > ds_data->ds_max_clk)
- info->supported = false;
+ if (!ret && ds_data && ds_data->ds_registered) {
+ if (ds_data->ds_max_clk) {
+ if (info->pixel_freq > ds_data->ds_max_clk)
+ info->supported = false;
+ }
+
+ if (ds_data->modes_num) {
+ u32 *modes = ds_data->modes;
+
+ for (i = 0; i < ds_data->modes_num; i++) {
+ if (info->video_format == *modes++)
+ break;
+ }
+
+ if (i == ds_data->modes_num)
+ info->supported = false;
+ }
}
return ret;
@@ -625,7 +639,7 @@ int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in,
{
int i, vic = -1;
struct msm_hdmi_mode_timing_info supported_timing = {0};
- u32 ret;
+ u32 ret, pclk_delta, pclk, fps_delta, fps;
if (!timing_in) {
pr_err("invalid input\n");
@@ -633,9 +647,16 @@ int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in,
}
/* active_low_h, active_low_v and interlaced are not checked against */
- for (i = 0; i < HDMI_VFRMT_MAX; i++) {
+ for (i = 1; i < HDMI_VFRMT_MAX; i++) {
ret = hdmi_get_supported_mode(&supported_timing, ds_data, i);
+ pclk = supported_timing.pixel_freq;
+ fps = supported_timing.refresh_rate;
+
+ /* as per standard, 0.5% of deviation is allowed */
+ pclk_delta = (pclk / HDMI_KHZ_TO_HZ) * 5;
+ fps_delta = (fps / HDMI_KHZ_TO_HZ) * 5;
+
if (ret || !supported_timing.supported)
continue;
if (timing_in->active_h != supported_timing.active_h)
@@ -654,9 +675,11 @@ int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in,
continue;
if (timing_in->back_porch_v != supported_timing.back_porch_v)
continue;
- if (timing_in->pixel_freq != supported_timing.pixel_freq)
+ if (timing_in->pixel_freq < (pclk - pclk_delta) ||
+ timing_in->pixel_freq > (pclk + pclk_delta))
continue;
- if (timing_in->refresh_rate != supported_timing.refresh_rate)
+ if (timing_in->refresh_rate < (fps - fps_delta) ||
+ timing_in->refresh_rate > (fps + fps_delta))
continue;
vic = (int)supported_timing.video_format;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.h b/drivers/video/fbdev/msm/mdss_hdmi_util.h
index e65cf915fe92..8a7e4d1ebafc 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.h
@@ -459,6 +459,8 @@ struct hdmi_tx_ddc_ctrl {
struct hdmi_util_ds_data {
bool ds_registered;
u32 ds_max_clk;
+ u32 modes_num;
+ u32 *modes;
};
static inline int hdmi_tx_get_v_total(const struct msm_hdmi_mode_timing_info *t)
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 81e3438befca..a0637109c7b3 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1345,7 +1345,12 @@ int mdss_iommu_ctrl(int enable)
return mdata->iommu_ref_cnt;
}
-static void mdss_mdp_memory_retention_enter(void)
+#define MEM_RETAIN_ON 1
+#define MEM_RETAIN_OFF 0
+#define PERIPH_RETAIN_ON 1
+#define PERIPH_RETAIN_OFF 0
+
+static void mdss_mdp_memory_retention_ctrl(bool mem_ctrl, bool periph_ctrl)
{
struct clk *mdss_mdp_clk = NULL;
struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
@@ -1366,49 +1371,35 @@ static void mdss_mdp_memory_retention_enter(void)
__mdss_mdp_reg_access_clk_enable(mdata, true);
if (mdss_mdp_clk) {
- clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
- clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_SET);
- clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_PERIPH);
- }
-
- if (mdss_mdp_lut_clk) {
- clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_RETAIN_MEM);
- clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_PERIPH_OFF_SET);
- clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_NORETAIN_PERIPH);
- }
- __mdss_mdp_reg_access_clk_enable(mdata, false);
-}
-
-static void mdss_mdp_memory_retention_exit(void)
-{
- struct clk *mdss_mdp_clk = NULL;
- struct clk *mdp_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
- struct clk *mdss_mdp_lut_clk = NULL;
- struct clk *mdp_lut_vote_clk = mdss_mdp_get_clk(MDSS_CLK_MDP_LUT);
- struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ if (mem_ctrl)
+ clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
+ else
+ clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_MEM);
- if (mdp_vote_clk) {
- if (test_bit(MDSS_CAPS_MDP_VOTE_CLK_NOT_SUPPORTED,
- mdata->mdss_caps_map)) {
- mdss_mdp_clk = mdp_vote_clk;
- mdss_mdp_lut_clk = mdp_lut_vote_clk;
+ if (periph_ctrl) {
+ clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_PERIPH);
+ clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_CLEAR);
} else {
- mdss_mdp_clk = clk_get_parent(mdp_vote_clk);
- mdss_mdp_lut_clk = clk_get_parent(mdp_lut_vote_clk);
+ clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_SET);
+ clk_set_flags(mdss_mdp_clk, CLKFLAG_NORETAIN_PERIPH);
}
}
- __mdss_mdp_reg_access_clk_enable(mdata, true);
- if (mdss_mdp_clk) {
- clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_MEM);
- clk_set_flags(mdss_mdp_clk, CLKFLAG_RETAIN_PERIPH);
- clk_set_flags(mdss_mdp_clk, CLKFLAG_PERIPH_OFF_CLEAR);
- }
-
if (mdss_mdp_lut_clk) {
- clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_RETAIN_MEM);
- clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_RETAIN_PERIPH);
- clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_PERIPH_OFF_CLEAR);
+ if (mem_ctrl)
+ clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_RETAIN_MEM);
+ else
+ clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_NORETAIN_MEM);
+
+ if (periph_ctrl) {
+ clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_RETAIN_PERIPH);
+ clk_set_flags(mdss_mdp_lut_clk,
+ CLKFLAG_PERIPH_OFF_CLEAR);
+ } else {
+ clk_set_flags(mdss_mdp_lut_clk, CLKFLAG_PERIPH_OFF_SET);
+ clk_set_flags(mdss_mdp_lut_clk,
+ CLKFLAG_NORETAIN_PERIPH);
+ }
}
__mdss_mdp_reg_access_clk_enable(mdata, false);
}
@@ -1441,17 +1432,21 @@ static int mdss_mdp_idle_pc_restore(void)
mdss_hw_init(mdata);
mdss_iommu_ctrl(0);
- /**
- * sleep 10 microseconds to make sure AD auto-reinitialization
- * is done
- */
- udelay(10);
- mdss_mdp_memory_retention_exit();
-
mdss_mdp_ctl_restore(true);
mdata->idle_pc = false;
end:
+ if (mdata->mem_retain) {
+ /**
+ * sleep 10 microseconds to make sure AD auto-reinitialization
+ * is done
+ */
+ udelay(10);
+ mdss_mdp_memory_retention_ctrl(MEM_RETAIN_ON,
+ PERIPH_RETAIN_ON);
+ mdata->mem_retain = false;
+ }
+
mutex_unlock(&mdp_fs_idle_pc_lock);
return rc;
}
@@ -1980,7 +1975,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
case MDSS_MDP_HW_REV_300:
case MDSS_MDP_HW_REV_301:
mdata->max_target_zorder = 7; /* excluding base layer */
- mdata->max_cursor_size = 384;
+ mdata->max_cursor_size = 512;
mdata->per_pipe_ib_factor.numer = 8;
mdata->per_pipe_ib_factor.denom = 5;
mdata->apply_post_scale_bytes = false;
@@ -1992,6 +1987,8 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
set_bit(MDSS_QOS_REMAPPER, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_WB_QOS, mdata->mdss_qos_map);
set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map); /* cdp supported */
mdata->enable_cdp = false; /* disable cdp */
@@ -4910,10 +4907,12 @@ static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
* Turning off GDSC while overlays are still
* active.
*/
+
+ mdss_mdp_memory_retention_ctrl(MEM_RETAIN_ON,
+ PERIPH_RETAIN_OFF);
mdata->idle_pc = true;
pr_debug("idle pc. active overlays=%d\n",
active_cnt);
- mdss_mdp_memory_retention_enter();
} else {
/*
* Advise RPM to turn MMSS GDSC off during
@@ -4925,7 +4924,11 @@ static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
mdss_mdp_cx_ctrl(mdata, false);
mdss_mdp_batfet_ctrl(mdata, false);
+ mdss_mdp_memory_retention_ctrl(
+ MEM_RETAIN_OFF,
+ PERIPH_RETAIN_OFF);
}
+ mdata->mem_retain = true;
if (mdata->en_svs_high)
mdss_mdp_config_cx_voltage(mdata, false);
regulator_disable(mdata->fs);
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 0085163ada52..8ac63aaaefce 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -122,6 +122,11 @@
*/
#define MDSS_MDP_DS_OVERFETCH_SIZE 5
+#define QOS_LUT_NRT_READ 0x0
+#define QOS_LUT_CWB_READ 0xe4000000
+#define PANIC_LUT_NRT_READ 0x0
+#define ROBUST_LUT_NRT_READ 0xFFFF
+
/* hw cursor can only be setup in highest mixer stage */
#define HW_CURSOR_STAGE(mdata) \
(((mdata)->max_target_zorder + MDSS_MDP_STAGE_0) - 1)
@@ -407,7 +412,7 @@ struct mdss_mdp_cwb {
struct list_head data_queue;
int valid;
u32 wb_idx;
- struct mdp_output_layer *layer;
+ struct mdp_output_layer layer;
void *priv_data;
struct msm_sync_pt_data cwb_sync_pt_data;
struct blocking_notifier_head notifier_head;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index ebc7d2144eb9..eb1e0b5c47a6 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -3424,6 +3424,7 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl)
mutex_lock(&cwb->queue_lock);
cwb_data = list_first_entry_or_null(&cwb->data_queue,
struct mdss_mdp_wb_data, next);
+ __list_del_entry(&cwb_data->next);
mutex_unlock(&cwb->queue_lock);
if (cwb_data == NULL) {
pr_err("no output buffer for cwb\n");
@@ -3453,14 +3454,14 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl)
sctl->opmode |= MDSS_MDP_CTL_OP_WFD_MODE;
/* Select CWB data point */
- data_point = (cwb->layer->flags & MDP_COMMIT_CWB_DSPP) ? 0x4 : 0;
+ data_point = (cwb->layer.flags & MDP_COMMIT_CWB_DSPP) ? 0x4 : 0;
writel_relaxed(data_point, mdata->mdp_base + mdata->ppb_ctl[2]);
if (sctl)
writel_relaxed(data_point + 1,
mdata->mdp_base + mdata->ppb_ctl[3]);
- /* Flush WB */
- ctl->flush_bits |= BIT(16);
+ /* Flush WB and CTL */
+ ctl->flush_bits |= BIT(16) | BIT(17);
opmode = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_TOP) | ctl->opmode;
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, opmode);
@@ -3469,6 +3470,10 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl)
sctl->opmode;
mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, opmode);
}
+
+ /* Increase commit count to signal CWB release fence */
+ atomic_inc(&cwb->cwb_sync_pt_data.commit_cnt);
+
goto cwb_setup_done;
cwb_setup_fail:
diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
index 76fd2d12ac95..294e05c2fbb0 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_hwio.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
@@ -541,6 +541,10 @@ enum mdss_mdp_writeback_index {
#define MDSS_MDP_REG_WB_N16_INIT_PHASE_Y_C12 0x06C
#define MDSS_MDP_REG_WB_OUT_SIZE 0x074
#define MDSS_MDP_REG_WB_ALPHA_X_VALUE 0x078
+#define MDSS_MDP_REG_WB_DANGER_LUT 0x084
+#define MDSS_MDP_REG_WB_SAFE_LUT 0x088
+#define MDSS_MDP_REG_WB_CREQ_LUT 0x08c
+#define MDSS_MDP_REG_WB_QOS_CTRL 0x090
#define MDSS_MDP_REG_WB_CSC_BASE 0x260
#define MDSS_MDP_REG_WB_DST_ADDR_SW_STATUS 0x2B0
#define MDSS_MDP_REG_WB_CDP_CTRL 0x2B4
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index 72d6175686b7..4eb121f01aca 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -73,6 +73,7 @@ struct mdss_mdp_cmd_ctx {
struct mutex clk_mtx;
spinlock_t clk_lock;
spinlock_t koff_lock;
+ spinlock_t ctlstart_lock;
struct work_struct gate_clk_work;
struct delayed_work delayed_off_clk_work;
struct work_struct pp_done_work;
@@ -144,15 +145,11 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
u32 init;
u32 height;
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
-
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
if (!mixer) {
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
- if (!mixer) {
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ if (!mixer)
goto exit;
- }
}
init = mdss_mdp_pingpong_read(mixer->pingpong_base,
@@ -160,10 +157,8 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
height = mdss_mdp_pingpong_read(mixer->pingpong_base,
MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT) & 0xffff;
- if (height < init) {
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ if (height < init)
goto exit;
- }
cnt = mdss_mdp_pingpong_read(mixer->pingpong_base,
MDSS_MDP_REG_PP_INT_COUNT_VAL) & 0xffff;
@@ -173,13 +168,21 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
else
cnt -= init;
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
-
pr_debug("cnt=%d init=%d height=%d\n", cnt, init, height);
exit:
return cnt;
}
+static inline u32 mdss_mdp_cmd_line_count_wrapper(struct mdss_mdp_ctl *ctl)
+{
+ u32 ret;
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ ret = mdss_mdp_cmd_line_count(ctl);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ return ret;
+}
+
static int mdss_mdp_tearcheck_enable(struct mdss_mdp_ctl *ctl, bool enable)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -2677,12 +2680,42 @@ static int mdss_mdp_disable_autorefresh(struct mdss_mdp_ctl *ctl,
return 0;
}
+static bool wait_for_read_ptr_if_late(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_ctl *sctl, struct mdss_panel_info *pinfo)
+{
+ u32 line_count;
+ u32 sline_count = 0;
+ bool ret = true;
+ u32 low_threshold = pinfo->mdp_koff_thshold_low;
+ u32 high_threshold = pinfo->mdp_koff_thshold_high;
+
+ /* read the line count */
+ line_count = mdss_mdp_cmd_line_count(ctl);
+ if (sctl)
+ sline_count = mdss_mdp_cmd_line_count(sctl);
+
+ /* if line count is between the range, return to trigger transfer */
+ if (((line_count > low_threshold) && (line_count < high_threshold)) &&
+ (!sctl || ((sline_count > low_threshold) &&
+ (sline_count < high_threshold))))
+ ret = false;
+
+ pr_debug("threshold:[%d, %d]\n", low_threshold, high_threshold);
+ pr_debug("line:%d sline:%d ret:%d\n", line_count, sline_count, ret);
+ MDSS_XLOG(line_count, sline_count, ret);
+
+ return ret;
+}
static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl,
- struct mdss_mdp_cmd_ctx *ctx)
+ struct mdss_mdp_ctl *sctl, struct mdss_mdp_cmd_ctx *ctx)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
bool is_pp_split = is_pingpong_split(ctl->mfd);
+ struct mdss_panel_info *pinfo = NULL;
+
+ if (ctl->panel_data)
+ pinfo = &ctl->panel_data->panel_info;
MDSS_XLOG(ctx->autorefresh_state);
@@ -2707,9 +2740,33 @@ static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl,
ctx->autorefresh_state = MDP_AUTOREFRESH_ON;
} else {
+
+ /*
+ * Some panels can require that mdp is within some range
+ * of the scanlines in order to trigger the tansfer.
+ * If that is the case, make sure the panel scanline
+ * is within the limit to start.
+ * Acquire an spinlock for this operation to raise the
+ * priority of this thread and make sure the context
+ * is maintained, so we can have the less time possible
+ * between the check of the scanline and the kickoff.
+ */
+ if (pinfo && pinfo->mdp_koff_thshold) {
+ spin_lock(&ctx->ctlstart_lock);
+ if (wait_for_read_ptr_if_late(ctl, sctl, pinfo)) {
+ spin_unlock(&ctx->ctlstart_lock);
+ usleep_range(pinfo->mdp_koff_delay,
+ pinfo->mdp_koff_delay + 10);
+ spin_lock(&ctx->ctlstart_lock);
+ }
+ }
+
/* SW Kickoff */
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
MDSS_XLOG(0x11, ctx->autorefresh_state);
+
+ if (pinfo && pinfo->mdp_koff_thshold)
+ spin_unlock(&ctx->ctlstart_lock);
}
}
@@ -2841,7 +2898,7 @@ static int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
}
/* Kickoff */
- __mdss_mdp_kickoff(ctl, ctx);
+ __mdss_mdp_kickoff(ctl, sctl, ctx);
mdss_mdp_cmd_post_programming(ctl);
@@ -3267,6 +3324,7 @@ static int mdss_mdp_cmd_ctx_setup(struct mdss_mdp_ctl *ctl,
init_completion(&ctx->autorefresh_done);
spin_lock_init(&ctx->clk_lock);
spin_lock_init(&ctx->koff_lock);
+ spin_lock_init(&ctx->ctlstart_lock);
mutex_init(&ctx->clk_mtx);
mutex_init(&ctx->mdp_rdptr_lock);
mutex_init(&ctx->mdp_wrptr_lock);
@@ -3557,7 +3615,7 @@ int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
ctl->ops.wait_pingpong = mdss_mdp_cmd_wait4pingpong;
ctl->ops.add_vsync_handler = mdss_mdp_cmd_add_vsync_handler;
ctl->ops.remove_vsync_handler = mdss_mdp_cmd_remove_vsync_handler;
- ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count;
+ ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count_wrapper;
ctl->ops.restore_fnc = mdss_mdp_cmd_restore;
ctl->ops.early_wake_up_fnc = mdss_mdp_cmd_early_wake_up;
ctl->ops.reconfigure = mdss_mdp_cmd_reconfigure;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
index 40b10e368309..e6e03e7d54b2 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -124,6 +124,30 @@ static inline void mdp_wb_write(struct mdss_mdp_writeback_ctx *ctx,
writel_relaxed(val, ctx->base + reg);
}
+static void mdss_mdp_set_qos_wb(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_writeback_ctx *ctx)
+{
+ u32 wb_qos_setup = QOS_LUT_NRT_READ;
+ struct mdss_mdp_cwb *cwb = NULL;
+ struct mdss_overlay_private *mdp5_data;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (false == test_bit(MDSS_QOS_WB_QOS, mdata->mdss_qos_map))
+ return;
+
+ mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+ cwb = &mdp5_data->cwb;
+
+ if (cwb->valid)
+ wb_qos_setup = QOS_LUT_CWB_READ;
+ else
+ wb_qos_setup = QOS_LUT_NRT_READ;
+
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_DANGER_LUT, PANIC_LUT_NRT_READ);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_SAFE_LUT, ROBUST_LUT_NRT_READ);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_CREQ_LUT, wb_qos_setup);
+}
+
static void mdss_mdp_set_ot_limit_wb(struct mdss_mdp_writeback_ctx *ctx,
int is_wfd)
{
@@ -447,7 +471,7 @@ int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl,
cwb = &mdp5_data->cwb;
ctx = (struct mdss_mdp_writeback_ctx *)cwb->priv_data;
- buffer = &cwb->layer->buffer;
+ buffer = &cwb->layer.buffer;
ctx->opmode = 0;
ctx->img_width = buffer->width;
@@ -495,6 +519,8 @@ int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl,
if (ctl->mdata->default_ot_wr_limit || ctl->mdata->default_ot_rd_limit)
mdss_mdp_set_ot_limit_wb(ctx, false);
+ mdss_mdp_set_qos_wb(ctl, ctx);
+
return ret;
}
@@ -897,6 +923,8 @@ static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
ctl->mdata->default_ot_rd_limit)
mdss_mdp_set_ot_limit_wb(ctx, true);
+ mdss_mdp_set_qos_wb(ctl, ctx);
+
wb_args = (struct mdss_mdp_writeback_arg *) arg;
if (!wb_args)
return -ENOENT;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 91d4332700b6..e26b3843d7b0 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -1618,8 +1618,8 @@ static bool __multirect_validate_rects(struct mdp_input_layer **layers,
/* resolution related validation */
if (mdss_rect_overlap_check(&dst[0], &dst[1])) {
pr_err("multirect dst overlap is not allowed. input: %d,%d,%d,%d paired %d,%d,%d,%d\n",
- dst[0].x, dst[0].y, dst[0].w, dst[0].y,
- dst[1].x, dst[1].y, dst[1].w, dst[1].y);
+ dst[0].x, dst[0].y, dst[0].w, dst[0].h,
+ dst[1].x, dst[1].y, dst[1].w, dst[1].h);
return false;
}
@@ -2285,12 +2285,12 @@ end:
return ret;
}
-int __is_cwb_requested(uint32_t output_layer_flags)
+int __is_cwb_requested(uint32_t commit_flags)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
int req = 0;
- req = output_layer_flags & MDP_COMMIT_CWB_EN;
+ req = commit_flags & MDP_COMMIT_CWB_EN;
if (req && !test_bit(MDSS_CAPS_CWB_SUPPORTED, mdata->mdss_caps_map)) {
pr_err("CWB not supported");
return -ENODEV;
@@ -2330,7 +2330,7 @@ int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
return -EINVAL;
if (commit->output_layer) {
- ret = __is_cwb_requested(commit->output_layer->flags);
+ ret = __is_cwb_requested(commit->flags);
if (IS_ERR_VALUE(ret)) {
return ret;
} else if (ret) {
@@ -2493,7 +2493,7 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
}
if (commit->output_layer) {
- rc = __is_cwb_requested(commit->output_layer->flags);
+ rc = __is_cwb_requested(commit->flags);
if (IS_ERR_VALUE(rc)) {
return rc;
} else if (rc) {
@@ -2553,7 +2553,7 @@ int mdss_mdp_layer_pre_commit_cwb(struct msm_fb_data_type *mfd,
return rc;
}
- mdp5_data->cwb.layer = commit->output_layer;
+ mdp5_data->cwb.layer = *commit->output_layer;
mdp5_data->cwb.wb_idx = commit->output_layer->writeback_ndx;
mutex_lock(&mdp5_data->cwb.queue_lock);
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index a633528b5373..be0491195263 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -54,6 +54,7 @@ struct panel_id {
#define DP_PANEL 12 /* LVDS */
#define DSC_PPS_LEN 128
+#define INTF_EVENT_STR(x) #x
static inline const char *mdss_panel2str(u32 panel)
{
@@ -270,6 +271,78 @@ enum mdss_intf_events {
MDSS_EVENT_MAX,
};
+/**
+ * mdss_panel_intf_event_to_string() - converts interface event enum to string
+ * @event: interface event to be converted to string representation
+ */
+static inline char *mdss_panel_intf_event_to_string(int event)
+{
+ switch (event) {
+ case MDSS_EVENT_RESET:
+ return INTF_EVENT_STR(MDSS_EVENT_RESET);
+ case MDSS_EVENT_LINK_READY:
+ return INTF_EVENT_STR(MDSS_EVENT_LINK_READY);
+ case MDSS_EVENT_UNBLANK:
+ return INTF_EVENT_STR(MDSS_EVENT_UNBLANK);
+ case MDSS_EVENT_PANEL_ON:
+ return INTF_EVENT_STR(MDSS_EVENT_PANEL_ON);
+ case MDSS_EVENT_POST_PANEL_ON:
+ return INTF_EVENT_STR(MDSS_EVENT_POST_PANEL_ON);
+ case MDSS_EVENT_BLANK:
+ return INTF_EVENT_STR(MDSS_EVENT_BLANK);
+ case MDSS_EVENT_PANEL_OFF:
+ return INTF_EVENT_STR(MDSS_EVENT_PANEL_OFF);
+ case MDSS_EVENT_CLOSE:
+ return INTF_EVENT_STR(MDSS_EVENT_CLOSE);
+ case MDSS_EVENT_SUSPEND:
+ return INTF_EVENT_STR(MDSS_EVENT_SUSPEND);
+ case MDSS_EVENT_RESUME:
+ return INTF_EVENT_STR(MDSS_EVENT_RESUME);
+ case MDSS_EVENT_CHECK_PARAMS:
+ return INTF_EVENT_STR(MDSS_EVENT_CHECK_PARAMS);
+ case MDSS_EVENT_CONT_SPLASH_BEGIN:
+ return INTF_EVENT_STR(MDSS_EVENT_CONT_SPLASH_BEGIN);
+ case MDSS_EVENT_CONT_SPLASH_FINISH:
+ return INTF_EVENT_STR(MDSS_EVENT_CONT_SPLASH_FINISH);
+ case MDSS_EVENT_PANEL_UPDATE_FPS:
+ return INTF_EVENT_STR(MDSS_EVENT_PANEL_UPDATE_FPS);
+ case MDSS_EVENT_FB_REGISTERED:
+ return INTF_EVENT_STR(MDSS_EVENT_FB_REGISTERED);
+ case MDSS_EVENT_PANEL_CLK_CTRL:
+ return INTF_EVENT_STR(MDSS_EVENT_PANEL_CLK_CTRL);
+ case MDSS_EVENT_DSI_CMDLIST_KOFF:
+ return INTF_EVENT_STR(MDSS_EVENT_DSI_CMDLIST_KOFF);
+ case MDSS_EVENT_ENABLE_PARTIAL_ROI:
+ return INTF_EVENT_STR(MDSS_EVENT_ENABLE_PARTIAL_ROI);
+ case MDSS_EVENT_DSC_PPS_SEND:
+ return INTF_EVENT_STR(MDSS_EVENT_DSC_PPS_SEND);
+ case MDSS_EVENT_DSI_STREAM_SIZE:
+ return INTF_EVENT_STR(MDSS_EVENT_DSI_STREAM_SIZE);
+ case MDSS_EVENT_DSI_UPDATE_PANEL_DATA:
+ return INTF_EVENT_STR(MDSS_EVENT_DSI_UPDATE_PANEL_DATA);
+ case MDSS_EVENT_REGISTER_RECOVERY_HANDLER:
+ return INTF_EVENT_STR(MDSS_EVENT_REGISTER_RECOVERY_HANDLER);
+ case MDSS_EVENT_REGISTER_MDP_CALLBACK:
+ return INTF_EVENT_STR(MDSS_EVENT_REGISTER_MDP_CALLBACK);
+ case MDSS_EVENT_DSI_PANEL_STATUS:
+ return INTF_EVENT_STR(MDSS_EVENT_DSI_PANEL_STATUS);
+ case MDSS_EVENT_DSI_DYNAMIC_SWITCH:
+ return INTF_EVENT_STR(MDSS_EVENT_DSI_DYNAMIC_SWITCH);
+ case MDSS_EVENT_DSI_RECONFIG_CMD:
+ return INTF_EVENT_STR(MDSS_EVENT_DSI_RECONFIG_CMD);
+ case MDSS_EVENT_DSI_RESET_WRITE_PTR:
+ return INTF_EVENT_STR(MDSS_EVENT_DSI_RESET_WRITE_PTR);
+ case MDSS_EVENT_PANEL_TIMING_SWITCH:
+ return INTF_EVENT_STR(MDSS_EVENT_PANEL_TIMING_SWITCH);
+ case MDSS_EVENT_DEEP_COLOR:
+ return INTF_EVENT_STR(MDSS_EVENT_DEEP_COLOR);
+ case MDSS_EVENT_DISABLE_PANEL:
+ return INTF_EVENT_STR(MDSS_EVENT_DISABLE_PANEL);
+ default:
+ return "unknown";
+ }
+}
+
struct lcd_panel_info {
u32 h_back_porch;
u32 h_front_porch;
@@ -635,6 +708,10 @@ struct mdss_panel_info {
u32 saved_fporch;
/* current fps, once is programmed in hw */
int current_fps;
+ u32 mdp_koff_thshold_low;
+ u32 mdp_koff_thshold_high;
+ bool mdp_koff_thshold;
+ u32 mdp_koff_delay;
int panel_max_fps;
int panel_max_vtotal;
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index b5da4ad1a86b..eab7bcaaa156 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -573,7 +573,6 @@ int mdss_smmu_probe(struct platform_device *pdev)
struct mdss_smmu_domain smmu_domain;
const struct of_device_id *match;
struct dss_module_power *mp;
- int disable_htw = 1;
char name[MAX_CLIENT_NAME_LEN];
const __be32 *address = NULL, *size = NULL;
@@ -667,13 +666,6 @@ int mdss_smmu_probe(struct platform_device *pdev)
goto disable_power;
}
- rc = iommu_domain_set_attr(mdss_smmu->mmu_mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
- if (rc) {
- pr_err("couldn't disable coherent HTW\n");
- goto release_mapping;
- }
-
if (smmu_domain.domain == MDSS_IOMMU_DOMAIN_SECURE ||
smmu_domain.domain == MDSS_IOMMU_DOMAIN_ROT_SECURE) {
int secure_vmid = VMID_CP_PIXEL;
diff --git a/drivers/video/fbdev/msm/msm_ext_display.c b/drivers/video/fbdev/msm/msm_ext_display.c
index e229f52057d4..4899231787f2 100644
--- a/drivers/video/fbdev/msm/msm_ext_display.c
+++ b/drivers/video/fbdev/msm/msm_ext_display.c
@@ -365,6 +365,7 @@ static int msm_ext_disp_hpd(struct platform_device *pdev,
ext_disp->ops->get_audio_edid_blk = NULL;
ext_disp->ops->cable_status = NULL;
ext_disp->ops->get_intf_id = NULL;
+ ext_disp->ops->teardown_done = NULL;
}
ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX;
@@ -463,6 +464,20 @@ end:
return ret;
}
+static void msm_ext_disp_teardown_done(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct msm_ext_disp_init_data *data = NULL;
+
+ ret = msm_ext_disp_get_intf_data_helper(pdev, &data);
+ if (ret || !data) {
+ pr_err("invalid input");
+ return;
+ }
+
+ data->codec_ops.teardown_done(data->pdev);
+}
+
static int msm_ext_disp_get_intf_id(struct platform_device *pdev)
{
int ret = 0;
@@ -545,6 +560,8 @@ static int msm_ext_disp_notify(struct platform_device *pdev,
msm_ext_disp_cable_status;
ext_disp->ops->get_intf_id =
msm_ext_disp_get_intf_id;
+ ext_disp->ops->teardown_done =
+ msm_ext_disp_teardown_done;
}
switch_set_state(&ext_disp->audio_sdev, (int)new_state);
@@ -614,6 +631,7 @@ static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack)
ext_disp->ops->get_audio_edid_blk = NULL;
ext_disp->ops->cable_status = NULL;
ext_disp->ops->get_intf_id = NULL;
+ ext_disp->ops->teardown_done = NULL;
}
ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b15e6edb8f2c..933f1866b811 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3602,6 +3602,7 @@ int ext4_can_truncate(struct inode *inode)
int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
+#if 0
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
struct address_space *mapping = inode->i_mapping;
@@ -3725,6 +3726,12 @@ out_dio:
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
+#else
+ /*
+ * Disabled as per b/28760453
+ */
+ return -EOPNOTSUPP;
+#endif
}
int ext4_inode_attach_jinode(struct inode *inode)
diff --git a/include/dt-bindings/clock/msm-clocks-cobalt.h b/include/dt-bindings/clock/msm-clocks-cobalt.h
index b80ea0c31597..251b7e314238 100644
--- a/include/dt-bindings/clock/msm-clocks-cobalt.h
+++ b/include/dt-bindings/clock/msm-clocks-cobalt.h
@@ -447,7 +447,9 @@
#define clk_dsi0pll_pclk_src 0x5efd85d4
#define clk_dsi0pll_pclk_src_mux 0x84b14663
#define clk_dsi0pll_post_bit_div 0xf46dcf27
-#define clk_dsi0pll_post_vco_div 0x8ee956ff
+#define clk_dsi0pll_post_vco_mux 0xfaf9bd1f
+#define clk_dsi0pll_post_vco_div1 0xabb50b2a
+#define clk_dsi0pll_post_vco_div4 0xbe51c091
#define clk_dsi0pll_bitclk_src 0x36c3c437
#define clk_dsi0pll_vco_clk 0x15940d40
@@ -457,7 +459,9 @@
#define clk_dsi1pll_pclk_src 0xeddcd80e
#define clk_dsi1pll_pclk_src_mux 0x3651feb3
#define clk_dsi1pll_post_bit_div 0x712f0260
-#define clk_dsi1pll_post_vco_div 0x623e04de
+#define clk_dsi1pll_post_vco_mux 0xc6a90d20
+#define clk_dsi1pll_post_vco_div1 0x6f47ca7d
+#define clk_dsi1pll_post_vco_div4 0x90628974
#define clk_dsi1pll_bitclk_src 0x13ab045b
#define clk_dsi1pll_vco_clk 0x99797b50
diff --git a/include/dt-bindings/clock/qcom,gcc-msmfalcon.h b/include/dt-bindings/clock/qcom,gcc-msmfalcon.h
index 0bbcbd28af33..aa76fbad5083 100644
--- a/include/dt-bindings/clock/qcom,gcc-msmfalcon.h
+++ b/include/dt-bindings/clock/qcom,gcc-msmfalcon.h
@@ -195,6 +195,8 @@
#define GCC_UFS_ICE_CORE_HW_CTL_CLK 180
#define GCC_UFS_PHY_AUX_HW_CTL_CLK 181
#define GCC_UFS_UNIPRO_CORE_HW_CTL_CLK 182
+#define HLOS1_VOTE_TURING_ADSP_SMMU_CLK 183
+#define HLOS2_VOTE_TURING_ADSP_SMMU_CLK 184
/* Block resets */
#define GCC_QUSB2PHY_PRIM_BCR 0
@@ -207,52 +209,4 @@
#define GCC_USB_30_BCR 7
#define GCC_USB_PHY_CFG_AHB2PHY_BCR 8
-/* RPM controlled clocks */
-#define RPM_CE1_CLK 1
-#define RPM_CE1_A_CLK 2
-#define RPM_CXO_CLK_SRC 3
-#define RPM_BIMC_CLK 4
-#define RPM_BIMC_A_CLK 5
-#define RPM_CNOC_CLK 6
-#define RPM_CNOC_A_CLK 7
-#define RPM_SNOC_CLK 8
-#define RPM_SNOC_A_CLK 9
-#define RPM_CNOC_PERIPH_CLK 10
-#define RPM_CNOC_PERIPH_A_CLK 11
-#define RPM_CNOC_PERIPH_KEEPALIVE_A_CLK 12
-#define RPM_LN_BB_CLK1 13
-#define RPM_LN_BB_CLK1_AO 14
-#define RPM_LN_BB_CLK1_PIN 15
-#define RPM_LN_BB_CLK1_PIN_AO 16
-#define RPM_BIMC_MSMBUS_CLK 17
-#define RPM_BIMC_MSMBUS_A_CLK 18
-#define RPM_CNOC_MSMBUS_CLK 19
-#define RPM_CNOC_MSMBUS_A_CLK 20
-#define RPM_CXO_CLK_SRC_AO 21
-#define RPM_CXO_DWC3_CLK 22
-#define RPM_CXO_LPM_CLK 23
-#define RPM_CXO_OTG_CLK 24
-#define RPM_CXO_PIL_LPASS_CLK 25
-#define RPM_CXO_PIL_SSC_CLK 26
-#define RPM_CXO_PIL_SPSS_CLK 27
-#define RPM_DIV_CLK1 28
-#define RPM_DIV_CLK1_AO 29
-#define RPM_IPA_CLK 30
-#define RPM_IPA_A_CLK 31
-#define RPM_MCD_CE1_CLK 32
-#define RPM_MMSSNOC_AXI_CLK 33
-#define RPM_MMSSNOC_AXI_A_CLK 34
-#define RPM_QCEDEV_CE1_CLK 35
-#define RPM_QCRYPTO_CE1_CLK 36
-#define RPM_QDSS_CLK 37
-#define RPM_QDSS_A_CLK 38
-#define RPM_QSEECOM_CE1_CLK 39
-#define RPM_RF_CLK2 40
-#define RPM_RF_CLK2_AO 41
-#define RPM_SCM_CE1_CLK 42
-#define RPM_SNOC_MSMBUS_CLK 43
-#define RPM_SNOC_MSMBUS_A_CLK 44
-#define RPM_AGGRE2_NOC_CLK 45
-#define RPM_AGGRE2_NOC_A_CLK 46
-
#endif
diff --git a/include/dt-bindings/clock/qcom,gpu-msmfalcon.h b/include/dt-bindings/clock/qcom,gpu-msmfalcon.h
index 427c6aae05d3..2ef1e34db3a1 100644
--- a/include/dt-bindings/clock/qcom,gpu-msmfalcon.h
+++ b/include/dt-bindings/clock/qcom,gpu-msmfalcon.h
@@ -14,27 +14,32 @@
#ifndef _DT_BINDINGS_CLK_MSM_GPU_FALCON_H
#define _DT_BINDINGS_CLK_MSM_GPU_FALCON_H
-#define GFX3D_CLK_SRC 0
-#define GPU_PLL0_PLL 1
-#define GPU_PLL0_PLL_OUT_AUX 2
-#define GPU_PLL0_PLL_OUT_AUX2 3
-#define GPU_PLL0_PLL_OUT_EARLY 4
-#define GPU_PLL0_PLL_OUT_MAIN 5
-#define GPU_PLL0_PLL_OUT_TEST 6
-#define GPU_PLL1_PLL 7
-#define GPU_PLL1_PLL_OUT_AUX 8
-#define GPU_PLL1_PLL_OUT_AUX2 9
-#define GPU_PLL1_PLL_OUT_EARLY 10
-#define GPU_PLL1_PLL_OUT_MAIN 11
-#define GPU_PLL1_PLL_OUT_TEST 12
-#define GPUCC_CXO_CLK 13
-#define GPUCC_GFX3D_CLK 14
-#define GPUCC_RBBMTIMER_CLK 15
-#define GPUCC_RBCPR_CLK 16
-#define RBBMTIMER_CLK_SRC 18
-#define RBCPR_CLK_SRC 19
+#define GFX3D_CLK_SRC 0
+#define GPU_PLL0_PLL 1
+#define GPU_PLL0_PLL_OUT_AUX 2
+#define GPU_PLL0_PLL_OUT_AUX2 3
+#define GPU_PLL0_PLL_OUT_EARLY 4
+#define GPU_PLL0_PLL_OUT_MAIN 5
+#define GPU_PLL0_PLL_OUT_TEST 6
+#define GPU_PLL1_PLL 7
+#define GPU_PLL1_PLL_OUT_AUX 8
+#define GPU_PLL1_PLL_OUT_AUX2 9
+#define GPU_PLL1_PLL_OUT_EARLY 10
+#define GPU_PLL1_PLL_OUT_MAIN 11
+#define GPU_PLL1_PLL_OUT_TEST 12
+#define GPUCC_CXO_CLK 13
+#define GPUCC_GFX3D_CLK 14
+#define GPUCC_RBBMTIMER_CLK 15
+#define GPUCC_RBCPR_CLK 16
+#define RBBMTIMER_CLK_SRC 17
+#define RBCPR_CLK_SRC 18
-#define GPU_CX_GDSC 0
-#define GPU_GX_GDSC 1
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#define GPUCC_GPU_CX_BCR 0
+#define GPUCC_GPU_GX_BCR 1
+#define GPUCC_RBCPR_BCR 2
+#define GPUCC_SPDM_BCR 3
#endif
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 1a96fdaa33d5..e133705d794a 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -26,6 +26,10 @@ SUBSYS(cpu)
SUBSYS(cpuacct)
#endif
+#if IS_ENABLED(CONFIG_CGROUP_SCHEDTUNE)
+SUBSYS(schedtune)
+#endif
+
#if IS_ENABLED(CONFIG_BLK_CGROUP)
SUBSYS(io)
#endif
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 23026ba6ff25..0de2d0c780d7 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -669,6 +669,9 @@ void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
unsigned long max_rate);
+unsigned long clk_aggregate_rate(struct clk_hw *hw,
+ const struct clk_core *parent);
+
static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
{
dst->clk = src->clk;
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index c156f5082758..e221494fb7a0 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -31,6 +31,11 @@
typedef int (*get_static_t)(cpumask_t *cpumask, int interval,
unsigned long voltage, u32 *power);
+struct cpu_cooling_ops {
+ int (*ceil_limit)(int, u32);
+ int (*get_cur_state)(int, unsigned long *);
+};
+
#ifdef CONFIG_CPU_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
@@ -43,6 +48,10 @@ struct thermal_cooling_device *
cpufreq_power_cooling_register(const struct cpumask *clip_cpus,
u32 capacitance, get_static_t plat_static_func);
+struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+ struct cpu_cooling_ops *ops);
+
/**
* of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
* @np: a valid struct device_node to the cooling device device tree node.
@@ -112,6 +121,13 @@ of_cpufreq_power_cooling_register(struct device_node *np,
return NULL;
}
+static inline struct thermal_cooling_device *
+cpufreq_platform_cooling_register(const struct cpumask *clip_cpus,
+ struct cpu_cooling_ops *ops)
+{
+ return NULL;
+}
+
static inline
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 768c44d9ea8b..0ae23ddbc528 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -144,10 +144,10 @@ the appropriate macros. */
/* This needs to be modified manually now, when we add
a new RANGE of SSIDs to the msg_mask_tbl */
#define MSG_MASK_TBL_CNT 25
-#define APPS_EVENT_LAST_ID 0x0B14
+#define APPS_EVENT_LAST_ID 0x0B2A
#define MSG_SSID_0 0
-#define MSG_SSID_0_LAST 118
+#define MSG_SSID_0_LAST 120
#define MSG_SSID_1 500
#define MSG_SSID_1_LAST 506
#define MSG_SSID_2 1000
@@ -163,7 +163,7 @@ the appropriate macros. */
#define MSG_SSID_7 4600
#define MSG_SSID_7_LAST 4615
#define MSG_SSID_8 5000
-#define MSG_SSID_8_LAST 5032
+#define MSG_SSID_8_LAST 5033
#define MSG_SSID_9 5500
#define MSG_SSID_9_LAST 5516
#define MSG_SSID_10 6000
@@ -193,7 +193,7 @@ the appropriate macros. */
#define MSG_SSID_22 10350
#define MSG_SSID_22_LAST 10377
#define MSG_SSID_23 10400
-#define MSG_SSID_23_LAST 10415
+#define MSG_SSID_23_LAST 10416
#define MSG_SSID_24 0xC000
#define MSG_SSID_24_LAST 0xC063
@@ -336,7 +336,9 @@ static const uint32_t msg_bld_masks_0[] = {
MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
MSG_LVL_MED,
MSG_LVL_MED,
- MSG_LVL_HIGH
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL
};
static const uint32_t msg_bld_masks_1[] = {
@@ -535,7 +537,8 @@ static const uint32_t msg_bld_masks_8[] = {
MSG_LVL_MED,
MSG_LVL_MED,
MSG_LVL_MED,
- MSG_LVL_MED
+ MSG_LVL_MED,
+ MSG_LVL_HIGH
};
static const uint32_t msg_bld_masks_9[] = {
@@ -848,13 +851,14 @@ static const uint32_t msg_bld_masks_23[] = {
MSG_LVL_LOW,
MSG_LVL_LOW,
MSG_LVL_LOW,
+ MSG_LVL_LOW,
MSG_LVL_LOW
};
/* LOG CODES */
static const uint32_t log_code_last_tbl[] = {
0x0, /* EQUIP ID 0 */
- 0x1966, /* EQUIP ID 1 */
+ 0x1A02, /* EQUIP ID 1 */
0x0, /* EQUIP ID 2 */
0x0, /* EQUIP ID 3 */
0x4910, /* EQUIP ID 4 */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index f4f5af978c7c..c34a68ce901a 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -121,7 +121,6 @@ enum iommu_attr {
DOMAIN_ATTR_FSL_PAMU_ENABLE,
DOMAIN_ATTR_FSL_PAMUV1,
DOMAIN_ATTR_NESTING, /* two stages of translation */
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_PT_BASE_ADDR,
DOMAIN_ATTR_SECURE_VMID,
DOMAIN_ATTR_ATOMIC,
diff --git a/include/linux/leds-qpnp-flash.h b/include/linux/leds-qpnp-flash.h
index 3df370a9e6d3..4b5a339970fa 100644
--- a/include/linux/leds-qpnp-flash.h
+++ b/include/linux/leds-qpnp-flash.h
@@ -18,6 +18,9 @@
#define ENABLE_REGULATOR BIT(0)
#define DISABLE_REGULATOR BIT(1)
#define QUERY_MAX_CURRENT BIT(2)
+#define PRE_FLASH BIT(3)
+
+#define FLASH_LED_PREPARE_OPTIONS_MASK GENMASK(3, 0)
int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
int *max_current);
diff --git a/include/linux/mfd/wcd934x/registers.h b/include/linux/mfd/wcd934x/registers.h
index 085e16d66bc4..a824875096e4 100644
--- a/include/linux/mfd/wcd934x/registers.h
+++ b/include/linux/mfd/wcd934x/registers.h
@@ -800,9 +800,11 @@ enum {
#define WCD934X_VBADC_NEW_ADC_DOUTLSB 0x0731
#define WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL 0x0732
#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL 0x0733
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L 0x0733
#define WCD934X_HPH_NEW_INT_RDAC_VREF_CTL 0x0734
#define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL 0x0735
#define WCD934X_HPH_NEW_INT_RDAC_MISC1 0x0736
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R 0x0736
#define WCD934X_HPH_NEW_INT_PA_MISC1 0x0737
#define WCD934X_HPH_NEW_INT_PA_MISC2 0x0738
#define WCD934X_HPH_NEW_INT_PA_RDAC_MISC 0x0739
diff --git a/include/linux/msm_ext_display.h b/include/linux/msm_ext_display.h
index 873a778d5370..59ba776b5f9b 100644
--- a/include/linux/msm_ext_display.h
+++ b/include/linux/msm_ext_display.h
@@ -108,6 +108,7 @@ struct msm_ext_disp_audio_codec_ops {
struct msm_ext_disp_audio_edid_blk *blk);
int (*cable_status)(struct platform_device *pdev, u32 vote);
int (*get_intf_id)(struct platform_device *pdev);
+ void (*teardown_done)(struct platform_device *pdev);
};
/*
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index c95a529b029b..fb2607dd365b 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -13,6 +13,14 @@
#define MSM_GSI_H
#include <linux/types.h>
+enum gsi_ver {
+ GSI_VER_ERR = 0,
+ GSI_VER_1_0 = 1,
+ GSI_VER_1_2 = 2,
+ GSI_VER_1_3 = 3,
+ GSI_VER_MAX,
+};
+
enum gsi_status {
GSI_STATUS_SUCCESS = 0,
GSI_STATUS_ERROR = 1,
@@ -65,6 +73,7 @@ enum gsi_intr_type {
/**
* gsi_per_props - Peripheral related properties
*
+ * @gsi: GSI core version
* @ee: EE where this driver and peripheral driver runs
* @intr: control interrupt type
* @intvec: write data for MSI write
@@ -87,6 +96,7 @@ enum gsi_intr_type {
*
*/
struct gsi_per_props {
+ enum gsi_ver ver;
unsigned int ee;
enum gsi_intr_type intr;
uint32_t intvec;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 03853d956b41..18e1a979db76 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -198,6 +198,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_FLASH_CURRENT_MAX,
POWER_SUPPLY_PROP_UPDATE_NOW,
POWER_SUPPLY_PROP_ESR_COUNT,
+ POWER_SUPPLY_PROP_BUCK_FREQ,
POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE,
POWER_SUPPLY_PROP_CHARGE_DONE,
POWER_SUPPLY_PROP_FLASH_ACTIVE,
@@ -216,8 +217,14 @@ enum power_supply_property {
POWER_SUPPLY_PROP_TYPEC_POWER_ROLE,
POWER_SUPPLY_PROP_PD_ALLOWED,
POWER_SUPPLY_PROP_PD_ACTIVE,
+ POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
+ POWER_SUPPLY_PROP_PD_CURRENT_MAX,
+ POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED,
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ POWER_SUPPLY_PROP_PARALLEL_DISABLE,
+ POWER_SUPPLY_PROP_PARALLEL_PERCENT,
+ POWER_SUPPLY_PROP_PE_START,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h
index b13ebe50c3d6..7c12823894df 100644
--- a/include/linux/qpnp/qpnp-revid.h
+++ b/include/linux/qpnp/qpnp-revid.h
@@ -212,6 +212,7 @@ struct pmic_revid_data {
u8 pmic_type;
u8 pmic_subtype;
const char *pmic_name;
+ int fab_id;
};
#ifdef CONFIG_QPNP_REVID
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a395d8a9ff73..06acefeffd4c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -356,7 +356,7 @@ extern int lockdep_tasklist_lock_is_held(void);
extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
-extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle(struct task_struct *idle, int cpu, bool hotplug);
extern void init_idle_bootup_task(struct task_struct *idle);
extern cpumask_var_t cpu_isolated_map;
@@ -1332,11 +1332,15 @@ struct ravg {
* sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
* demand for tasks.
*
- * 'curr_window' represents task's contribution to cpu busy time
- * statistics (rq->curr_runnable_sum) in current window
+ * 'curr_window_cpu' represents task's contribution to cpu busy time on
+ * various CPUs in the current window
*
- * 'prev_window' represents task's contribution to cpu busy time
- * statistics (rq->prev_runnable_sum) in previous window
+ * 'prev_window_cpu' represents task's contribution to cpu busy time on
+ * various CPUs in the previous window
+ *
+ * 'curr_window' represents the sum of all entries in curr_window_cpu
+ *
+ * 'prev_window' represents the sum of all entries in prev_window_cpu
*
* 'pred_demand' represents task's current predicted cpu busy time
*
@@ -1346,6 +1350,7 @@ struct ravg {
u64 mark_start;
u32 sum, demand;
u32 sum_history[RAVG_HIST_SIZE_MAX];
+ u32 *curr_window_cpu, *prev_window_cpu;
u32 curr_window, prev_window;
u16 active_windows;
u32 pred_demand;
diff --git a/kernel/sched/core_ctl.h b/include/linux/sched/core_ctl.h
index 3b0c12acb9c0..98d7cb3e899b 100644
--- a/kernel/sched/core_ctl.h
+++ b/include/linux/sched/core_ctl.h
@@ -16,9 +16,12 @@
#ifdef CONFIG_SCHED_CORE_CTL
void core_ctl_check(u64 wallclock);
-void core_ctl_set_boost(bool boost);
+int core_ctl_set_boost(bool boost);
#else
static inline void core_ctl_check(u64 wallclock) {}
-static inline void core_ctl_set_boost(bool boost) {}
+static inline int core_ctl_set_boost(bool boost)
+{
+ return 0;
+}
#endif
#endif
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 9fe71c774543..6848454c5447 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -44,6 +44,7 @@ extern unsigned int sysctl_sched_wake_to_idle;
#ifdef CONFIG_SCHED_HMP
extern int sysctl_sched_freq_inc_notify;
extern int sysctl_sched_freq_dec_notify;
+extern unsigned int sysctl_sched_freq_reporting_policy;
extern unsigned int sysctl_sched_window_stats_policy;
extern unsigned int sysctl_sched_ravg_hist_size;
extern unsigned int sysctl_sched_cpu_high_irqload;
diff --git a/include/linux/types.h b/include/linux/types.h
index 70dd3dfde631..9f2d2f46b459 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -9,6 +9,9 @@
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
+#define DECLARE_BITMAP_ARRAY(name,nr,bits) \
+ unsigned long name[nr][BITS_TO_LONGS(bits)]
+
typedef __u32 __kernel_dev_t;
typedef __kernel_fd_set fd_set;
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 3740366d9fc5..cef429cf3dce 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -502,7 +502,7 @@ extern void usb_hc_died(struct usb_hcd *hcd);
extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);
extern void usb_wakeup_notification(struct usb_device *hdev,
unsigned int portnum);
-
+extern void usb_flush_hub_wq(void);
extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum);
extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum);
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 473cb5fb375e..7e2f32883aa4 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -123,5 +123,6 @@ extern int icnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 *ch_count,
u16 buf_len);
extern int icnss_wlan_set_dfs_nol(const void *info, u16 info_len);
extern int icnss_wlan_get_dfs_nol(void *info, u16 info_len);
+extern bool icnss_is_qmi_disable(void);
#endif /* _ICNSS_WLAN_H_ */
diff --git a/include/soc/qcom/smem.h b/include/soc/qcom/smem.h
index b5425dd7eaea..4117b0d47b0d 100644
--- a/include/soc/qcom/smem.h
+++ b/include/soc/qcom/smem.h
@@ -22,11 +22,11 @@ enum {
SMEM_DSPS,
SMEM_WCNSS,
SMEM_MODEM_Q6_FW,
+ SMEM_CDSP = SMEM_MODEM_Q6_FW,
SMEM_RPM,
SMEM_TZ,
SMEM_SPSS,
SMEM_HYP,
- SMEM_CDSP,
NUM_SMEM_SUBSYSTEMS,
};
diff --git a/include/sound/wcd-dsp-mgr.h b/include/sound/wcd-dsp-mgr.h
index aa3b363e95e1..2beb9b38a46a 100644
--- a/include/sound/wcd-dsp-mgr.h
+++ b/include/sound/wcd-dsp-mgr.h
@@ -65,9 +65,14 @@ enum wdsp_event_type {
WDSP_EVENT_RESUME,
};
-enum wdsp_intr {
+enum wdsp_signal {
+ /* Hardware generated interrupts signalled to manager */
WDSP_IPC1_INTR,
WDSP_ERR_INTR,
+
+ /* Other signals */
+ WDSP_CDC_DOWN_SIGNAL,
+ WDSP_CDC_UP_SIGNAL,
};
/*
@@ -92,7 +97,7 @@ struct wdsp_img_section {
u8 *data;
};
-struct wdsp_err_intr_arg {
+struct wdsp_err_signal_arg {
bool mem_dumps_enabled;
u32 remote_start_addr;
size_t dump_size;
@@ -104,8 +109,9 @@ struct wdsp_err_intr_arg {
* their own ops to manager driver
* @get_dev_for_cmpnt: components can use this to get handle
* to struct device * of any other component
- * @intr_handler: callback to notify manager driver that interrupt
- * has occurred.
+ * @signal_handler: callback to notify manager driver that signal
+ * has occurred. Cannot be called from interrupt
+ * context as this can sleep
* @vote_for_dsp: notifies manager that dsp should be booted up
* @suspend: notifies manager that one component wants to suspend.
* Manager will make sure to suspend all components in order
@@ -120,8 +126,8 @@ struct wdsp_mgr_ops {
struct wdsp_cmpnt_ops *ops);
struct device *(*get_dev_for_cmpnt)(struct device *wdsp_dev,
enum wdsp_cmpnt_type type);
- int (*intr_handler)(struct device *wdsp_dev,
- enum wdsp_intr intr, void *arg);
+ int (*signal_handler)(struct device *wdsp_dev,
+ enum wdsp_signal signal, void *arg);
int (*vote_for_dsp)(struct device *wdsp_dev, bool vote);
int (*suspend)(struct device *wdsp_dev);
int (*resume)(struct device *wdsp_dev);
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index daf69b7df534..7778ff3947de 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -260,6 +260,30 @@ TRACE_EVENT(sched_set_boost,
TP_printk("ref_count=%d", __entry->ref_count)
);
+#if defined(CREATE_TRACE_POINTS) && defined(CONFIG_SCHED_HMP)
+static inline void __window_data(u32 *dst, u32 *src)
+{
+ if (src)
+ memcpy(dst, src, nr_cpu_ids * sizeof(u32));
+ else
+ memset(dst, 0, nr_cpu_ids * sizeof(u32));
+}
+
+struct trace_seq;
+const char *__window_print(struct trace_seq *p, const u32 *buf, int buf_len)
+{
+ int i;
+ const char *ret = p->buffer + seq_buf_used(&p->seq);
+
+ for (i = 0; i < buf_len; i++)
+ trace_seq_printf(p, "%u ", buf[i]);
+
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+#endif
+
TRACE_EVENT(sched_update_task_ravg,
TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
@@ -288,13 +312,17 @@ TRACE_EVENT(sched_update_task_ravg,
__field( u64, rq_ps )
__field( u64, grp_cs )
__field( u64, grp_ps )
- __field( u64, grp_nt_cs )
- __field( u64, grp_nt_ps )
+ __field( u64, grp_nt_cs )
+ __field( u64, grp_nt_ps )
__field( u32, curr_window )
__field( u32, prev_window )
+ __dynamic_array(u32, curr_sum, nr_cpu_ids )
+ __dynamic_array(u32, prev_sum, nr_cpu_ids )
__field( u64, nt_cs )
__field( u64, nt_ps )
__field( u32, active_windows )
+ __field( u8, curr_top )
+ __field( u8, prev_top )
),
TP_fast_assign(
@@ -321,22 +349,30 @@ TRACE_EVENT(sched_update_task_ravg,
__entry->grp_nt_ps = cpu_time ? cpu_time->nt_prev_runnable_sum : 0;
__entry->curr_window = p->ravg.curr_window;
__entry->prev_window = p->ravg.prev_window;
+ __window_data(__get_dynamic_array(curr_sum), p->ravg.curr_window_cpu);
+ __window_data(__get_dynamic_array(prev_sum), p->ravg.prev_window_cpu);
__entry->nt_cs = rq->nt_curr_runnable_sum;
__entry->nt_ps = rq->nt_prev_runnable_sum;
__entry->active_windows = p->ravg.active_windows;
+ __entry->curr_top = rq->curr_top;
+ __entry->prev_top = rq->prev_top;
),
- TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u prev_window %u nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu"
- , __entry->wallclock, __entry->win_start, __entry->delta,
+ TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u (%s) prev_window %u (%s) nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu curr_top %u prev_top %u",
+ __entry->wallclock, __entry->win_start, __entry->delta,
task_event_names[__entry->evt], __entry->cpu,
__entry->cur_freq, __entry->cur_pid,
__entry->pid, __entry->comm, __entry->mark_start,
__entry->delta_m, __entry->demand,
__entry->sum, __entry->irqtime, __entry->pred_demand,
__entry->rq_cs, __entry->rq_ps, __entry->curr_window,
- __entry->prev_window, __entry->nt_cs, __entry->nt_ps,
+ __window_print(p, __get_dynamic_array(curr_sum), nr_cpu_ids),
+ __entry->prev_window,
+ __window_print(p, __get_dynamic_array(prev_sum), nr_cpu_ids),
+ __entry->nt_cs, __entry->nt_ps,
__entry->active_windows, __entry->grp_cs,
- __entry->grp_ps, __entry->grp_nt_cs, __entry->grp_nt_ps)
+ __entry->grp_ps, __entry->grp_nt_cs, __entry->grp_nt_ps,
+ __entry->curr_top, __entry->prev_top)
);
TRACE_EVENT(sched_get_task_cpu_cycles,
@@ -1287,6 +1323,21 @@ TRACE_EVENT(core_ctl_set_busy,
__entry->is_busy)
);
+TRACE_EVENT(core_ctl_set_boost,
+
+ TP_PROTO(u32 refcount, s32 ret),
+ TP_ARGS(refcount, ret),
+ TP_STRUCT__entry(
+ __field(u32, refcount)
+ __field(s32, ret)
+ ),
+ TP_fast_assign(
+ __entry->refcount = refcount;
+ __entry->ret = ret;
+ ),
+ TP_printk("refcount=%u, ret=%d", __entry->refcount, __entry->ret)
+);
+
/**
* sched_isolate - called when cores are isolated/unisolated
*
diff --git a/include/trace/events/trace_msm_low_power.h b/include/trace/events/trace_msm_low_power.h
index e14cab59e90a..97eefc665130 100644
--- a/include/trace/events/trace_msm_low_power.h
+++ b/include/trace/events/trace_msm_low_power.h
@@ -192,6 +192,64 @@ TRACE_EVENT(cluster_exit,
__entry->from_idle)
);
+TRACE_EVENT(cluster_pred_select,
+
+ TP_PROTO(const char *name, int index, u32 sleep_us,
+ u32 latency, int pred, u32 pred_us),
+
+ TP_ARGS(name, index, sleep_us, latency, pred, pred_us),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(int, index)
+ __field(u32, sleep_us)
+ __field(u32, latency)
+ __field(int, pred)
+ __field(u32, pred_us)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->index = index;
+ __entry->sleep_us = sleep_us;
+ __entry->latency = latency;
+ __entry->pred = pred;
+ __entry->pred_us = pred_us;
+ ),
+
+ TP_printk("name:%s idx:%d sleep_time:%u latency:%u pred:%d pred_us:%u",
+ __entry->name, __entry->index, __entry->sleep_us,
+ __entry->latency, __entry->pred, __entry->pred_us)
+);
+
+TRACE_EVENT(cluster_pred_hist,
+
+ TP_PROTO(const char *name, int idx, u32 resi,
+ u32 sample, u32 tmr),
+
+ TP_ARGS(name, idx, resi, sample, tmr),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(int, idx)
+ __field(u32, resi)
+ __field(u32, sample)
+ __field(u32, tmr)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->idx = idx;
+ __entry->resi = resi;
+ __entry->sample = sample;
+ __entry->tmr = tmr;
+ ),
+
+ TP_printk("name:%s idx:%d resi:%u sample:%u tmr:%u",
+ __entry->name, __entry->idx, __entry->resi,
+ __entry->sample, __entry->tmr)
+);
+
TRACE_EVENT(pre_pc_cb,
TP_PROTO(int tzflag),
diff --git a/include/uapi/sound/wcd-dsp-glink.h b/include/uapi/sound/wcd-dsp-glink.h
index db92e6b41340..39d128d370a0 100644
--- a/include/uapi/sound/wcd-dsp-glink.h
+++ b/include/uapi/sound/wcd-dsp-glink.h
@@ -8,7 +8,9 @@
enum {
WDSP_REG_PKT = 1,
WDSP_CMD_PKT,
+ WDSP_READY_PKT,
};
+#define WDSP_READY_PKT WDSP_READY_PKT
/*
* struct wdsp_reg_pkt - Glink channel information structure format
diff --git a/init/Kconfig b/init/Kconfig
index 311669332867..eb9e1a0aa688 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1002,6 +1002,23 @@ config CGROUP_CPUACCT
config PAGE_COUNTER
bool
+config CGROUP_SCHEDTUNE
+ bool "CFS tasks boosting cgroup subsystem (EXPERIMENTAL)"
+ depends on SCHED_TUNE
+ help
+ This option provides the "schedtune" controller which improves the
+ flexibility of the task boosting mechanism by introducing the support
+ to define "per task" boost values.
+
+ This new controller:
+ 1. allows only a two layers hierarchy, where the root defines the
+	  system-wide boost value and its direct children each define a
+ different "class of tasks" to be boosted with a different value
+ 2. supports up to 16 different task classes, each one which could be
+ configured with a different boost value
+
+ Say N if unsure.
+
config MEMCG
bool "Memory Resource Controller for Control Groups"
select PAGE_COUNTER
diff --git a/init/main.c b/init/main.c
index fbafa271531c..7d4532bff5da 100644
--- a/init/main.c
+++ b/init/main.c
@@ -505,11 +505,6 @@ asmlinkage __visible void __init start_kernel(void)
smp_setup_processor_id();
debug_objects_early_init();
- /*
- * Set up the the initial canary ASAP:
- */
- boot_init_stack_canary();
-
cgroup_init_early();
local_irq_disable();
@@ -523,6 +518,10 @@ asmlinkage __visible void __init start_kernel(void)
page_address_init();
pr_notice("%s", linux_banner);
setup_arch(&command_line);
+ /*
+	 * Set up the initial canary ASAP:
+ */
+ boot_init_stack_canary();
mm_init_cpumask(&init_mm);
setup_command_line(command_line);
setup_nr_cpu_ids();
diff --git a/kernel/fork.c b/kernel/fork.c
index e89d0bae6f20..8a5962276788 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1684,7 +1684,7 @@ struct task_struct *fork_idle(int cpu)
task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0);
if (!IS_ERR(task)) {
init_idle_pids(task->pids);
- init_idle(task, cpu);
+ init_idle(task, cpu, false);
}
return task;
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 104432f3d311..dac3724e4c1e 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -78,6 +78,9 @@ void irq_migrate_all_off_this_cpu(void)
bool affinity_broken;
desc = irq_to_desc(irq);
+ if (!desc)
+ continue;
+
raw_spin_lock(&desc->lock);
affinity_broken = migrate_one_irq(desc);
raw_spin_unlock(&desc->lock);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ff7f6f35fc8f..c07d844c576e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -75,6 +75,7 @@
#include <linux/context_tracking.h>
#include <linux/compiler.h>
#include <linux/irq.h>
+#include <linux/sched/core_ctl.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -85,7 +86,6 @@
#endif
#include "sched.h"
-#include "core_ctl.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"
@@ -2255,13 +2255,13 @@ void __dl_clear_params(struct task_struct *p)
void sched_exit(struct task_struct *p)
{
unsigned long flags;
- int cpu = get_cpu();
- struct rq *rq = cpu_rq(cpu);
+ struct rq *rq;
u64 wallclock;
sched_set_group_id(p, 0);
- raw_spin_lock_irqsave(&rq->lock, flags);
+ rq = task_rq_lock(p, &flags);
+
/* rq->curr == p */
wallclock = sched_ktime_clock();
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
@@ -2269,11 +2269,21 @@ void sched_exit(struct task_struct *p)
reset_task_stats(p);
p->ravg.mark_start = wallclock;
p->ravg.sum_history[0] = EXITING_TASK_MARKER;
+
+ kfree(p->ravg.curr_window_cpu);
+ kfree(p->ravg.prev_window_cpu);
+
+ /*
+ * update_task_ravg() can be called for exiting tasks. While the
+ * function itself ensures correct behavior, the corresponding
+ * trace event requires that these pointers be NULL.
+ */
+ p->ravg.curr_window_cpu = NULL;
+ p->ravg.prev_window_cpu = NULL;
+
enqueue_task(rq, p, 0);
clear_ed_task(p, rq);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
-
- put_cpu();
+ task_rq_unlock(rq, p, &flags);
}
#endif /* CONFIG_SCHED_HMP */
@@ -2377,6 +2387,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
int cpu = get_cpu();
__sched_fork(clone_flags, p);
+ init_new_task_load(p, false);
/*
* We mark the process as running here. This guarantees that
* nobody will actually run it, and a signal or other external
@@ -2562,7 +2573,6 @@ void wake_up_new_task(struct task_struct *p)
struct rq *rq;
raw_spin_lock_irqsave(&p->pi_lock, flags);
- init_new_task_load(p);
add_new_task_to_grp(p);
/* Initialize new task's runnable average */
init_entity_runnable_average(&p->se);
@@ -5210,17 +5220,21 @@ void init_idle_bootup_task(struct task_struct *idle)
* init_idle - set up an idle thread for a given CPU
* @idle: task in question
* @cpu: cpu the idle task belongs to
+ * @cpu_up: differentiate between initial boot vs hotplug
*
* NOTE: this function does not set the idle thread's NEED_RESCHED
* flag, to make booting more robust.
*/
-void init_idle(struct task_struct *idle, int cpu)
+void init_idle(struct task_struct *idle, int cpu, bool cpu_up)
{
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
__sched_fork(0, idle);
+ if (!cpu_up)
+ init_new_task_load(idle, true);
+
raw_spin_lock_irqsave(&idle->pi_lock, flags);
raw_spin_lock(&rq->lock);
@@ -8009,6 +8023,22 @@ void __init sched_init(void)
rq->old_estimated_time = 0;
rq->old_busy_time_group = 0;
rq->hmp_stats.pred_demands_sum = 0;
+ rq->curr_table = 0;
+ rq->prev_top = 0;
+ rq->curr_top = 0;
+
+ for (j = 0; j < NUM_TRACKED_WINDOWS; j++) {
+ memset(&rq->load_subs[j], 0,
+ sizeof(struct load_subtractions));
+
+ rq->top_tasks[j] = kcalloc(NUM_LOAD_INDICES,
+ sizeof(u8), GFP_NOWAIT);
+
+ /* No other choice */
+ BUG_ON(!rq->top_tasks[j]);
+
+ clear_top_tasks_bitmap(rq->top_tasks_bitmap[j]);
+ }
#endif
rq->max_idle_balance_cost = sysctl_sched_migration_cost;
@@ -8051,7 +8081,7 @@ void __init sched_init(void)
* but because we are the idle thread, we just pick up running again
* when this runqueue becomes "idle".
*/
- init_idle(current, smp_processor_id());
+ init_idle(current, smp_processor_id(), false);
calc_load_update = jiffies + LOAD_FREQ;
@@ -8239,7 +8269,7 @@ void set_curr_task(int cpu, struct task_struct *p)
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);
-static void free_sched_group(struct task_group *tg)
+static void sched_free_group(struct task_group *tg)
{
free_fair_sched_group(tg);
free_rt_sched_group(tg);
@@ -8265,7 +8295,7 @@ struct task_group *sched_create_group(struct task_group *parent)
return tg;
err:
- free_sched_group(tg);
+ sched_free_group(tg);
return ERR_PTR(-ENOMEM);
}
@@ -8285,27 +8315,24 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
}
/* rcu callback to free various structures associated with a task group */
-static void free_sched_group_rcu(struct rcu_head *rhp)
+static void sched_free_group_rcu(struct rcu_head *rhp)
{
/* now it should be safe to free those cfs_rqs */
- free_sched_group(container_of(rhp, struct task_group, rcu));
+ sched_free_group(container_of(rhp, struct task_group, rcu));
}
-/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
/* wait for possible concurrent references to cfs_rqs complete */
- call_rcu(&tg->rcu, free_sched_group_rcu);
+ call_rcu(&tg->rcu, sched_free_group_rcu);
}
void sched_offline_group(struct task_group *tg)
{
unsigned long flags;
- int i;
/* end participation in shares distribution */
- for_each_possible_cpu(i)
- unregister_fair_sched_group(tg, i);
+ unregister_fair_sched_group(tg);
spin_lock_irqsave(&task_group_lock, flags);
list_del_rcu(&tg->list);
@@ -8756,31 +8783,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
+ sched_online_group(tg, parent);
+
return &tg->css;
}
-static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
- struct task_group *parent = css_tg(css->parent);
- if (parent)
- sched_online_group(tg, parent);
- return 0;
+ sched_offline_group(tg);
}
static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
- sched_destroy_group(tg);
-}
-
-static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
-{
- struct task_group *tg = css_tg(css);
-
- sched_offline_group(tg);
+ /*
+ * Relies on the RCU grace period between css_released() and this.
+ */
+ sched_free_group(tg);
}
static void cpu_cgroup_fork(struct task_struct *task, void *private)
@@ -9147,9 +9169,8 @@ static struct cftype cpu_files[] = {
struct cgroup_subsys cpu_cgrp_subsys = {
.css_alloc = cpu_cgroup_css_alloc,
+ .css_released = cpu_cgroup_css_released,
.css_free = cpu_cgroup_css_free,
- .css_online = cpu_cgroup_css_online,
- .css_offline = cpu_cgroup_css_offline,
.fork = cpu_cgroup_fork,
.can_attach = cpu_cgroup_can_attach,
.attach = cpu_cgroup_attach,
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index d81886da7ca2..0db85a4fa9c8 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -45,7 +45,7 @@ struct cluster_data {
bool nrrun_changed;
struct task_struct *core_ctl_thread;
unsigned int first_cpu;
- bool boost;
+ unsigned int boost;
struct kobject kobj;
};
@@ -652,17 +652,40 @@ static bool do_check(u64 wallclock)
return do_check;
}
-void core_ctl_set_boost(bool boost)
+int core_ctl_set_boost(bool boost)
{
unsigned int index = 0;
struct cluster_data *cluster;
+ unsigned long flags;
+ int ret = 0;
+ bool boost_state_changed = false;
+ spin_lock_irqsave(&state_lock, flags);
for_each_cluster(cluster, index) {
- if (cluster->is_big_cluster && cluster->boost != boost) {
- cluster->boost = boost;
- apply_need(cluster);
+ if (cluster->is_big_cluster) {
+ if (boost) {
+ boost_state_changed = !cluster->boost;
+ ++cluster->boost;
+ } else {
+ if (!cluster->boost) {
+ pr_err("Error turning off boost. Boost already turned off\n");
+ ret = -EINVAL;
+ } else {
+ --cluster->boost;
+ boost_state_changed = !cluster->boost;
+ }
+ }
+ break;
}
}
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ if (boost_state_changed)
+ apply_need(cluster);
+
+ trace_core_ctl_set_boost(cluster->boost, ret);
+
+ return ret;
}
void core_ctl_check(u64 wallclock)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index b6dc131f36a6..c8c4272c61d8 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -418,6 +418,7 @@ static void sched_debug_header(struct seq_file *m)
P(min_capacity);
P(max_capacity);
P(sched_ravg_window);
+ P(sched_load_granule);
#endif
#undef PN
#undef P
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df23b0365527..e32d4d7903b0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2619,7 +2619,7 @@ struct cluster_cpu_stats {
int best_idle_cpu, least_loaded_cpu;
int best_capacity_cpu, best_cpu, best_sibling_cpu;
int min_cost, best_sibling_cpu_cost;
- int best_cpu_cstate;
+ int best_cpu_wakeup_latency;
u64 min_load, best_load, best_sibling_cpu_load;
s64 highest_spare_capacity;
};
@@ -2827,19 +2827,19 @@ next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
struct cpu_select_env *env, int cpu_cost)
{
- int cpu_cstate;
+ int wakeup_latency;
int prev_cpu = env->prev_cpu;
- cpu_cstate = cpu_rq(cpu)->cstate;
+ wakeup_latency = cpu_rq(cpu)->wakeup_latency;
if (env->need_idle) {
stats->min_cost = cpu_cost;
if (idle_cpu(cpu)) {
- if (cpu_cstate < stats->best_cpu_cstate ||
- (cpu_cstate == stats->best_cpu_cstate &&
- cpu == prev_cpu)) {
+ if (wakeup_latency < stats->best_cpu_wakeup_latency ||
+ (wakeup_latency == stats->best_cpu_wakeup_latency &&
+ cpu == prev_cpu)) {
stats->best_idle_cpu = cpu;
- stats->best_cpu_cstate = cpu_cstate;
+ stats->best_cpu_wakeup_latency = wakeup_latency;
}
} else {
if (env->cpu_load < stats->min_load ||
@@ -2855,7 +2855,7 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
if (cpu_cost < stats->min_cost) {
stats->min_cost = cpu_cost;
- stats->best_cpu_cstate = cpu_cstate;
+ stats->best_cpu_wakeup_latency = wakeup_latency;
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_CPU_COST;
@@ -2864,11 +2864,11 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
/* CPU cost is the same. Start breaking the tie by C-state */
- if (cpu_cstate > stats->best_cpu_cstate)
+ if (wakeup_latency > stats->best_cpu_wakeup_latency)
return;
- if (cpu_cstate < stats->best_cpu_cstate) {
- stats->best_cpu_cstate = cpu_cstate;
+ if (wakeup_latency < stats->best_cpu_wakeup_latency) {
+ stats->best_cpu_wakeup_latency = wakeup_latency;
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER;
@@ -2883,8 +2883,8 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
}
if (stats->best_cpu != prev_cpu &&
- ((cpu_cstate == 0 && env->cpu_load < stats->best_load) ||
- (cpu_cstate > 0 && env->cpu_load > stats->best_load))) {
+ ((wakeup_latency == 0 && env->cpu_load < stats->best_load) ||
+ (wakeup_latency > 0 && env->cpu_load > stats->best_load))) {
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD;
@@ -2979,7 +2979,7 @@ static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
stats->min_load = stats->best_sibling_cpu_load = ULLONG_MAX;
stats->highest_spare_capacity = 0;
stats->least_loaded_cpu = -1;
- stats->best_cpu_cstate = INT_MAX;
+ stats->best_cpu_wakeup_latency = INT_MAX;
/* No need to initialize stats->best_load */
}
@@ -3056,7 +3056,8 @@ bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
static inline bool
wake_to_waker_cluster(struct cpu_select_env *env)
{
- return !env->need_idle && !env->reason && env->sync &&
+ return env->boost_type == SCHED_BOOST_NONE &&
+ !env->need_idle && !env->reason && env->sync &&
task_load(current) > sched_big_waker_task_load &&
task_load(env->p) < sched_small_wakee_task_load;
}
@@ -3189,8 +3190,8 @@ retry:
}
}
p->last_cpu_selected_ts = sched_ktime_clock();
- sbc_flag |= env.sbc_best_cluster_flag;
out:
+ sbc_flag |= env.sbc_best_cluster_flag;
rcu_read_unlock();
trace_sched_task_load(p, sched_boost(), env.reason, env.sync,
env.need_idle, sbc_flag, target);
@@ -9653,11 +9654,8 @@ void free_fair_sched_group(struct task_group *tg)
for_each_possible_cpu(i) {
if (tg->cfs_rq)
kfree(tg->cfs_rq[i]);
- if (tg->se) {
- if (tg->se[i])
- remove_entity_load_avg(tg->se[i]);
+ if (tg->se)
kfree(tg->se[i]);
- }
}
kfree(tg->cfs_rq);
@@ -9705,21 +9703,29 @@ err:
return 0;
}
-void unregister_fair_sched_group(struct task_group *tg, int cpu)
+void unregister_fair_sched_group(struct task_group *tg)
{
- struct rq *rq = cpu_rq(cpu);
unsigned long flags;
+ struct rq *rq;
+ int cpu;
- /*
- * Only empty task groups can be destroyed; so we can speculatively
- * check on_list without danger of it being re-added.
- */
- if (!tg->cfs_rq[cpu]->on_list)
- return;
+ for_each_possible_cpu(cpu) {
+ if (tg->se[cpu])
+ remove_entity_load_avg(tg->se[cpu]);
- raw_spin_lock_irqsave(&rq->lock, flags);
- list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
+ /*
+ * Only empty task groups can be destroyed; so we can speculatively
+ * check on_list without danger of it being re-added.
+ */
+ if (!tg->cfs_rq[cpu]->on_list)
+ continue;
+
+ rq = cpu_rq(cpu);
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
}
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
@@ -9801,7 +9807,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
return 1;
}
-void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
+void unregister_fair_sched_group(struct task_group *tg) { }
#endif /* CONFIG_FAIR_GROUP_SCHED */
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index a0686ea29243..d220482f4dbc 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -18,12 +18,14 @@
#include <linux/list_sort.h>
#include <linux/syscore_ops.h>
#include <linux/of.h>
+#include <linux/sched/core_ctl.h>
#include "sched.h"
-#include "core_ctl.h"
#include <trace/events/sched.h>
+#define CSTATE_LATENCY_GRANULARITY_SHIFT (6)
+
const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
"TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE",
"IRQ_UPDATE"};
@@ -99,7 +101,10 @@ sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
rq->cstate = cstate; /* C1, C2 etc */
rq->wakeup_energy = wakeup_energy;
- rq->wakeup_latency = wakeup_latency;
+ /* disregard small latency delta (64 us). */
+ rq->wakeup_latency = ((wakeup_latency >>
+ CSTATE_LATENCY_GRANULARITY_SHIFT) <<
+ CSTATE_LATENCY_GRANULARITY_SHIFT);
}
/*
@@ -196,7 +201,7 @@ int sched_update_freq_max_load(const cpumask_t *cpumask)
entry = &max_load->freqs[i];
freq = costs[i].freq;
hpct = get_freq_max_load(cpu, freq);
- if (hpct <= 0 && hpct > 100)
+ if (hpct <= 0 || hpct > 100)
hpct = 100;
hfreq = div64_u64((u64)freq * hpct, 100);
entry->hdemand =
@@ -585,6 +590,7 @@ static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
cluster->dstate_wakeup_latency = 0;
cluster->freq_init_done = false;
+ raw_spin_lock_init(&cluster->load_lock);
cluster->cpus = *cpus;
cluster->efficiency = arch_get_cpu_efficiency(cpumask_first(cpus));
@@ -642,6 +648,7 @@ void init_clusters(void)
{
bitmap_clear(all_cluster_ids, 0, NR_CPUS);
init_cluster.cpus = *cpu_possible_mask;
+ raw_spin_lock_init(&init_cluster.load_lock);
INIT_LIST_HEAD(&cluster_head);
}
@@ -783,6 +790,12 @@ __read_mostly unsigned int sysctl_sched_new_task_windows = 5;
#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
/*
+ * This governs what load needs to be used when reporting CPU busy time
+ * to the cpufreq governor.
+ */
+__read_mostly unsigned int sysctl_sched_freq_reporting_policy;
+
+/*
* For increase, send notification if
* freq_required - cur_freq > sysctl_sched_freq_inc_notify
*/
@@ -818,15 +831,15 @@ unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */
unsigned int
min_max_possible_capacity = 1024; /* min(rq->max_possible_capacity) */
-/* Window size (in ns) */
-__read_mostly unsigned int sched_ravg_window = 10000000;
-
/* Min window size (in ns) = 10ms */
#define MIN_SCHED_RAVG_WINDOW 10000000
/* Max window size (in ns) = 1s */
#define MAX_SCHED_RAVG_WINDOW 1000000000
+/* Window size (in ns) */
+__read_mostly unsigned int sched_ravg_window = MIN_SCHED_RAVG_WINDOW;
+
/* Temporarily disable window-stats activity on all cpus */
unsigned int __read_mostly sched_disable_window_stats;
@@ -846,6 +859,21 @@ static DEFINE_RWLOCK(related_thread_group_lock);
list_for_each_entry(grp, &related_thread_groups, list)
/*
+ * Task load is categorized into buckets for the purpose of top task tracking.
+ * The entire range of load from 0 to sched_ravg_window needs to be covered
+ * in NUM_LOAD_INDICES number of buckets. Therefore the size of each bucket
+ * is given by sched_ravg_window / NUM_LOAD_INDICES. Since the default value
+ * of sched_ravg_window is MIN_SCHED_RAVG_WINDOW, use that to compute
+ * sched_load_granule.
+ */
+__read_mostly unsigned int sched_load_granule =
+ MIN_SCHED_RAVG_WINDOW / NUM_LOAD_INDICES;
+
+/* Size of bitmaps maintained to track top tasks */
+static const unsigned int top_tasks_bitmap_size =
+ BITS_TO_LONGS(NUM_LOAD_INDICES + 1) * sizeof(unsigned long);
+
+/*
* Demand aggregation for frequency purpose:
*
* 'sched_freq_aggregate' controls aggregation of cpu demand of related threads
@@ -1500,7 +1528,7 @@ static inline int invalid_value(unsigned int *data)
/*
* Handle "atomic" update of sysctl_sched_window_stats_policy,
- * sysctl_sched_ravg_hist_size and sched_freq_legacy_mode variables.
+ * sysctl_sched_ravg_hist_size variables.
*/
int sched_window_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
@@ -1606,7 +1634,7 @@ unsigned int cpu_temp(int cpu)
return 0;
}
-void init_new_task_load(struct task_struct *p)
+void init_new_task_load(struct task_struct *p, bool idle_task)
{
int i;
u32 init_load_windows = sched_init_task_load_windows;
@@ -1618,6 +1646,15 @@ void init_new_task_load(struct task_struct *p)
memset(&p->ravg, 0, sizeof(struct ravg));
p->cpu_cycles = 0;
+ p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_ATOMIC);
+ p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_ATOMIC);
+
+ /* Don't have much choice. CPU frequency would be bogus */
+ BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
+
+ if (idle_task)
+ return;
+
if (init_load_pct)
init_load_windows = div64_u64((u64)init_load_pct *
(u64)sched_ravg_window, 100);
@@ -2156,6 +2193,174 @@ void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
p->ravg.pred_demand = new;
}
+/*
+ * Reset a top-tasks bitmap: clear all load-index bits and set the
+ * sentinel bit at NUM_LOAD_INDICES so bitmap searches always terminate.
+ */
+void clear_top_tasks_bitmap(unsigned long *bitmap)
+{
+	memset(bitmap, 0, top_tasks_bitmap_size);
+	__set_bit(NUM_LOAD_INDICES, bitmap);
+}
+
+/*
+ * Special case the last index and provide a fast path for index = 0.
+ * Note that sched_load_granule can change underneath us if we are not
+ * holding any runqueue locks while calling the two functions below.
+ */
+static u32 top_task_load(struct rq *rq)
+{
+	int index = rq->prev_top;
+	u8 prev = 1 - rq->curr_table;
+
+	if (!index) {
+		int msb = NUM_LOAD_INDICES - 1;
+
+		/*
+		 * Index 0 is ambiguous: it is both the default value of
+		 * prev_top and a valid bucket. Consult the bitmap (kept in
+		 * reverse order, so bucket 0 is the most significant bit)
+		 * to tell "no top task" apart from "top task in bucket 0".
+		 */
+		if (!test_bit(msb, rq->top_tasks_bitmap[prev]))
+			return 0;
+		else
+			return sched_load_granule;
+	} else if (index == NUM_LOAD_INDICES - 1) {
+		/* Last bucket covers loads all the way up to the window size */
+		return sched_ravg_window;
+	} else {
+		/* Report the upper bound of the occupied bucket */
+		return (index + 1) * sched_load_granule;
+	}
+}
+
+/* Map a task load to its top-task bucket index in [0, NUM_LOAD_INDICES). */
+static int load_to_index(u32 load)
+{
+	if (load < sched_load_granule)
+		return 0;
+	else if (load >= sched_ravg_window)
+		return NUM_LOAD_INDICES - 1;
+	else
+		return load / sched_load_granule;
+}
+
+/*
+ * Maintain rq's top-task tables and bitmaps after 'p's window counters
+ * have been updated. 'old_curr_window' is the task's curr_window value
+ * before the update; 'new_window' indicates a window rollover occurred
+ * and 'full_window' that at least one complete window elapsed.
+ */
+static void update_top_tasks(struct task_struct *p, struct rq *rq,
+		u32 old_curr_window, int new_window, bool full_window)
+{
+	u8 curr = rq->curr_table;
+	u8 prev = 1 - curr;
+	u8 *curr_table = rq->top_tasks[curr];
+	u8 *prev_table = rq->top_tasks[prev];
+	int old_index, new_index, update_index;
+	u32 curr_window = p->ravg.curr_window;
+	u32 prev_window = p->ravg.prev_window;
+	bool zero_index_update;
+
+	/* Nothing changed for this task and no rollover: nothing to do */
+	if (old_curr_window == curr_window && !new_window)
+		return;
+
+	old_index = load_to_index(old_curr_window);
+	new_index = load_to_index(curr_window);
+
+	if (!new_window) {
+		/*
+		 * Index 0 covers both zero load and loads below one granule;
+		 * a 0 -> non-zero transition must still add the task to the
+		 * table even though the bucket index is unchanged.
+		 */
+		zero_index_update = !old_curr_window && curr_window;
+		if (old_index != new_index || zero_index_update) {
+			if (old_curr_window)
+				curr_table[old_index] -= 1;
+			if (curr_window)
+				curr_table[new_index] += 1;
+			if (new_index > rq->curr_top)
+				rq->curr_top = new_index;
+		}
+
+		/* Bitmaps are reversed: bucket i lives at bit N - 1 - i */
+		if (!curr_table[old_index])
+			__clear_bit(NUM_LOAD_INDICES - old_index - 1,
+				rq->top_tasks_bitmap[curr]);
+
+		if (curr_table[new_index] == 1)
+			__set_bit(NUM_LOAD_INDICES - new_index - 1,
+				rq->top_tasks_bitmap[curr]);
+
+		return;
+	}
+
+	/*
+	 * The window has rolled over for this task. By the time we get
+	 * here, curr/prev swaps would have already occurred. So we need
+	 * to use prev_window for the new index.
+	 */
+	update_index = load_to_index(prev_window);
+
+	if (full_window) {
+		/*
+		 * Two cases here. Either 'p' ran for the entire window or
+		 * it didn't run at all. In either case there is no entry
+		 * in the prev table. If 'p' ran the entire window, we just
+		 * need to create a new entry in the prev table. In this case
+		 * update_index will correspond to sched_ravg_window
+		 * so we can unconditionally update the top index.
+		 */
+		if (prev_window) {
+			prev_table[update_index] += 1;
+			rq->prev_top = update_index;
+		}
+
+		if (prev_table[update_index] == 1)
+			__set_bit(NUM_LOAD_INDICES - update_index - 1,
+				rq->top_tasks_bitmap[prev]);
+	} else {
+		zero_index_update = !old_curr_window && prev_window;
+		if (old_index != update_index || zero_index_update) {
+			if (old_curr_window)
+				prev_table[old_index] -= 1;
+
+			prev_table[update_index] += 1;
+
+			if (update_index > rq->prev_top)
+				rq->prev_top = update_index;
+
+			if (!prev_table[old_index])
+				__clear_bit(NUM_LOAD_INDICES - old_index - 1,
+						rq->top_tasks_bitmap[prev]);
+
+			if (prev_table[update_index] == 1)
+				__set_bit(NUM_LOAD_INDICES - update_index - 1,
+						rq->top_tasks_bitmap[prev]);
+		}
+	}
+
+	/* Account the task's contribution to the newly started window */
+	if (curr_window) {
+		curr_table[new_index] += 1;
+
+		if (new_index > rq->curr_top)
+			rq->curr_top = new_index;
+
+		if (curr_table[new_index] == 1)
+			__set_bit(NUM_LOAD_INDICES - new_index - 1,
+				rq->top_tasks_bitmap[curr]);
+	}
+}
+
+/* Zero the per-bucket task refcounts for one tracked window. */
+static inline void clear_top_tasks_table(u8 *table)
+{
+	memset(table, 0, NUM_LOAD_INDICES * sizeof(u8));
+}
+
+/* All-zero per-CPU window array, used to zero-fill full-window rollovers */
+static u32 empty_windows[NR_CPUS];
+
+/*
+ * Roll 'p's window statistics over: the current window (sum and per-CPU
+ * contributions) becomes the previous window, and the current window
+ * restarts at zero. When a full window elapsed since the task last ran,
+ * the previous window is zeroed as well (via empty_windows).
+ */
+static void rollover_task_window(struct task_struct *p, bool full_window)
+{
+	u32 *curr_cpu_windows = empty_windows;
+	u32 curr_window;
+	int i;
+
+	/* Rollover the sum */
+	curr_window = 0;
+
+	if (!full_window) {
+		curr_window = p->ravg.curr_window;
+		curr_cpu_windows = p->ravg.curr_window_cpu;
+	}
+
+	p->ravg.prev_window = curr_window;
+	p->ravg.curr_window = 0;
+
+	/* Roll over individual CPU contributions */
+	for (i = 0; i < nr_cpu_ids; i++) {
+		p->ravg.prev_window_cpu[i] = curr_cpu_windows[i];
+		p->ravg.curr_window_cpu[i] = 0;
+	}
+}
+
/*
* Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
*/
@@ -2176,6 +2381,8 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
int prev_sum_reset = 0;
bool new_task;
struct related_thread_group *grp;
+ int cpu = rq->cpu;
+ u32 old_curr_window;
new_window = mark_start < window_start;
if (new_window) {
@@ -2235,57 +2442,43 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
* Handle per-task window rollover. We don't care about the idle
* task or exiting tasks.
*/
- if (new_window && !is_idle_task(p) && !exiting_task(p)) {
- u32 curr_window = 0;
-
- if (!full_window)
- curr_window = p->ravg.curr_window;
+ if (!is_idle_task(p) && !exiting_task(p)) {
+ old_curr_window = p->ravg.curr_window;
- p->ravg.prev_window = curr_window;
- p->ravg.curr_window = 0;
+ if (new_window)
+ rollover_task_window(p, full_window);
}
if (flip_counters) {
u64 curr_sum = *curr_runnable_sum;
u64 nt_curr_sum = *nt_curr_runnable_sum;
+ u8 curr_table = rq->curr_table;
+ u8 prev_table = 1 - curr_table;
+ int curr_top = rq->curr_top;
+
+ clear_top_tasks_table(rq->top_tasks[prev_table]);
+ clear_top_tasks_bitmap(rq->top_tasks_bitmap[prev_table]);
- if (prev_sum_reset)
+ if (prev_sum_reset) {
curr_sum = nt_curr_sum = 0;
+ curr_top = 0;
+ clear_top_tasks_table(rq->top_tasks[curr_table]);
+ clear_top_tasks_bitmap(
+ rq->top_tasks_bitmap[curr_table]);
+ }
*prev_runnable_sum = curr_sum;
*nt_prev_runnable_sum = nt_curr_sum;
*curr_runnable_sum = 0;
*nt_curr_runnable_sum = 0;
+ rq->curr_table = prev_table;
+ rq->prev_top = curr_top;
+ rq->curr_top = 0;
}
- if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
- /*
- * account_busy_for_cpu_time() = 0, so no update to the
- * task's current window needs to be made. This could be
- * for example
- *
- * - a wakeup event on a task within the current
- * window (!new_window below, no action required),
- * - switching to a new task from idle (PICK_NEXT_TASK)
- * in a new window where irqtime is 0 and we aren't
- * waiting on IO
- */
-
- if (!new_window)
- return;
-
- /*
- * A new window has started. The RQ demand must be rolled
- * over if p is the current task.
- */
- if (p_is_curr_task) {
- /* p is idle task */
- BUG_ON(p != rq->idle);
- }
-
- return;
- }
+ if (!account_busy_for_cpu_time(rq, p, irqtime, event))
+ goto done;
if (!new_window) {
/*
@@ -2305,10 +2498,12 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
if (new_task)
*nt_curr_runnable_sum += delta;
- if (!is_idle_task(p) && !exiting_task(p))
+ if (!is_idle_task(p) && !exiting_task(p)) {
p->ravg.curr_window += delta;
+ p->ravg.curr_window_cpu[cpu] += delta;
+ }
- return;
+ goto done;
}
if (!p_is_curr_task) {
@@ -2331,8 +2526,10 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
* contribution to previous completed window.
*/
delta = scale_exec_time(window_start - mark_start, rq);
- if (!exiting_task(p))
+ if (!exiting_task(p)) {
p->ravg.prev_window += delta;
+ p->ravg.prev_window_cpu[cpu] += delta;
+ }
} else {
/*
* Since at least one full window has elapsed,
@@ -2340,8 +2537,10 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
* full window (window_size).
*/
delta = scale_exec_time(window_size, rq);
- if (!exiting_task(p))
+ if (!exiting_task(p)) {
p->ravg.prev_window = delta;
+ p->ravg.prev_window_cpu[cpu] = delta;
+ }
}
*prev_runnable_sum += delta;
@@ -2354,10 +2553,12 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
if (new_task)
*nt_curr_runnable_sum += delta;
- if (!exiting_task(p))
+ if (!exiting_task(p)) {
p->ravg.curr_window = delta;
+ p->ravg.curr_window_cpu[cpu] = delta;
+ }
- return;
+ goto done;
}
if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
@@ -2381,8 +2582,10 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
* contribution to previous completed window.
*/
delta = scale_exec_time(window_start - mark_start, rq);
- if (!is_idle_task(p) && !exiting_task(p))
+ if (!is_idle_task(p) && !exiting_task(p)) {
p->ravg.prev_window += delta;
+ p->ravg.prev_window_cpu[cpu] += delta;
+ }
} else {
/*
* Since at least one full window has elapsed,
@@ -2390,8 +2593,10 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
* full window (window_size).
*/
delta = scale_exec_time(window_size, rq);
- if (!is_idle_task(p) && !exiting_task(p))
+ if (!is_idle_task(p) && !exiting_task(p)) {
p->ravg.prev_window = delta;
+ p->ravg.prev_window_cpu[cpu] = delta;
+ }
}
/*
@@ -2408,10 +2613,12 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
if (new_task)
*nt_curr_runnable_sum += delta;
- if (!is_idle_task(p) && !exiting_task(p))
+ if (!is_idle_task(p) && !exiting_task(p)) {
p->ravg.curr_window = delta;
+ p->ravg.curr_window_cpu[cpu] = delta;
+ }
- return;
+ goto done;
}
if (irqtime) {
@@ -2456,7 +2663,10 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
return;
}
- BUG();
+done:
+ if (!is_idle_task(p) && !exiting_task(p))
+ update_top_tasks(p, rq, old_curr_window,
+ new_window, full_window);
}
static inline u32 predict_and_update_buckets(struct rq *rq,
@@ -2824,11 +3034,23 @@ void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
void reset_task_stats(struct task_struct *p)
{
u32 sum = 0;
+ u32 *curr_window_ptr = NULL;
+ u32 *prev_window_ptr = NULL;
- if (exiting_task(p))
+ if (exiting_task(p)) {
sum = EXITING_TASK_MARKER;
+ } else {
+ curr_window_ptr = p->ravg.curr_window_cpu;
+ prev_window_ptr = p->ravg.prev_window_cpu;
+ memset(curr_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
+ memset(prev_window_ptr, 0, sizeof(u32) * nr_cpu_ids);
+ }
memset(&p->ravg, 0, sizeof(struct ravg));
+
+ p->ravg.curr_window_cpu = curr_window_ptr;
+ p->ravg.prev_window_cpu = prev_window_ptr;
+
/* Retain EXITING_TASK marker */
p->ravg.sum_history[0] = sum;
}
@@ -2884,7 +3106,9 @@ static void reset_all_task_stats(void)
read_lock(&tasklist_lock);
do_each_thread(g, p) {
+ raw_spin_lock(&p->pi_lock);
reset_task_stats(p);
+ raw_spin_unlock(&p->pi_lock);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}
@@ -2929,7 +3153,7 @@ const char *sched_window_reset_reasons[] = {
/* Called with IRQs enabled */
void reset_all_window_stats(u64 window_start, unsigned int window_size)
{
- int cpu;
+ int cpu, i;
unsigned long flags;
u64 start_ts = sched_ktime_clock();
int reason = WINDOW_CHANGE;
@@ -2963,6 +3187,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
if (window_size) {
sched_ravg_window = window_size * TICK_NSEC;
set_hmp_defaults();
+ sched_load_granule = sched_ravg_window / NUM_LOAD_INDICES;
}
enable_window_stats();
@@ -2974,6 +3199,16 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
rq->window_start = window_start;
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+ for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
+ memset(&rq->load_subs[i], 0,
+ sizeof(struct load_subtractions));
+ clear_top_tasks_table(rq->top_tasks[i]);
+ clear_top_tasks_bitmap(rq->top_tasks_bitmap[i]);
+ }
+
+ rq->curr_table = 0;
+ rq->curr_top = 0;
+ rq->prev_top = 0;
reset_cpu_hmp_stats(cpu, 1);
}
@@ -3006,6 +3241,59 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
sched_ktime_clock() - start_ts, reason, old, new);
}
+/*
+ * In this function we match the accumulated subtractions with the current
+ * and previous windows we are operating with. Ignore any entries where
+ * the window start in the load_subtraction struct does not match either
+ * the current or the previous window. This could happen whenever CPUs
+ * become idle or busy with interrupts disabled for an extended period.
+ */
+static inline void account_load_subtractions(struct rq *rq)
+{
+	u64 ws = rq->window_start;
+	u64 prev_ws = ws - sched_ravg_window;
+	struct load_subtractions *ls = rq->load_subs;
+	int i;
+
+	for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
+		if (ls[i].window_start == ws) {
+			rq->curr_runnable_sum -= ls[i].subs;
+			rq->nt_curr_runnable_sum -= ls[i].new_subs;
+		} else if (ls[i].window_start == prev_ws) {
+			rq->prev_runnable_sum -= ls[i].subs;
+			rq->nt_prev_runnable_sum -= ls[i].new_subs;
+		}
+
+		/* Consume the entry whether or not it matched a live window */
+		ls[i].subs = 0;
+		ls[i].new_subs = 0;
+	}
+
+	/* Subtractions must never exceed what was previously accounted */
+	BUG_ON((s64)rq->prev_runnable_sum < 0);
+	BUG_ON((s64)rq->curr_runnable_sum < 0);
+	BUG_ON((s64)rq->nt_prev_runnable_sum < 0);
+	BUG_ON((s64)rq->nt_curr_runnable_sum < 0);
+}
+
+/*
+ * Translate a CPU's accumulated busy time into the load reported to the
+ * cpufreq governor, according to sysctl_sched_freq_reporting_policy.
+ */
+static inline u64 freq_policy_load(struct rq *rq, u64 load)
+{
+	unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
+
+	switch (reporting_policy) {
+	case FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK:
+		/* Larger of the CPU load and the biggest single task */
+		load = max_t(u64, load, top_task_load(rq));
+		break;
+	case FREQ_REPORT_TOP_TASK:
+		load = top_task_load(rq);
+		break;
+	case FREQ_REPORT_CPU_LOAD:
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+
+	return load;
+}
+
static inline void
sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time);
@@ -3028,6 +3316,7 @@ void sched_get_cpus_busy(struct sched_load *busy,
struct related_thread_group *grp;
u64 total_group_load = 0, total_ngload = 0;
bool aggregate_load = false;
+ struct sched_cluster *cluster = cpu_cluster(cpumask_first(query_cpus));
if (unlikely(cpus == 0))
return;
@@ -3045,6 +3334,13 @@ void sched_get_cpus_busy(struct sched_load *busy,
window_size = sched_ravg_window;
+ /*
+ * We don't really need the cluster lock for this entire for loop
+ * block. However, there is no advantage in optimizing this as rq
+ * locks are held regardless and would prevent migration anyways
+ */
+ raw_spin_lock(&cluster->load_lock);
+
for_each_cpu(cpu, query_cpus) {
rq = cpu_rq(cpu);
@@ -3052,6 +3348,7 @@ void sched_get_cpus_busy(struct sched_load *busy,
0);
cur_freq[i] = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
+ account_load_subtractions(rq);
load[i] = rq->old_busy_time = rq->prev_runnable_sum;
nload[i] = rq->nt_prev_runnable_sum;
pload[i] = rq->hmp_stats.pred_demands_sum;
@@ -3078,6 +3375,8 @@ void sched_get_cpus_busy(struct sched_load *busy,
i++;
}
+ raw_spin_unlock(&cluster->load_lock);
+
for_each_related_thread_group(grp) {
for_each_cpu(cpu, query_cpus) {
/* Protected by rq_lock */
@@ -3112,6 +3411,8 @@ void sched_get_cpus_busy(struct sched_load *busy,
load[i] += group_load[i];
nload[i] += ngload[i];
+
+ load[i] = freq_policy_load(rq, load[i]);
/*
* Scale load in reference to cluster max_possible_freq.
*
@@ -3232,6 +3533,189 @@ int sched_set_window(u64 window_start, unsigned int window_size)
return 0;
}
+/* Claim load_subs slot 'index' for window 'ws' with zeroed subtractions. */
+static inline void create_subtraction_entry(struct rq *rq, u64 ws, int index)
+{
+	rq->load_subs[index].window_start = ws;
+	rq->load_subs[index].subs = 0;
+	rq->load_subs[index].new_subs = 0;
+}
+
+/*
+ * Return the index of the load_subs slot tracking window 'ws' on this
+ * runqueue, recycling the slot with the oldest window_start when no
+ * existing entry matches.
+ *
+ * The return type must be int, not bool: a bool return would truncate
+ * the slot index to 0/1, which is only accidentally harmless while
+ * NUM_TRACKED_WINDOWS == 2.
+ */
+static int get_subtraction_index(struct rq *rq, u64 ws)
+{
+	int i;
+	u64 oldest = ULLONG_MAX;
+	int oldest_index = 0;
+
+	for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
+		u64 entry_ws = rq->load_subs[i].window_start;
+
+		/* Reuse an entry already tracking this window */
+		if (ws == entry_ws)
+			return i;
+
+		if (entry_ws < oldest) {
+			oldest = entry_ws;
+			oldest_index = i;
+		}
+	}
+
+	/* No match: evict the stalest entry for the new window */
+	create_subtraction_entry(rq, ws, oldest_index);
+	return oldest_index;
+}
+
+/* Accumulate a task's load into the given subtraction slot on 'rq'. */
+static void update_rq_load_subtractions(int index, struct rq *rq,
+		u32 sub_load, bool new_task)
+{
+	rq->load_subs[index].subs += sub_load;
+	if (new_task)
+		rq->load_subs[index].new_subs += sub_load;
+}
+
+/*
+ * Record, for every other CPU in 'cpu's cluster, the load that 'p'
+ * contributed there in the current ('ws') and previous windows so those
+ * runqueues can subtract it lazily (see account_load_subtractions()).
+ * The task's per-CPU window contributions are consumed (zeroed) here.
+ */
+static void update_cluster_load_subtractions(struct task_struct *p,
+		int cpu, u64 ws, bool new_task)
+{
+	struct sched_cluster *cluster = cpu_cluster(cpu);
+	struct cpumask cluster_cpus = cluster->cpus;
+	u64 prev_ws = ws - sched_ravg_window;
+	int i;
+
+	/* 'cpu' itself is fixed up directly by the caller */
+	cpumask_clear_cpu(cpu, &cluster_cpus);
+	raw_spin_lock(&cluster->load_lock);
+
+	for_each_cpu(i, &cluster_cpus) {
+		struct rq *rq = cpu_rq(i);
+		int index;
+
+		if (p->ravg.curr_window_cpu[i]) {
+			index = get_subtraction_index(rq, ws);
+			update_rq_load_subtractions(index, rq,
+				p->ravg.curr_window_cpu[i], new_task);
+			p->ravg.curr_window_cpu[i] = 0;
+		}
+
+		if (p->ravg.prev_window_cpu[i]) {
+			index = get_subtraction_index(rq, prev_ws);
+			update_rq_load_subtractions(index, rq,
+				p->ravg.prev_window_cpu[i], new_task);
+			p->ravg.prev_window_cpu[i] = 0;
+		}
+	}
+
+	raw_spin_unlock(&cluster->load_lock);
+}
+
+/*
+ * Move 'p's current/previous window contributions from task_cpu's
+ * runqueue to new_cpu's on an inter-frequency-domain migration.
+ * Intra-domain migrations need no fixup.
+ * NOTE(review): assumes p's window_cpu entries for new_cpu are zero on
+ * entry (the task was not contributing there) — confirm with callers.
+ */
+static inline void inter_cluster_migration_fixup
+	(struct task_struct *p, int new_cpu, int task_cpu, bool new_task)
+{
+	struct rq *dest_rq = cpu_rq(new_cpu);
+	struct rq *src_rq = cpu_rq(task_cpu);
+
+	if (same_freq_domain(new_cpu, task_cpu))
+		return;
+
+	/* The task's whole window contribution now lands on new_cpu */
+	p->ravg.curr_window_cpu[new_cpu] = p->ravg.curr_window;
+	p->ravg.prev_window_cpu[new_cpu] = p->ravg.prev_window;
+
+	dest_rq->curr_runnable_sum += p->ravg.curr_window;
+	dest_rq->prev_runnable_sum += p->ravg.prev_window;
+
+	src_rq->curr_runnable_sum -= p->ravg.curr_window_cpu[task_cpu];
+	src_rq->prev_runnable_sum -= p->ravg.prev_window_cpu[task_cpu];
+
+	if (new_task) {
+		dest_rq->nt_curr_runnable_sum += p->ravg.curr_window;
+		dest_rq->nt_prev_runnable_sum += p->ravg.prev_window;
+
+		src_rq->nt_curr_runnable_sum -=
+				p->ravg.curr_window_cpu[task_cpu];
+		src_rq->nt_prev_runnable_sum -=
+				p->ravg.prev_window_cpu[task_cpu];
+	}
+
+	p->ravg.curr_window_cpu[task_cpu] = 0;
+	p->ravg.prev_window_cpu[task_cpu] = 0;
+
+	/* Contributions left on other CPUs of the old cluster: subtract lazily */
+	update_cluster_load_subtractions(p, task_cpu,
+			src_rq->window_start, new_task);
+
+	BUG_ON((s64)src_rq->prev_runnable_sum < 0);
+	BUG_ON((s64)src_rq->curr_runnable_sum < 0);
+	BUG_ON((s64)src_rq->nt_prev_runnable_sum < 0);
+	BUG_ON((s64)src_rq->nt_curr_runnable_sum < 0);
+}
+
+/*
+ * Find the highest-load occupied bucket after the old top was vacated.
+ * The bitmap stores bucket i at bit NUM_LOAD_INDICES - 1 - i, so lower
+ * bit positions mean higher load; returns 0 when no bucket is occupied.
+ * NOTE(review): the search starts at bit position 'old_top', but the
+ * vacated bucket maps to bit NUM_LOAD_INDICES - 1 - old_top; for
+ * old_top > NUM_LOAD_INDICES / 2 this can skip occupied buckets —
+ * confirm the intended start offset.
+ */
+static int get_top_index(unsigned long *bitmap, unsigned long old_top)
+{
+	int index = find_next_bit(bitmap, NUM_LOAD_INDICES, old_top);
+
+	if (index == NUM_LOAD_INDICES)
+		return 0;
+
+	return NUM_LOAD_INDICES - 1 - index;
+}
+
+/*
+ * Transfer 'p's entries in the top-task tables from src_rq to dst_rq for
+ * both the current and previous windows, keeping the bitmaps and cached
+ * curr_top/prev_top indices coherent on both runqueues. Runs under both
+ * runqueue locks (see fixup_busy_time()'s double_rq_lock/unlock).
+ */
+static void
+migrate_top_tasks(struct task_struct *p, struct rq *src_rq, struct rq *dst_rq)
+{
+	int index;
+	int top_index;
+	u32 curr_window = p->ravg.curr_window;
+	u32 prev_window = p->ravg.prev_window;
+	u8 src = src_rq->curr_table;
+	u8 dst = dst_rq->curr_table;
+	u8 *src_table;
+	u8 *dst_table;
+
+	if (curr_window) {
+		src_table = src_rq->top_tasks[src];
+		dst_table = dst_rq->top_tasks[dst];
+		index = load_to_index(curr_window);
+		src_table[index] -= 1;
+		dst_table[index] += 1;
+
+		/* Bitmaps are reversed: bucket i lives at bit N - 1 - i */
+		if (!src_table[index])
+			__clear_bit(NUM_LOAD_INDICES - index - 1,
+				src_rq->top_tasks_bitmap[src]);
+
+		if (dst_table[index] == 1)
+			__set_bit(NUM_LOAD_INDICES - index - 1,
+				dst_rq->top_tasks_bitmap[dst]);
+
+		if (index > dst_rq->curr_top)
+			dst_rq->curr_top = index;
+
+		/* Recompute src's top if we just emptied its top bucket */
+		top_index = src_rq->curr_top;
+		if (index == top_index && !src_table[index])
+			src_rq->curr_top = get_top_index(
+				src_rq->top_tasks_bitmap[src], top_index);
+	}
+
+	if (prev_window) {
+		/* The previous window lives in the other table/bitmap pair */
+		src = 1 - src;
+		dst = 1 - dst;
+		src_table = src_rq->top_tasks[src];
+		dst_table = dst_rq->top_tasks[dst];
+		index = load_to_index(prev_window);
+		src_table[index] -= 1;
+		dst_table[index] += 1;
+
+		if (!src_table[index])
+			__clear_bit(NUM_LOAD_INDICES - index - 1,
+				src_rq->top_tasks_bitmap[src]);
+
+		if (dst_table[index] == 1)
+			__set_bit(NUM_LOAD_INDICES - index - 1,
+				dst_rq->top_tasks_bitmap[dst]);
+
+		if (index > dst_rq->prev_top)
+			dst_rq->prev_top = index;
+
+		top_index = src_rq->prev_top;
+		if (index == top_index && !src_table[index])
+			src_rq->prev_top = get_top_index(
+				src_rq->top_tasks_bitmap[src], top_index);
+	}
+}
+
void fixup_busy_time(struct task_struct *p, int new_cpu)
{
struct rq *src_rq = task_rq(p);
@@ -3241,8 +3725,6 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
- int migrate_type;
- struct migration_sum_data d;
bool new_task;
struct related_thread_group *grp;
@@ -3276,75 +3758,62 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
new_task = is_new_task(p);
/* Protected by rq_lock */
grp = p->grp;
+
+ /*
+ * For frequency aggregation, we continue to do migration fixups
+ * even for intra cluster migrations. This is because, the aggregated
+ * load has to be reported on a single CPU regardless.
+ */
if (grp && sched_freq_aggregate) {
struct group_cpu_time *cpu_time;
- migrate_type = GROUP_TO_GROUP;
- /* Protected by rq_lock */
cpu_time = _group_cpu_time(grp, cpu_of(src_rq));
- d.src_rq = NULL;
- d.src_cpu_time = cpu_time;
src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
- /* Protected by rq_lock */
cpu_time = _group_cpu_time(grp, cpu_of(dest_rq));
- d.dst_rq = NULL;
- d.dst_cpu_time = cpu_time;
dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
sync_window_start(dest_rq, cpu_time);
- } else {
- migrate_type = RQ_TO_RQ;
- d.src_rq = src_rq;
- d.src_cpu_time = NULL;
- d.dst_rq = dest_rq;
- d.dst_cpu_time = NULL;
- src_curr_runnable_sum = &src_rq->curr_runnable_sum;
- src_prev_runnable_sum = &src_rq->prev_runnable_sum;
- src_nt_curr_runnable_sum = &src_rq->nt_curr_runnable_sum;
- src_nt_prev_runnable_sum = &src_rq->nt_prev_runnable_sum;
-
- dst_curr_runnable_sum = &dest_rq->curr_runnable_sum;
- dst_prev_runnable_sum = &dest_rq->prev_runnable_sum;
- dst_nt_curr_runnable_sum = &dest_rq->nt_curr_runnable_sum;
- dst_nt_prev_runnable_sum = &dest_rq->nt_prev_runnable_sum;
- }
- if (p->ravg.curr_window) {
- *src_curr_runnable_sum -= p->ravg.curr_window;
- *dst_curr_runnable_sum += p->ravg.curr_window;
- if (new_task) {
- *src_nt_curr_runnable_sum -= p->ravg.curr_window;
- *dst_nt_curr_runnable_sum += p->ravg.curr_window;
+ if (p->ravg.curr_window) {
+ *src_curr_runnable_sum -= p->ravg.curr_window;
+ *dst_curr_runnable_sum += p->ravg.curr_window;
+ if (new_task) {
+ *src_nt_curr_runnable_sum -=
+ p->ravg.curr_window;
+ *dst_nt_curr_runnable_sum +=
+ p->ravg.curr_window;
+ }
}
- }
- if (p->ravg.prev_window) {
- *src_prev_runnable_sum -= p->ravg.prev_window;
- *dst_prev_runnable_sum += p->ravg.prev_window;
- if (new_task) {
- *src_nt_prev_runnable_sum -= p->ravg.prev_window;
- *dst_nt_prev_runnable_sum += p->ravg.prev_window;
+ if (p->ravg.prev_window) {
+ *src_prev_runnable_sum -= p->ravg.prev_window;
+ *dst_prev_runnable_sum += p->ravg.prev_window;
+ if (new_task) {
+ *src_nt_prev_runnable_sum -=
+ p->ravg.prev_window;
+ *dst_nt_prev_runnable_sum +=
+ p->ravg.prev_window;
+ }
}
+ } else {
+ inter_cluster_migration_fixup(p, new_cpu,
+ task_cpu(p), new_task);
}
+ migrate_top_tasks(p, src_rq, dest_rq);
+
if (p == src_rq->ed_task) {
src_rq->ed_task = NULL;
if (!dest_rq->ed_task)
dest_rq->ed_task = p;
}
- trace_sched_migration_update_sum(p, migrate_type, &d);
- BUG_ON((s64)*src_prev_runnable_sum < 0);
- BUG_ON((s64)*src_curr_runnable_sum < 0);
- BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
- BUG_ON((s64)*src_nt_curr_runnable_sum < 0);
-
done:
if (p->state == TASK_WAKING)
double_rq_unlock(src_rq, dest_rq);
@@ -3496,6 +3965,9 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
struct migration_sum_data d;
int migrate_type;
+ int cpu = cpu_of(rq);
+ bool new_task = is_new_task(p);
+ int i;
if (!sched_freq_aggregate)
return;
@@ -3506,7 +3978,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
/* cpu_time protected by related_thread_group_lock, grp->lock rq_lock */
- cpu_time = _group_cpu_time(grp, cpu_of(rq));
+ cpu_time = _group_cpu_time(grp, cpu);
if (event == ADD_TASK) {
sync_window_start(rq, cpu_time);
migrate_type = RQ_TO_GROUP;
@@ -3523,6 +3995,19 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
src_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+ *src_curr_runnable_sum -= p->ravg.curr_window_cpu[cpu];
+ *src_prev_runnable_sum -= p->ravg.prev_window_cpu[cpu];
+ if (new_task) {
+ *src_nt_curr_runnable_sum -=
+ p->ravg.curr_window_cpu[cpu];
+ *src_nt_prev_runnable_sum -=
+ p->ravg.prev_window_cpu[cpu];
+ }
+
+ update_cluster_load_subtractions(p, cpu,
+ rq->window_start, new_task);
+
} else {
migrate_type = GROUP_TO_RQ;
d.src_rq = NULL;
@@ -3545,21 +4030,42 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
dst_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
dst_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+
+ *src_curr_runnable_sum -= p->ravg.curr_window;
+ *src_prev_runnable_sum -= p->ravg.prev_window;
+ if (new_task) {
+ *src_nt_curr_runnable_sum -= p->ravg.curr_window;
+ *src_nt_prev_runnable_sum -= p->ravg.prev_window;
+ }
+
+ /*
+ * Need to reset curr/prev windows for all CPUs, not just the
+ * ones in the same cluster. Since inter cluster migrations
+ * did not result in the appropriate book keeping, the values
+ * per CPU would be inaccurate.
+ */
+ for_each_possible_cpu(i) {
+ p->ravg.curr_window_cpu[i] = 0;
+ p->ravg.prev_window_cpu[i] = 0;
+ }
}
- *src_curr_runnable_sum -= p->ravg.curr_window;
*dst_curr_runnable_sum += p->ravg.curr_window;
-
- *src_prev_runnable_sum -= p->ravg.prev_window;
*dst_prev_runnable_sum += p->ravg.prev_window;
-
- if (is_new_task(p)) {
- *src_nt_curr_runnable_sum -= p->ravg.curr_window;
+ if (new_task) {
*dst_nt_curr_runnable_sum += p->ravg.curr_window;
- *src_nt_prev_runnable_sum -= p->ravg.prev_window;
*dst_nt_prev_runnable_sum += p->ravg.prev_window;
}
+ /*
+ * When a task enters or exits a group, its curr and prev windows are
+ * moved to a single CPU. This behavior might be sub-optimal in the
+ * exit case, however, it saves us the overhead of handling inter
+ * cluster migration fixups while the task is part of a related group.
+ */
+ p->ravg.curr_window_cpu[cpu] = p->ravg.curr_window;
+ p->ravg.prev_window_cpu[cpu] = p->ravg.prev_window;
+
trace_sched_migration_update_sum(p, migrate_type, &d);
BUG_ON((s64)*src_curr_runnable_sum < 0);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ada5e580e968..471dc9faab35 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -313,7 +313,7 @@ extern int tg_nop(struct task_group *tg, void *data);
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
-extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
+extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
struct sched_entity *se, int cpu,
struct sched_entity *parent);
@@ -351,13 +351,23 @@ struct cfs_bandwidth { };
#ifdef CONFIG_SCHED_HMP
+#define NUM_TRACKED_WINDOWS 2
+#define NUM_LOAD_INDICES 1000
+
struct hmp_sched_stats {
int nr_big_tasks;
u64 cumulative_runnable_avg;
u64 pred_demands_sum;
};
+struct load_subtractions {
+ u64 window_start;
+ u64 subs;
+ u64 new_subs;
+};
+
struct sched_cluster {
+ raw_spinlock_t load_lock;
struct list_head list;
struct cpumask cpus;
int id;
@@ -742,6 +752,13 @@ struct rq {
u64 prev_runnable_sum;
u64 nt_curr_runnable_sum;
u64 nt_prev_runnable_sum;
+ struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
+ DECLARE_BITMAP_ARRAY(top_tasks_bitmap,
+ NUM_TRACKED_WINDOWS, NUM_LOAD_INDICES);
+ u8 *top_tasks[NUM_TRACKED_WINDOWS];
+ u8 curr_table;
+ int prev_top;
+ int curr_top;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -1017,6 +1034,10 @@ static inline void sched_ttwu_pending(void) { }
#define WINDOW_STATS_AVG 3
#define WINDOW_STATS_INVALID_POLICY 4
+#define FREQ_REPORT_MAX_CPU_LOAD_TOP_TASK 0
+#define FREQ_REPORT_CPU_LOAD 1
+#define FREQ_REPORT_TOP_TASK 2
+
#define MAJOR_TASK_PCT 85
#define SCHED_UPMIGRATE_MIN_NICE 15
#define EXITING_TASK_MARKER 0xdeaddead
@@ -1056,8 +1077,9 @@ extern unsigned int __read_mostly sched_spill_load;
extern unsigned int __read_mostly sched_upmigrate;
extern unsigned int __read_mostly sched_downmigrate;
extern unsigned int __read_mostly sysctl_sched_spill_nr_run;
+extern unsigned int __read_mostly sched_load_granule;
-extern void init_new_task_load(struct task_struct *p);
+extern void init_new_task_load(struct task_struct *p, bool idle_task);
extern u64 sched_ktime_clock(void);
extern int got_boost_kick(void);
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
@@ -1401,6 +1423,7 @@ extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
struct cftype *cft, u64 upmigrate_discourage);
extern void sched_hmp_parse_dt(void);
extern void init_sched_hmp_boost_policy(void);
+extern void clear_top_tasks_bitmap(unsigned long *bitmap);
#else /* CONFIG_SCHED_HMP */
@@ -1503,7 +1526,9 @@ static inline struct sched_cluster *rq_cluster(struct rq *rq)
return NULL;
}
-static inline void init_new_task_load(struct task_struct *p) { }
+static inline void init_new_task_load(struct task_struct *p, bool idle_task)
+{
+}
static inline u64 scale_load_to_cpu(u64 load, int cpu)
{
@@ -1570,8 +1595,6 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
static inline void add_new_task_to_grp(struct task_struct *new) {}
#define sched_enable_hmp 0
-#define sched_freq_legacy_mode 1
-#define sched_migration_fixup 0
#define PRED_DEMAND_DELTA (0)
static inline void
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index 4c44b1a4ad98..4f8182302e5e 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -1,7 +1,231 @@
+#include <linux/cgroup.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+
#include "sched.h"
unsigned int sysctl_sched_cfs_boost __read_mostly;
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+
+/*
+ * EAS scheduler tunables for task groups.
+ */
+
+/* SchedTune tunables for a group of tasks */
+struct schedtune {
+ /* SchedTune CGroup subsystem */
+ struct cgroup_subsys_state css;
+
+ /* Boost group allocated ID */
+ int idx;
+
+ /* Boost value for tasks on that SchedTune CGroup */
+ int boost;
+
+};
+
+static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
+{
+ return container_of(css, struct schedtune, css);
+}
+
+static inline struct schedtune *task_schedtune(struct task_struct *tsk)
+{
+ return css_st(task_css(tsk, schedtune_cgrp_id));
+}
+
+static inline struct schedtune *parent_st(struct schedtune *st)
+{
+ return css_st(st->css.parent);
+}
+
+/*
+ * SchedTune root control group
+ * The root control group is used to define a system-wide boosting tuning,
+ * which is applied to all tasks in the system.
+ * Task specific boost tuning could be specified by creating and
+ * configuring a child control group under the root one.
+ * By default, system-wide boosting is disabled, i.e. no boosting is applied
+ * to tasks which are not in a child control group.
+ */
+static struct schedtune
+root_schedtune = {
+ .boost = 0,
+};
+
+/*
+ * Maximum number of boost groups to support
+ * When per-task boosting is used we still allow only limited number of
+ * boost groups for two main reasons:
+ * 1. on a real system we usually have only few classes of workloads which
+ * make sense to boost with different values (e.g. background vs foreground
+ * tasks, interactive vs low-priority tasks)
+ * 2. a limited number allows for a simpler and more memory/time efficient
+ * implementation especially for the computation of the per-CPU boost
+ * value
+ */
+#define BOOSTGROUPS_COUNT 5
+
+/* Array of configured boostgroups */
+static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
+ &root_schedtune,
+ NULL,
+};
+
+/* SchedTune boost groups
+ * Keep track of all the boost groups which impact on CPU, for example when a
+ * CPU has two RUNNABLE tasks belonging to two different boost groups and thus
+ * likely with different boost values.
+ * Since on each system we expect only a limited number of boost groups, here
+ * we use a simple array to keep track of the metrics required to compute the
+ * maximum per-CPU boosting value.
+ */
+struct boost_groups {
+ /* Maximum boost value for all RUNNABLE tasks on a CPU */
+ unsigned boost_max;
+ struct {
+ /* The boost for tasks on that boost group */
+ unsigned boost;
+ /* Count of RUNNABLE tasks on that boost group */
+ unsigned tasks;
+ } group[BOOSTGROUPS_COUNT];
+};
+
+/* Boost groups affecting each CPU in the system */
+DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);
+
+static u64
+boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+ struct schedtune *st = css_st(css);
+
+ return st->boost;
+}
+
+static int
+boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 boost)
+{
+ struct schedtune *st = css_st(css);
+
+ if (boost < 0 || boost > 100)
+ return -EINVAL;
+
+ st->boost = boost;
+ if (css == &root_schedtune.css)
+ sysctl_sched_cfs_boost = boost;
+
+ return 0;
+}
+
+static struct cftype files[] = {
+ {
+ .name = "boost",
+ .read_u64 = boost_read,
+ .write_u64 = boost_write,
+ },
+ { } /* terminate */
+};
+
+static int
+schedtune_boostgroup_init(struct schedtune *st)
+{
+ /* Keep track of allocated boost groups */
+ allocated_group[st->idx] = st;
+
+ return 0;
+}
+
+static int
+schedtune_init(void)
+{
+ struct boost_groups *bg;
+ int cpu;
+
+ /* Initialize the per CPU boost groups */
+ for_each_possible_cpu(cpu) {
+ bg = &per_cpu(cpu_boost_groups, cpu);
+ memset(bg, 0, sizeof(struct boost_groups));
+ }
+
+ pr_info(" schedtune configured to support %d boost groups\n",
+ BOOSTGROUPS_COUNT);
+ return 0;
+}
+
+static struct cgroup_subsys_state *
+schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+ struct schedtune *st;
+ int idx;
+
+ if (!parent_css) {
+ schedtune_init();
+ return &root_schedtune.css;
+ }
+
+	/* Allow only single level hierarchies */
+ if (parent_css != &root_schedtune.css) {
+ pr_err("Nested SchedTune boosting groups not allowed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Allow only a limited number of boosting groups */
+ for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx)
+ if (!allocated_group[idx])
+ break;
+ if (idx == BOOSTGROUPS_COUNT) {
+ pr_err("Trying to create more than %d SchedTune boosting groups\n",
+ BOOSTGROUPS_COUNT);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto out;
+
+ /* Initialize per CPUs boost group support */
+ st->idx = idx;
+ if (schedtune_boostgroup_init(st))
+ goto release;
+
+ return &st->css;
+
+release:
+ kfree(st);
+out:
+ return ERR_PTR(-ENOMEM);
+}
+
+static void
+schedtune_boostgroup_release(struct schedtune *st)
+{
+ /* Keep track of allocated boost groups */
+ allocated_group[st->idx] = NULL;
+}
+
+static void
+schedtune_css_free(struct cgroup_subsys_state *css)
+{
+ struct schedtune *st = css_st(css);
+
+ schedtune_boostgroup_release(st);
+ kfree(st);
+}
+
+struct cgroup_subsys schedtune_cgrp_subsys = {
+ .css_alloc = schedtune_css_alloc,
+ .css_free = schedtune_css_free,
+ .legacy_cftypes = files,
+ .early_init = 1,
+ .allow_attach = subsys_cgroup_allow_attach,
+};
+
+#endif /* CONFIG_CGROUP_SCHEDTUNE */
+
int
sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 6949476a118f..3a0415803b09 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -32,7 +32,7 @@ struct task_struct *idle_thread_get(unsigned int cpu)
if (!tsk)
return ERR_PTR(-ENOMEM);
- init_idle(tsk, cpu);
+ init_idle(tsk, cpu, true);
return tsk;
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8e2f4ab15498..c72cb2053da7 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -297,6 +297,14 @@ static struct ctl_table kern_table[] = {
},
#ifdef CONFIG_SCHED_HMP
{
+ .procname = "sched_freq_reporting_policy",
+ .data = &sysctl_sched_freq_reporting_policy,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
+ {
.procname = "sched_freq_inc_notify",
.data = &sysctl_sched_freq_inc_notify,
.maxlen = sizeof(unsigned int),
@@ -636,7 +644,11 @@ static struct ctl_table kern_table[] = {
.procname = "sched_cfs_boost",
.data = &sysctl_sched_cfs_boost,
.maxlen = sizeof(sysctl_sched_cfs_boost),
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+ .mode = 0444,
+#else
.mode = 0644,
+#endif
.proc_handler = &sysctl_sched_cfs_boost_handler,
.extra1 = &zero,
.extra2 = &one_hundred,
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 2b3f46c049d4..554522934c44 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -74,7 +74,7 @@ next_tag:
/* Extract a tag from the data */
tag = data[dp++];
- if (tag == 0) {
+ if (tag == ASN1_EOC) {
/* It appears to be an EOC. */
if (data[dp++] != 0)
goto invalid_eoc;
@@ -96,10 +96,8 @@ next_tag:
/* Extract the length */
len = data[dp++];
- if (len <= 0x7f) {
- dp += len;
- goto next_tag;
- }
+ if (len <= 0x7f)
+ goto check_length;
if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
/* Indefinite length */
@@ -110,14 +108,18 @@ next_tag:
}
n = len - 0x80;
- if (unlikely(n > sizeof(size_t) - 1))
+ if (unlikely(n > sizeof(len) - 1))
goto length_too_long;
if (unlikely(n > datalen - dp))
goto data_overrun_error;
- for (len = 0; n > 0; n--) {
+ len = 0;
+ for (; n > 0; n--) {
len <<= 8;
len |= data[dp++];
}
+check_length:
+ if (len > datalen - dp)
+ goto data_overrun_error;
dp += len;
goto next_tag;
diff --git a/net/core/dev.c b/net/core/dev.c
index a299c3956daa..a4c647893e52 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3544,9 +3544,6 @@ static int netif_rx_internal(struct sk_buff *skb)
trace_netif_rx(skb);
#ifdef CONFIG_RPS
- WARN_ONCE(skb_cloned(skb), "Cloned packet from dev %s\n",
- skb->dev->name);
-
if (static_key_false(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b8f7e621e16e..32027efa5033 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -89,7 +89,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
/* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;
int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
@@ -3428,7 +3428,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
static u32 challenge_timestamp;
static unsigned int challenge_count;
struct tcp_sock *tp = tcp_sk(sk);
- u32 now;
+ u32 count, now;
/* First check our per-socket dupack rate limit. */
if (tcp_oow_rate_limited(sock_net(sk), skb,
@@ -3436,13 +3436,18 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
&tp->last_oow_ack_time))
return;
- /* Then check the check host-wide RFC 5961 rate limit. */
+ /* Then check host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
if (now != challenge_timestamp) {
+ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
challenge_timestamp = now;
- challenge_count = 0;
+ WRITE_ONCE(challenge_count, half +
+ prandom_u32_max(sysctl_tcp_challenge_ack_limit));
}
- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+ count = READ_ONCE(challenge_count);
+ if (count > 0) {
+ WRITE_ONCE(challenge_count, count - 1);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 3e47e0641780..23b7c76ff2d8 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -416,7 +416,7 @@ country EE: DFS-ETSI
(57240 - 65880 @ 2160), (40), NO-OUTDOOR
country EG: DFS-ETSI
- (2402 - 2482 @ 40), (20)
+ (2402 - 2482 @ 20), (20)
(5170 - 5250 @ 20), (23)
(5250 - 5330 @ 20), (23), DFS
diff --git a/sound/soc/codecs/msm_hdmi_codec_rx.c b/sound/soc/codecs/msm_hdmi_codec_rx.c
index dee66f231ceb..7d649ba2b505 100644
--- a/sound/soc/codecs/msm_hdmi_codec_rx.c
+++ b/sound/soc/codecs/msm_hdmi_codec_rx.c
@@ -318,8 +318,9 @@ static void msm_ext_disp_audio_codec_rx_dai_shutdown(
struct msm_ext_disp_audio_codec_rx_data *codec_data =
dev_get_drvdata(dai->codec->dev);
- if (!codec_data || !codec_data->ext_disp_ops.cable_status) {
- dev_err(dai->dev, "%s: codec data or cable_status is null\n",
+ if (!codec_data || !codec_data->ext_disp_ops.teardown_done ||
+ !codec_data->ext_disp_ops.cable_status) {
+ dev_err(dai->dev, "%s: codec data or teardown_done or cable_status is null\n",
__func__);
return;
}
@@ -332,6 +333,8 @@ static void msm_ext_disp_audio_codec_rx_dai_shutdown(
__func__);
}
+ codec_data->ext_disp_ops.teardown_done(
+ codec_data->ext_disp_core_pdev);
return;
}
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index ee8b27dbec64..d9d413f0a80a 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -136,7 +136,7 @@ struct wdsp_ramdump_data {
void *rd_v_addr;
/* Data provided through error interrupt */
- struct wdsp_err_intr_arg err_data;
+ struct wdsp_err_signal_arg err_data;
};
struct wdsp_mgr_priv {
@@ -608,7 +608,7 @@ static struct device *wdsp_get_dev_for_cmpnt(struct device *wdsp_dev,
static void wdsp_collect_ramdumps(struct wdsp_mgr_priv *wdsp)
{
struct wdsp_img_section img_section;
- struct wdsp_err_intr_arg *data = &wdsp->dump_data.err_data;
+ struct wdsp_err_signal_arg *data = &wdsp->dump_data.err_data;
struct ramdump_segment rd_seg;
int ret = 0;
@@ -684,17 +684,18 @@ static void wdsp_ssr_work_fn(struct work_struct *work)
WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
- wdsp_collect_ramdumps(wdsp);
-
- /* In case of CDC_DOWN event, the DSP is already shutdown */
- if (wdsp->ssr_type != WDSP_SSR_TYPE_CDC_DOWN) {
+ /* Issue ramdumps and shutdown only if DSP is currently booted */
+ if (WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_BOOTED)) {
+ wdsp_collect_ramdumps(wdsp);
ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_CONTROL,
WDSP_EVENT_DO_SHUTDOWN, NULL);
if (IS_ERR_VALUE(ret))
WDSP_ERR(wdsp, "Failed WDSP shutdown, err = %d", ret);
+
+ wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_SHUTDOWN,
+ NULL);
+ WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_BOOTED);
}
- wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_SHUTDOWN, NULL);
- WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_BOOTED);
WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
ret = wait_for_completion_timeout(&wdsp->ready_compl,
@@ -739,7 +740,7 @@ static int wdsp_ssr_handler(struct wdsp_mgr_priv *wdsp, void *arg,
enum wdsp_ssr_type ssr_type)
{
enum wdsp_ssr_type current_ssr_type;
- struct wdsp_err_intr_arg *err_data;
+ struct wdsp_err_signal_arg *err_data;
WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
@@ -750,7 +751,7 @@ static int wdsp_ssr_handler(struct wdsp_mgr_priv *wdsp, void *arg,
wdsp->ssr_type = ssr_type;
if (arg) {
- err_data = (struct wdsp_err_intr_arg *) arg;
+ err_data = (struct wdsp_err_signal_arg *) arg;
memcpy(&wdsp->dump_data.err_data, err_data,
sizeof(*err_data));
} else {
@@ -761,16 +762,29 @@ static int wdsp_ssr_handler(struct wdsp_mgr_priv *wdsp, void *arg,
switch (ssr_type) {
case WDSP_SSR_TYPE_WDSP_DOWN:
- case WDSP_SSR_TYPE_CDC_DOWN:
__wdsp_clr_ready_locked(wdsp, WDSP_SSR_STATUS_WDSP_READY);
- if (ssr_type == WDSP_SSR_TYPE_CDC_DOWN)
- __wdsp_clr_ready_locked(wdsp,
- WDSP_SSR_STATUS_CDC_READY);
wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_PRE_SHUTDOWN,
NULL);
schedule_work(&wdsp->ssr_work);
break;
+ case WDSP_SSR_TYPE_CDC_DOWN:
+ __wdsp_clr_ready_locked(wdsp, WDSP_SSR_STATUS_CDC_READY);
+ /*
+ * If DSP is booted when CDC_DOWN is received, it needs
+ * to be shutdown.
+ */
+ if (WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_BOOTED)) {
+ __wdsp_clr_ready_locked(wdsp,
+ WDSP_SSR_STATUS_WDSP_READY);
+ wdsp_broadcast_event_downseq(wdsp,
+ WDSP_EVENT_PRE_SHUTDOWN,
+ NULL);
+ }
+
+ schedule_work(&wdsp->ssr_work);
+ break;
+
case WDSP_SSR_TYPE_CDC_UP:
__wdsp_set_ready_locked(wdsp, WDSP_SSR_STATUS_CDC_READY, true);
break;
@@ -787,8 +801,8 @@ static int wdsp_ssr_handler(struct wdsp_mgr_priv *wdsp, void *arg,
return 0;
}
-static int wdsp_intr_handler(struct device *wdsp_dev,
- enum wdsp_intr intr, void *arg)
+static int wdsp_signal_handler(struct device *wdsp_dev,
+ enum wdsp_signal signal, void *arg)
{
struct wdsp_mgr_priv *wdsp;
int ret;
@@ -799,7 +813,9 @@ static int wdsp_intr_handler(struct device *wdsp_dev,
wdsp = dev_get_drvdata(wdsp_dev);
WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->api_mutex);
- switch (intr) {
+ WDSP_DBG(wdsp, "Raised signal %d", signal);
+
+ switch (signal) {
case WDSP_IPC1_INTR:
ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_IPC,
WDSP_EVENT_IPC1_INTR, NULL);
@@ -807,14 +823,20 @@ static int wdsp_intr_handler(struct device *wdsp_dev,
case WDSP_ERR_INTR:
ret = wdsp_ssr_handler(wdsp, arg, WDSP_SSR_TYPE_WDSP_DOWN);
break;
+ case WDSP_CDC_DOWN_SIGNAL:
+ ret = wdsp_ssr_handler(wdsp, arg, WDSP_SSR_TYPE_CDC_DOWN);
+ break;
+ case WDSP_CDC_UP_SIGNAL:
+ ret = wdsp_ssr_handler(wdsp, arg, WDSP_SSR_TYPE_CDC_UP);
+ break;
default:
ret = -EINVAL;
break;
}
if (IS_ERR_VALUE(ret))
- WDSP_ERR(wdsp, "handling intr %d failed with error %d",
- intr, ret);
+ WDSP_ERR(wdsp, "handling signal %d failed with error %d",
+ signal, ret);
WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->api_mutex);
return ret;
@@ -870,7 +892,7 @@ static int wdsp_resume(struct device *wdsp_dev)
static struct wdsp_mgr_ops wdsp_ops = {
.register_cmpnt_ops = wdsp_register_cmpnt_ops,
.get_dev_for_cmpnt = wdsp_get_dev_for_cmpnt,
- .intr_handler = wdsp_intr_handler,
+ .signal_handler = wdsp_signal_handler,
.vote_for_dsp = wdsp_vote_for_dsp,
.suspend = wdsp_suspend,
.resume = wdsp_resume,
diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c
index 60efcb174740..70be9c98b481 100644
--- a/sound/soc/codecs/wcd-spi.c
+++ b/sound/soc/codecs/wcd-spi.c
@@ -80,7 +80,9 @@
/* Word sizes and min/max lengths */
#define WCD_SPI_WORD_BYTE_CNT (4)
#define WCD_SPI_RW_MULTI_MIN_LEN (16)
-#define WCD_SPI_RW_MULTI_MAX_LEN (64 * 1024)
+
+/* Max size is closest multiple of 16 less than 64Kbytes */
+#define WCD_SPI_RW_MULTI_MAX_LEN ((64 * 1024) - 16)
/* Alignment requirements */
#define WCD_SPI_RW_MIN_ALIGN WCD_SPI_WORD_BYTE_CNT
@@ -104,6 +106,12 @@
mutex_unlock(&lock); \
}
+struct wcd_spi_debug_data {
+ struct dentry *dir;
+ u32 addr;
+ u32 size;
+};
+
struct wcd_spi_priv {
struct spi_device *spi;
u32 mem_base_addr;
@@ -133,6 +141,9 @@ struct wcd_spi_priv {
struct device *m_dev;
struct wdsp_mgr_ops *m_ops;
+
+ /* Debugfs related information */
+ struct wcd_spi_debug_data debug_data;
};
enum xfer_request {
@@ -319,7 +330,7 @@ static int wcd_spi_transfer_split(struct spi_device *spi,
u32 addr = data_msg->remote_addr;
u8 *data = data_msg->data;
int remain_size = data_msg->len;
- int to_xfer, loop_cnt, ret;
+ int to_xfer, loop_cnt, ret = 0;
/* Perform single writes until multi word alignment is met */
loop_cnt = 1;
@@ -631,14 +642,21 @@ static int wcd_spi_init(struct spi_device *spi)
if (IS_ERR_VALUE(ret))
goto err_wr_en;
+ /*
+ * In case spi_init is called after component deinit,
+ * it is possible hardware register state is also reset.
+ * Sync the regcache here so hardware state is updated
+ * to reflect the cache.
+ */
+ regcache_sync(wcd_spi->regmap);
+
regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,
0x0F3D0800);
- /* Write the MTU to 64K */
+ /* Write the MTU to max allowed size */
regmap_update_bits(wcd_spi->regmap,
WCD_SPI_SLAVE_TRNS_LEN,
- 0xFFFF0000,
- (WCD_SPI_RW_MULTI_MAX_LEN / 4) << 16);
+ 0xFFFF0000, 0xFFFF0000);
err_wr_en:
wcd_spi_clk_ctrl(spi, WCD_SPI_CLK_DISABLE,
WCD_SPI_CLK_FLAG_IMMEDIATE);
@@ -837,7 +855,7 @@ static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
void *data)
{
struct spi_device *spi = to_spi_device(dev);
- int ret;
+ int ret = 0;
dev_dbg(&spi->dev, "%s: event type %d\n",
__func__, event);
@@ -1004,20 +1022,81 @@ static const struct file_operations state_fops = {
.release = single_release,
};
+static ssize_t wcd_spi_debugfs_mem_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct spi_device *spi = file->private_data;
+ struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+ struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
+ struct wcd_spi_msg msg;
+ ssize_t buf_size, read_count = 0;
+ char *buf;
+ int ret;
+
+ if (*ppos < 0 || !count)
+ return -EINVAL;
+
+ if (dbg_data->size == 0 || dbg_data->addr == 0) {
+ dev_err(&spi->dev,
+ "%s: Invalid request, size = %u, addr = 0x%x\n",
+ __func__, dbg_data->size, dbg_data->addr);
+ return 0;
+ }
+
+ buf_size = count < dbg_data->size ? count : dbg_data->size;
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ msg.data = buf;
+ msg.remote_addr = dbg_data->addr;
+ msg.len = buf_size;
+ msg.flags = 0;
+
+ ret = wcd_spi_data_read(spi, &msg);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(&spi->dev,
+ "%s: Failed to read %zu bytes from addr 0x%x\n",
+ __func__, buf_size, msg.remote_addr);
+ goto done;
+ }
+
+ read_count = simple_read_from_buffer(ubuf, count, ppos, buf, buf_size);
+
+done:
+ kfree(buf);
+ if (ret < 0)
+ return ret;
+ else
+ return read_count;
+}
+
+static const struct file_operations mem_read_fops = {
+ .open = simple_open,
+ .read = wcd_spi_debugfs_mem_read,
+};
+
static int wcd_spi_debugfs_init(struct spi_device *spi)
{
+ struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
+ struct wcd_spi_debug_data *dbg_data = &wcd_spi->debug_data;
int rc = 0;
- struct dentry *dir;
- dir = debugfs_create_dir("wcd_spi", NULL);
- if (IS_ERR_OR_NULL(dir)) {
- dir = NULL;
+ dbg_data->dir = debugfs_create_dir("wcd_spi", NULL);
+ if (IS_ERR_OR_NULL(dbg_data->dir)) {
+ dbg_data->dir = NULL;
rc = -ENODEV;
goto done;
}
- debugfs_create_file("state", 0444, dir, spi, &state_fops);
+ debugfs_create_file("state", 0444, dbg_data->dir, spi, &state_fops);
+ debugfs_create_u32("addr", S_IRUGO | S_IWUSR, dbg_data->dir,
+ &dbg_data->addr);
+ debugfs_create_u32("size", S_IRUGO | S_IWUSR, dbg_data->dir,
+ &dbg_data->size);
+ debugfs_create_file("mem_read", S_IRUGO, dbg_data->dir,
+ spi, &mem_read_fops);
done:
return rc;
}
@@ -1093,46 +1172,12 @@ static struct regmap_config wcd_spi_regmap_cfg = {
static int wdsp_spi_init(struct device *dev, void *priv_data)
{
struct spi_device *spi = to_spi_device(dev);
- struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
int ret;
- wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
- wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);
-
- wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
- &spi->dev, &wcd_spi_regmap_cfg);
- if (IS_ERR(wcd_spi->regmap)) {
- ret = PTR_ERR(wcd_spi->regmap);
- dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
- __func__, ret);
- goto err_regmap;
- }
-
- if (wcd_spi_debugfs_init(spi))
- dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);
-
- spi_message_init(&wcd_spi->msg1);
- spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);
-
- spi_message_init(&wcd_spi->msg2);
- spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
- spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
-
ret = wcd_spi_init(spi);
- if (IS_ERR_VALUE(ret)) {
+ if (IS_ERR_VALUE(ret))
dev_err(&spi->dev, "%s: Init failed, err = %d\n",
__func__, ret);
- goto err_init;
- }
-
- return 0;
-
-err_init:
- spi_transfer_del(&wcd_spi->xfer1);
- spi_transfer_del(&wcd_spi->xfer2[0]);
- spi_transfer_del(&wcd_spi->xfer2[1]);
-
-err_regmap:
return ret;
}
@@ -1141,9 +1186,11 @@ static int wdsp_spi_deinit(struct device *dev, void *priv_data)
struct spi_device *spi = to_spi_device(dev);
struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
- spi_transfer_del(&wcd_spi->xfer1);
- spi_transfer_del(&wcd_spi->xfer2[0]);
- spi_transfer_del(&wcd_spi->xfer2[1]);
+ /*
+ * Deinit means the hardware is reset. Mark the cache
+ * as dirty here, so init will sync the cache
+ */
+ regcache_mark_dirty(wcd_spi->regmap);
return 0;
}
@@ -1170,9 +1217,34 @@ static int wcd_spi_component_bind(struct device *dev,
ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
wcd_spi,
&wdsp_spi_ops);
- if (ret)
+ if (ret) {
dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
__func__, ret);
+ goto done;
+ }
+
+ wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
+ wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);
+
+ wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
+ &spi->dev, &wcd_spi_regmap_cfg);
+ if (IS_ERR(wcd_spi->regmap)) {
+ ret = PTR_ERR(wcd_spi->regmap);
+ dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ if (wcd_spi_debugfs_init(spi))
+ dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);
+
+ spi_message_init(&wcd_spi->msg1);
+ spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);
+
+ spi_message_init(&wcd_spi->msg2);
+ spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
+ spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
+done:
return ret;
}
@@ -1185,6 +1257,10 @@ static void wcd_spi_component_unbind(struct device *dev,
wcd_spi->m_dev = NULL;
wcd_spi->m_ops = NULL;
+
+ spi_transfer_del(&wcd_spi->xfer1);
+ spi_transfer_del(&wcd_spi->xfer2[0]);
+ spi_transfer_del(&wcd_spi->xfer2[1]);
}
static const struct component_ops wcd_spi_component_ops = {
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 46b8e7f72eb8..ed984496aec1 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -842,6 +842,9 @@ struct tasha_priv {
int rx_8_count;
bool clk_mode;
bool clk_internal;
+
+ /* Lock to protect mclk enablement */
+ struct mutex mclk_lock;
};
static int tasha_codec_vote_max_bw(struct snd_soc_codec *codec,
@@ -1152,13 +1155,14 @@ static int tasha_cdc_req_mclk_enable(struct tasha_priv *tasha,
{
int ret = 0;
+ mutex_lock(&tasha->mclk_lock);
if (enable) {
tasha_cdc_sido_ccl_enable(tasha, true);
ret = clk_prepare_enable(tasha->wcd_ext_clk);
if (ret) {
dev_err(tasha->dev, "%s: ext clk enable failed\n",
__func__);
- goto err;
+ goto unlock_mutex;
}
/* get BG */
wcd_resmgr_enable_master_bias(tasha->resmgr);
@@ -1172,7 +1176,8 @@ static int tasha_cdc_req_mclk_enable(struct tasha_priv *tasha,
clk_disable_unprepare(tasha->wcd_ext_clk);
tasha_cdc_sido_ccl_enable(tasha, false);
}
-err:
+unlock_mutex:
+ mutex_unlock(&tasha->mclk_lock);
return ret;
}
@@ -4596,9 +4601,11 @@ static int tasha_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
if (!ret) {
wcd_clsh_imped_config(codec, impedl, false);
set_bit(CLASSH_CONFIG, &tasha->status_mask);
- } else
+ } else {
dev_dbg(codec->dev, "%s: Failed to get mbhc impedance %d\n",
__func__, ret);
+ ret = 0;
+ }
break;
@@ -10258,14 +10265,14 @@ static int tasha_codec_ec_buf_mux_enable(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_POST_PMU:
snd_soc_write(codec, WCD9335_CPE_SS_EC_BUF_INT_PERIOD, 0x3B);
- snd_soc_update_bits(codec, WCD9335_CPE_SS_CFG, 0x68, 0x28);
+ snd_soc_update_bits(codec, WCD9335_CPE_SS_CFG, 0x08, 0x08);
snd_soc_update_bits(codec, WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0,
0x08, 0x08);
break;
case SND_SOC_DAPM_POST_PMD:
snd_soc_update_bits(codec, WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0,
0x08, 0x00);
- snd_soc_update_bits(codec, WCD9335_CPE_SS_CFG, 0x68, 0x40);
+ snd_soc_update_bits(codec, WCD9335_CPE_SS_CFG, 0x08, 0x00);
snd_soc_write(codec, WCD9335_CPE_SS_EC_BUF_INT_PERIOD, 0x00);
break;
}
@@ -14112,6 +14119,7 @@ static int tasha_probe(struct platform_device *pdev)
mutex_init(&tasha->swr_read_lock);
mutex_init(&tasha->swr_write_lock);
mutex_init(&tasha->swr_clk_lock);
+ mutex_init(&tasha->mclk_lock);
cdc_pwr = devm_kzalloc(&pdev->dev, sizeof(struct wcd9xxx_power_region),
GFP_KERNEL);
@@ -14197,6 +14205,7 @@ err_clk:
err_resmgr:
devm_kfree(&pdev->dev, cdc_pwr);
err_cdc_pwr:
+ mutex_destroy(&tasha->mclk_lock);
devm_kfree(&pdev->dev, tasha);
return ret;
}
@@ -14211,6 +14220,7 @@ static int tasha_remove(struct platform_device *pdev)
clk_put(tasha->wcd_ext_clk);
if (tasha->wcd_native_clk)
clk_put(tasha->wcd_native_clk);
+ mutex_destroy(&tasha->mclk_lock);
devm_kfree(&pdev->dev, tasha);
snd_soc_unregister_codec(&pdev->dev);
return 0;
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsd.c b/sound/soc/codecs/wcd934x/wcd934x-dsd.c
index 55072466af55..4e3e769585e6 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsd.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsd.c
@@ -28,6 +28,9 @@
#define DSD_VOLUME_STEP_DELAY_US ((1000 * DSD_VOLUME_UPDATE_DELAY_MS) / \
(2 * DSD_VOLUME_STEPS))
+#define TAVIL_VERSION_1_0 0
+#define TAVIL_VERSION_1_1 1
+
static const DECLARE_TLV_DB_MINMAX(tavil_dsd_db_scale, DSD_VOLUME_MIN_M110dB,
DSD_VOLUME_MAX_0dB);
@@ -369,6 +372,14 @@ static void tavil_dsd_data_pull(struct snd_soc_codec *codec, int dsd_num,
}
}
+static void tavil_dsd_update_volume(struct tavil_dsd_config *dsd_conf)
+{
+ snd_soc_update_bits(dsd_conf->codec, WCD934X_CDC_TOP_TOP_CFG0,
+ 0x01, 0x01);
+ snd_soc_update_bits(dsd_conf->codec, WCD934X_CDC_TOP_TOP_CFG0,
+ 0x01, 0x00);
+}
+
static int tavil_enable_dsd(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -429,6 +440,8 @@ static int tavil_enable_dsd(struct snd_soc_dapm_widget *w,
/* Apply Gain */
snd_soc_write(codec, WCD934X_CDC_DSD0_CFG1,
dsd_conf->volume[DSD0]);
+ if (dsd_conf->version == TAVIL_VERSION_1_1)
+ tavil_dsd_update_volume(dsd_conf);
} else if (w->shift == DSD1) {
snd_soc_update_bits(codec, WCD934X_CDC_DSD1_PATH_CTL,
@@ -440,6 +453,8 @@ static int tavil_enable_dsd(struct snd_soc_dapm_widget *w,
/* Apply Gain */
snd_soc_write(codec, WCD934X_CDC_DSD1_CFG1,
dsd_conf->volume[DSD1]);
+ if (dsd_conf->version == TAVIL_VERSION_1_1)
+ tavil_dsd_update_volume(dsd_conf);
}
/* 10msec sleep required after DSD clock is set */
usleep_range(10000, 10100);
@@ -538,16 +553,23 @@ static int tavil_dsd_vol_put(struct snd_kcontrol *kcontrol,
snd_soc_write(codec,
WCD934X_CDC_DSD0_CFG1 + 16 * dsd_idx,
nv1);
+ if (dsd_conf->version == TAVIL_VERSION_1_1)
+ tavil_dsd_update_volume(dsd_conf);
+
/* sleep required after each volume step */
usleep_range(DSD_VOLUME_STEP_DELAY_US,
(DSD_VOLUME_STEP_DELAY_US +
DSD_VOLUME_USLEEP_MARGIN_US));
}
- if (nv1 != nv[dsd_idx])
+ if (nv1 != nv[dsd_idx]) {
snd_soc_write(codec,
WCD934X_CDC_DSD0_CFG1 + 16 * dsd_idx,
nv[dsd_idx]);
+ if (dsd_conf->version == TAVIL_VERSION_1_1)
+ tavil_dsd_update_volume(dsd_conf);
+ }
+
dsd_conf->volume[dsd_idx] = nv[dsd_idx];
}
@@ -629,9 +651,14 @@ struct tavil_dsd_config *tavil_dsd_init(struct snd_soc_codec *codec)
dsd_conf->codec = codec;
+ /* Read version */
+ dsd_conf->version = snd_soc_read(codec,
+ WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0);
/* DSD registers init */
- snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2, 0x02, 0x00);
- snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2, 0x02, 0x00);
+ if (dsd_conf->version == TAVIL_VERSION_1_0) {
+ snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2, 0x02, 0x00);
+ snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2, 0x02, 0x00);
+ }
/* DSD0: Mute EN */
snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2, 0x04, 0x04);
/* DSD1: Mute EN */
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsd.h b/sound/soc/codecs/wcd934x/wcd934x-dsd.h
index c033795beb9b..21450c90a272 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsd.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsd.h
@@ -40,6 +40,7 @@ struct tavil_dsd_config {
u32 base_sample_rate[DSD_MAX];
int volume[DSD_MAX];
struct mutex vol_mutex;
+ int version;
};
#ifdef CONFIG_SND_SOC_WCD934X_DSD
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
index e649770297f1..8d2247176607 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
@@ -523,7 +523,9 @@ static int wcd_cntl_enable_memory(struct wcd_dsp_cntl *cntl)
ARRAY_SIZE(mem_enable_values),
mem_enable_values);
- snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0, 0x05);
+ /* Make sure Deep sleep of memories is enabled for all banks */
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0, 0xFF);
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_1, 0x0F);
done:
return ret;
}
@@ -533,6 +535,7 @@ static void wcd_cntl_disable_memory(struct wcd_dsp_cntl *cntl)
struct snd_soc_codec *codec = cntl->codec;
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0, 0xFF);
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_1, 0x0F);
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_3, 0xFF);
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_2, 0xFF);
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_DRAM1_SHUTDOWN, 0x07);
@@ -646,9 +649,9 @@ static irqreturn_t wcd_cntl_ipc_irq(int irq, void *data)
complete(&cntl->boot_complete);
if (cntl->m_dev && cntl->m_ops &&
- cntl->m_ops->intr_handler)
- ret = cntl->m_ops->intr_handler(cntl->m_dev, WDSP_IPC1_INTR,
- NULL);
+ cntl->m_ops->signal_handler)
+ ret = cntl->m_ops->signal_handler(cntl->m_dev, WDSP_IPC1_INTR,
+ NULL);
else
ret = -EINVAL;
@@ -663,7 +666,7 @@ static irqreturn_t wcd_cntl_err_irq(int irq, void *data)
{
struct wcd_dsp_cntl *cntl = data;
struct snd_soc_codec *codec = cntl->codec;
- struct wdsp_err_intr_arg arg;
+ struct wdsp_err_signal_arg arg;
u16 status = 0;
u8 reg_val;
int ret = 0;
@@ -678,19 +681,19 @@ static irqreturn_t wcd_cntl_err_irq(int irq, void *data)
__func__, status);
if ((status & cntl->irqs.fatal_irqs) &&
- (cntl->m_dev && cntl->m_ops && cntl->m_ops->intr_handler)) {
+ (cntl->m_dev && cntl->m_ops && cntl->m_ops->signal_handler)) {
arg.mem_dumps_enabled = cntl->ramdump_enable;
arg.remote_start_addr = WCD_934X_RAMDUMP_START_ADDR;
arg.dump_size = WCD_934X_RAMDUMP_SIZE;
- ret = cntl->m_ops->intr_handler(cntl->m_dev, WDSP_ERR_INTR,
- &arg);
+ ret = cntl->m_ops->signal_handler(cntl->m_dev, WDSP_ERR_INTR,
+ &arg);
if (IS_ERR_VALUE(ret))
dev_err(cntl->codec->dev,
"%s: Failed to handle fatal irq 0x%x\n",
__func__, status & cntl->irqs.fatal_irqs);
wcd_cntl_change_online_state(cntl, 0);
} else {
- dev_err(cntl->codec->dev, "%s: Invalid intr_handler\n",
+ dev_err(cntl->codec->dev, "%s: Invalid signal_handler\n",
__func__);
}
@@ -833,8 +836,7 @@ static int wcd_control_init(struct device *dev, void *priv_data)
struct snd_soc_codec *codec = cntl->codec;
struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;
- char wcd_cntl_dir_name[WCD_CNTL_DIR_NAME_LEN_MAX];
- int ret, ret1;
+ int ret;
bool err_irq_requested = false;
ret = wcd9xxx_request_irq(core_res,
@@ -876,25 +878,8 @@ static int wcd_control_init(struct device *dev, void *priv_data)
}
wcd_cntl_cpar_ctrl(cntl, true);
- snprintf(wcd_cntl_dir_name, WCD_CNTL_DIR_NAME_LEN_MAX,
- "%s%d", "wdsp", cntl->dsp_instance);
- wcd_cntl_debugfs_init(wcd_cntl_dir_name, cntl);
- ret = wcd_cntl_sysfs_init(wcd_cntl_dir_name, cntl);
- if (IS_ERR_VALUE(ret)) {
- dev_err(codec->dev,
- "%s: Failed to init sysfs %d\n",
- __func__, ret);
- goto err_sysfs_init;
- }
-
return 0;
-err_sysfs_init:
- wcd_cntl_cpar_ctrl(cntl, false);
- ret1 = wcd_cntl_clocks_disable(cntl);
- if (IS_ERR_VALUE(ret1))
- dev_err(codec->dev, "%s: Failed to disable clocks, err = %d\n",
- __func__, ret1);
err_clk_enable:
/* Mask all error interrupts */
snd_soc_write(codec, WCD934X_CPE_SS_SS_ERROR_INT_MASK_0A, 0xFF);
@@ -916,12 +901,6 @@ static int wcd_control_deinit(struct device *dev, void *priv_data)
struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;
- /* Remove the sysfs entries */
- wcd_cntl_sysfs_remove(cntl);
-
- /* Remove the debugfs entries */
- wcd_cntl_debugfs_remove(cntl);
-
wcd_cntl_clocks_disable(cntl);
wcd_cntl_cpar_ctrl(cntl, false);
@@ -951,6 +930,7 @@ static int wcd_ctrl_component_bind(struct device *dev,
struct snd_card *card;
struct snd_info_entry *entry;
char proc_name[WCD_PROCFS_ENTRY_MAX_LEN];
+ char wcd_cntl_dir_name[WCD_CNTL_DIR_NAME_LEN_MAX];
int ret = 0;
if (!dev || !master || !data) {
@@ -982,6 +962,17 @@ static int wcd_ctrl_component_bind(struct device *dev,
goto done;
}
+ snprintf(wcd_cntl_dir_name, WCD_CNTL_DIR_NAME_LEN_MAX,
+ "%s%d", "wdsp", cntl->dsp_instance);
+ ret = wcd_cntl_sysfs_init(wcd_cntl_dir_name, cntl);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(dev, "%s: sysfs_init failed, err = %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ wcd_cntl_debugfs_init(wcd_cntl_dir_name, cntl);
+
codec = cntl->codec;
card = codec->component.card->snd_card;
snprintf(proc_name, WCD_PROCFS_ENTRY_MAX_LEN, "%s%d%s", "cpe",
@@ -1032,6 +1023,13 @@ static void wcd_ctrl_component_unbind(struct device *dev,
cntl->m_dev = NULL;
cntl->m_ops = NULL;
+
+ /* Remove the sysfs entries */
+ wcd_cntl_sysfs_remove(cntl);
+
+ /* Remove the debugfs entries */
+ wcd_cntl_debugfs_remove(cntl);
+
}
static const struct component_ops wcd_ctrl_component_ops = {
@@ -1040,6 +1038,60 @@ static const struct component_ops wcd_ctrl_component_ops = {
};
/*
+ * wcd_dsp_ssr_event: handle the SSR event raised by caller.
+ * @cntl: Handle to the wcd_dsp_cntl structure
+ * @event: The SSR event to be handled
+ *
+ * Notifies the manager driver about the SSR event.
+ * Returns 0 on success and negative error code on error.
+ */
+int wcd_dsp_ssr_event(struct wcd_dsp_cntl *cntl, enum cdc_ssr_event event)
+{
+ int ret = 0;
+
+ if (!cntl) {
+ pr_err("%s: Invalid handle to control\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!cntl->m_dev || !cntl->m_ops || !cntl->m_ops->signal_handler) {
+ dev_err(cntl->codec->dev,
+ "%s: Invalid signal_handler callback\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case WCD_CDC_DOWN_EVENT:
+ ret = cntl->m_ops->signal_handler(cntl->m_dev,
+ WDSP_CDC_DOWN_SIGNAL,
+ NULL);
+ if (IS_ERR_VALUE(ret))
+ dev_err(cntl->codec->dev,
+ "%s: WDSP_CDC_DOWN_SIGNAL failed, err = %d\n",
+ __func__, ret);
+ wcd_cntl_change_online_state(cntl, 0);
+ break;
+ case WCD_CDC_UP_EVENT:
+ ret = cntl->m_ops->signal_handler(cntl->m_dev,
+ WDSP_CDC_UP_SIGNAL,
+ NULL);
+ if (IS_ERR_VALUE(ret))
+ dev_err(cntl->codec->dev,
+ "%s: WDSP_CDC_UP_SIGNAL failed, err = %d\n",
+ __func__, ret);
+ break;
+ default:
+ dev_err(cntl->codec->dev, "%s: Invalid event %d\n",
+ __func__, event);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(wcd_dsp_ssr_event);
+
+/*
* wcd_dsp_cntl_init: Initialize the wcd-dsp control
* @codec: pointer to the codec handle
* @params: Parameters required to initialize wcd-dsp control
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h
index cd6697b3d641..83c59ed7b676 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h
@@ -17,6 +17,11 @@
#include <sound/soc.h>
#include <sound/wcd-dsp-mgr.h>
+enum cdc_ssr_event {
+ WCD_CDC_DOWN_EVENT,
+ WCD_CDC_UP_EVENT,
+};
+
struct wcd_dsp_cdc_cb {
/* Callback to enable codec clock */
int (*cdc_clk_en)(struct snd_soc_codec *, bool);
@@ -106,5 +111,5 @@ void wcd_dsp_cntl_init(struct snd_soc_codec *codec,
struct wcd_dsp_params *params,
struct wcd_dsp_cntl **cntl);
void wcd_dsp_cntl_deinit(struct wcd_dsp_cntl **cntl);
-
+int wcd_dsp_ssr_event(struct wcd_dsp_cntl *cntl, enum cdc_ssr_event event);
#endif /* end __WCD_DSP_CONTROL_H__ */
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 9e18c17d6f1c..b8dcd264b5d2 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -260,6 +260,11 @@ static const struct intr_data wcd934x_intr_table[] = {
{WCD934X_IRQ_VBAT_RESTORE, false},
};
+struct tavil_cpr_reg_defaults {
+ int wr_data;
+ int wr_addr;
+};
+
struct interp_sample_rate {
int sample_rate;
int rate_val;
@@ -358,6 +363,15 @@ enum {
ASRC_MAX,
};
+enum {
+ CONV_88P2K_TO_384K,
+ CONV_96K_TO_352P8K,
+ CONV_352P8K_TO_384K,
+ CONV_384K_TO_352P8K,
+ CONV_384K_TO_384K,
+ CONV_96K_TO_384K,
+};
+
static struct afe_param_slimbus_slave_port_cfg tavil_slimbus_slave_port_cfg = {
.minor_version = 1,
.slimbus_dev_id = AFE_SLIMBUS_DEVICE_1,
@@ -577,7 +591,7 @@ struct tavil_priv {
/* num of slim ports required */
struct wcd9xxx_codec_dai_data dai[NUM_CODEC_DAIS];
/* Port values for Rx and Tx codec_dai */
- unsigned int rx_port_value;
+ unsigned int rx_port_value[WCD934X_RX_MAX];
unsigned int tx_port_value;
struct wcd9xxx_resmgr_v2 *resmgr;
@@ -616,6 +630,7 @@ struct tavil_priv {
int native_clk_users;
/* ASRC users count */
int asrc_users[ASRC_MAX];
+ int asrc_output_mode[ASRC_MAX];
/* Main path clock users count */
int main_clk_users[WCD934X_NUM_INTERPOLATORS];
struct tavil_dsd_config *dsd_config;
@@ -1298,7 +1313,8 @@ static int slim_rx_mux_get(struct snd_kcontrol *kcontrol,
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm);
struct tavil_priv *tavil_p = snd_soc_codec_get_drvdata(codec);
- ucontrol->value.enumerated.item[0] = tavil_p->rx_port_value;
+ ucontrol->value.enumerated.item[0] =
+ tavil_p->rx_port_value[widget->shift];
return 0;
}
@@ -1313,17 +1329,20 @@ static int slim_rx_mux_put(struct snd_kcontrol *kcontrol,
struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
struct snd_soc_dapm_update *update = NULL;
+ unsigned int rx_port_value;
u32 port_id = widget->shift;
+ tavil_p->rx_port_value[port_id] = ucontrol->value.enumerated.item[0];
+ rx_port_value = tavil_p->rx_port_value[port_id];
+
mutex_lock(&tavil_p->codec_mutex);
- tavil_p->rx_port_value = ucontrol->value.enumerated.item[0];
dev_dbg(codec->dev, "%s: wname %s cname %s value %u shift %d item %ld\n",
__func__, widget->name, ucontrol->id.name,
- tavil_p->rx_port_value, widget->shift,
+ rx_port_value, widget->shift,
ucontrol->value.integer.value[0]);
/* value need to match the Virtual port and AIF number */
- switch (tavil_p->rx_port_value) {
+ switch (rx_port_value) {
case 0:
list_del_init(&core->rx_chs[port_id].list);
break;
@@ -1372,13 +1391,13 @@ static int slim_rx_mux_put(struct snd_kcontrol *kcontrol,
&tavil_p->dai[AIF4_PB].wcd9xxx_ch_list);
break;
default:
- dev_err(codec->dev, "Unknown AIF %d\n", tavil_p->rx_port_value);
+ dev_err(codec->dev, "Unknown AIF %d\n", rx_port_value);
goto err;
}
rtn:
mutex_unlock(&tavil_p->codec_mutex);
snd_soc_dapm_mux_update_power(widget->dapm, kcontrol,
- tavil_p->rx_port_value, e, update);
+ rx_port_value, e, update);
return 0;
err:
@@ -1859,8 +1878,9 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
- 0x06, (0x03 << 1));
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
+ 0x06, (0x03 << 1));
set_bit(HPH_PA_DELAY, &tavil->status_mask);
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01)) {
@@ -1922,8 +1942,9 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
blocking_notifier_call_chain(&tavil->mbhc->notifier,
WCD_EVENT_POST_HPHR_PA_OFF,
&tavil->mbhc->wcd_mbhc);
- snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
- 0x06, 0x0);
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
+ 0x06, 0x0);
break;
};
@@ -1942,8 +1963,9 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
- 0x06, (0x03 << 1));
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
+ 0x06, (0x03 << 1));
set_bit(HPH_PA_DELAY, &tavil->status_mask);
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01)) {
@@ -2004,8 +2026,9 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
blocking_notifier_call_chain(&tavil->mbhc->notifier,
WCD_EVENT_POST_HPHL_PA_OFF,
&tavil->mbhc->wcd_mbhc);
- snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
- 0x06, 0x0);
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
+ 0x06, 0x0);
break;
};
@@ -2130,9 +2153,10 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_HPH_TIMER1,
0x02, 0x00);
/* Set RDAC gain */
- snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
- 0xF0, 0x40);
-
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec,
+ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+ 0xF0, 0x40);
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01))
hph_mode = CLS_H_HIFI;
@@ -2155,8 +2179,10 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
WCD934X_SIDO_NEW_VOUT_D_FREQ2,
0x01, 0x0);
/* Re-set RDAC gain */
- snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
- 0xF0, 0x0);
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec,
+ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+ 0xF0, 0x0);
break;
default:
break;
@@ -2199,8 +2225,10 @@ static int tavil_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_HPH_TIMER1,
0x02, 0x00);
/* Set RDAC gain */
- snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
- 0xF0, 0x40);
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec,
+ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+ 0xF0, 0x40);
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01))
hph_mode = CLS_H_HIFI;
@@ -2223,8 +2251,10 @@ static int tavil_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
WCD934X_SIDO_NEW_VOUT_D_FREQ2,
0x01, 0x0);
/* Re-set RDAC gain */
- snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
- 0xF0, 0x0);
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec,
+ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+ 0xF0, 0x0);
break;
default:
break;
@@ -2581,6 +2611,45 @@ done:
return rc;
}
+static int tavil_get_asrc_mode(struct tavil_priv *tavil, int asrc,
+ u8 main_sr, u8 mix_sr)
+{
+ u8 asrc_output_mode;
+ int asrc_mode = CONV_88P2K_TO_384K;
+
+ if ((asrc < 0) || (asrc >= ASRC_MAX))
+ return 0;
+
+ asrc_output_mode = tavil->asrc_output_mode[asrc];
+
+ if (asrc_output_mode) {
+ /*
+ * If Mix sample rate is < 96KHz, use 96K to 352.8K
+ * conversion, or else use 384K to 352.8K conversion
+ */
+ if (mix_sr < 5)
+ asrc_mode = CONV_96K_TO_352P8K;
+ else
+ asrc_mode = CONV_384K_TO_352P8K;
+ } else {
+ /* Integer main and Fractional mix path */
+ if (main_sr < 8 && mix_sr > 9) {
+ asrc_mode = CONV_352P8K_TO_384K;
+ } else if (main_sr > 8 && mix_sr < 8) {
+ /* Fractional main and Integer mix path */
+ if (mix_sr < 5)
+ asrc_mode = CONV_96K_TO_352P8K;
+ else
+ asrc_mode = CONV_384K_TO_352P8K;
+ } else if (main_sr < 8 && mix_sr < 8) {
+ /* Integer main and Integer mix path */
+ asrc_mode = CONV_96K_TO_384K;
+ }
+ }
+
+ return asrc_mode;
+}
+
static int tavil_codec_enable_asrc(struct snd_soc_codec *codec,
int asrc_in, int event)
{
@@ -2647,19 +2716,8 @@ static int tavil_codec_enable_asrc(struct snd_soc_codec *codec,
main_sr = snd_soc_read(codec, ctl_reg) & 0x0F;
mix_ctl_reg = ctl_reg + 5;
mix_sr = snd_soc_read(codec, mix_ctl_reg) & 0x0F;
- /* Integer main and Fractional mix path */
- if (main_sr < 8 && mix_sr > 9) {
- asrc_mode = 2;
- } else if (main_sr > 8 && mix_sr < 8) {
- /* Fractional main and Integer mix path */
- if (mix_sr < 5)
- asrc_mode = 1;
- else
- asrc_mode = 3;
- } else if (main_sr < 8 && mix_sr < 8) {
- /* Integer main and Integer mix path */
- asrc_mode = 5;
- }
+ asrc_mode = tavil_get_asrc_mode(tavil, asrc,
+ main_sr, mix_sr);
dev_dbg(codec->dev, "%s: main_sr:%d mix_sr:%d asrc_mode %d\n",
__func__, main_sr, mix_sr, asrc_mode);
snd_soc_update_bits(codec, asrc_ctl, 0x07, asrc_mode);
@@ -2820,11 +2878,15 @@ static void tavil_codec_hphdelay_lutbypass(struct snd_soc_codec *codec,
}
}
-static void tavil_codec_hd2_control(struct snd_soc_codec *codec,
+static void tavil_codec_hd2_control(struct tavil_priv *priv,
u16 interp_idx, int event)
{
u16 hd2_scale_reg;
u16 hd2_enable_reg = 0;
+ struct snd_soc_codec *codec = priv->codec;
+
+ if (TAVIL_IS_1_1(priv->wcd9xxx))
+ return;
switch (interp_idx) {
case INTERP_HPHL:
@@ -3002,7 +3064,7 @@ int tavil_codec_enable_interp_clk(struct snd_soc_codec *codec,
snd_soc_update_bits(codec, main_reg, 0x20, 0x20);
tavil_codec_idle_detect_control(codec, interp_idx,
event);
- tavil_codec_hd2_control(codec, interp_idx, event);
+ tavil_codec_hd2_control(tavil, interp_idx, event);
tavil_codec_hphdelay_lutbypass(codec, interp_idx,
event);
tavil_config_compander(codec, interp_idx, event);
@@ -3017,7 +3079,7 @@ int tavil_codec_enable_interp_clk(struct snd_soc_codec *codec,
tavil_config_compander(codec, interp_idx, event);
tavil_codec_hphdelay_lutbypass(codec, interp_idx,
event);
- tavil_codec_hd2_control(codec, interp_idx, event);
+ tavil_codec_hd2_control(tavil, interp_idx, event);
tavil_codec_idle_detect_control(codec, interp_idx,
event);
/* Clk Disable */
@@ -3545,7 +3607,7 @@ static void tavil_tx_hpf_corner_freq_callback(struct work_struct *work)
struct hpf_work *hpf_work;
struct tavil_priv *tavil;
struct snd_soc_codec *codec;
- u16 dec_cfg_reg, amic_reg;
+ u16 dec_cfg_reg, amic_reg, go_bit_reg;
u8 hpf_cut_off_freq;
int amic_n;
@@ -3556,6 +3618,7 @@ static void tavil_tx_hpf_corner_freq_callback(struct work_struct *work)
hpf_cut_off_freq = hpf_work->hpf_cut_off_freq;
dec_cfg_reg = WCD934X_CDC_TX0_TX_PATH_CFG0 + 16 * hpf_work->decimator;
+ go_bit_reg = dec_cfg_reg + 7;
dev_dbg(codec->dev, "%s: decimator %u hpf_cut_of_freq 0x%x\n",
__func__, hpf_work->decimator, hpf_cut_off_freq);
@@ -3567,6 +3630,10 @@ static void tavil_tx_hpf_corner_freq_callback(struct work_struct *work)
}
snd_soc_update_bits(codec, dec_cfg_reg, TX_HPF_CUT_OFF_FREQ_MASK,
hpf_cut_off_freq << 5);
+ snd_soc_update_bits(codec, go_bit_reg, 0x02, 0x02);
+ /* Minimum 1 clk cycle delay is required as per HW spec */
+ usleep_range(1000, 1010);
+ snd_soc_update_bits(codec, go_bit_reg, 0x02, 0x00);
}
static void tavil_tx_mute_update_callback(struct work_struct *work)
@@ -3586,7 +3653,6 @@ static void tavil_tx_mute_update_callback(struct work_struct *work)
16 * tx_mute_dwork->decimator;
hpf_gate_reg = WCD934X_CDC_TX0_TX_PATH_SEC2 +
16 * tx_mute_dwork->decimator;
- snd_soc_update_bits(codec, hpf_gate_reg, 0x01, 0x01);
snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x00);
}
@@ -3673,20 +3739,27 @@ static int tavil_codec_enable_dec(struct snd_soc_dapm_widget *w,
break;
}
}
+ /* Enable TX PGA Mute */
+ snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
hpf_cut_off_freq = (snd_soc_read(codec, dec_cfg_reg) &
TX_HPF_CUT_OFF_FREQ_MASK) >> 5;
tavil->tx_hpf_work[decimator].hpf_cut_off_freq =
hpf_cut_off_freq;
- if (hpf_cut_off_freq != CF_MIN_3DB_150HZ)
+ if (hpf_cut_off_freq != CF_MIN_3DB_150HZ) {
snd_soc_update_bits(codec, dec_cfg_reg,
TX_HPF_CUT_OFF_FREQ_MASK,
CF_MIN_3DB_150HZ << 5);
- /* Enable TX PGA Mute */
- snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
- break;
- case SND_SOC_DAPM_POST_PMU:
- snd_soc_update_bits(codec, hpf_gate_reg, 0x01, 0x00);
+ snd_soc_update_bits(codec, hpf_gate_reg, 0x02, 0x02);
+ /*
+ * Minimum 1 clk cycle delay is required as per
+ * HW spec.
+ */
+ usleep_range(1000, 1010);
+ snd_soc_update_bits(codec, hpf_gate_reg, 0x02, 0x00);
+ }
/* schedule work queue to Remove Mute */
schedule_delayed_work(&tavil->tx_mute_dwork[decimator].dwork,
msecs_to_jiffies(tx_unmute_delay));
@@ -3703,10 +3776,20 @@ static int tavil_codec_enable_dec(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x10, 0x10);
if (cancel_delayed_work_sync(
&tavil->tx_hpf_work[decimator].dwork)) {
- if (hpf_cut_off_freq != CF_MIN_3DB_150HZ)
+ if (hpf_cut_off_freq != CF_MIN_3DB_150HZ) {
snd_soc_update_bits(codec, dec_cfg_reg,
TX_HPF_CUT_OFF_FREQ_MASK,
hpf_cut_off_freq << 5);
+ snd_soc_update_bits(codec, hpf_gate_reg,
+ 0x02, 0x02);
+ /*
+ * Minimum 1 clk cycle delay is required as per
+ * HW spec.
+ */
+ usleep_range(1000, 1010);
+ snd_soc_update_bits(codec, hpf_gate_reg,
+ 0x02, 0x00);
+ }
}
cancel_delayed_work_sync(
&tavil->tx_mute_dwork[decimator].dwork);
@@ -4737,6 +4820,46 @@ static int tavil_compander_put(struct snd_kcontrol *kcontrol,
return 0;
}
+static int tavil_hph_asrc_mode_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+ int index = -EINVAL;
+
+ if (!strcmp(kcontrol->id.name, "ASRC0 Output Mode"))
+ index = ASRC0;
+ if (!strcmp(kcontrol->id.name, "ASRC1 Output Mode"))
+ index = ASRC1;
+
+ if (tavil && (index >= 0) && (index < ASRC_MAX))
+ tavil->asrc_output_mode[index] =
+ ucontrol->value.integer.value[0];
+
+ return 0;
+}
+
+static int tavil_hph_asrc_mode_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
+ int val = 0;
+ int index = -EINVAL;
+
+ if (!strcmp(kcontrol->id.name, "ASRC0 Output Mode"))
+ index = ASRC0;
+ if (!strcmp(kcontrol->id.name, "ASRC1 Output Mode"))
+ index = ASRC1;
+
+ if (tavil && (index >= 0) && (index < ASRC_MAX))
+ val = tavil->asrc_output_mode[index];
+
+ ucontrol->value.integer.value[0] = val;
+
+ return 0;
+}
+
static int tavil_hph_idle_detect_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -5131,6 +5254,10 @@ static const char * const hph_idle_detect_text[] = {
"OFF", "ON"
};
+static const char * const asrc_mode_text[] = {
+ "INT", "FRAC"
+};
+
static const char * const tavil_ear_pa_gain_text[] = {
"G_6_DB", "G_4P5_DB", "G_3_DB", "G_1P5_DB",
"G_0_DB", "G_M2P5_DB", "UNDEFINED", "G_M12_DB"
@@ -5146,6 +5273,7 @@ static SOC_ENUM_SINGLE_EXT_DECL(tavil_ear_spkr_pa_gain_enum,
tavil_ear_spkr_pa_gain_text);
static SOC_ENUM_SINGLE_EXT_DECL(amic_pwr_lvl_enum, amic_pwr_lvl_text);
static SOC_ENUM_SINGLE_EXT_DECL(hph_idle_detect_enum, hph_idle_detect_text);
+static SOC_ENUM_SINGLE_EXT_DECL(asrc_mode_enum, asrc_mode_text);
static SOC_ENUM_SINGLE_DECL(cf_dec0_enum, WCD934X_CDC_TX0_TX_PATH_CFG0, 5,
cf_text);
static SOC_ENUM_SINGLE_DECL(cf_dec1_enum, WCD934X_CDC_TX1_TX_PATH_CFG0, 5,
@@ -5380,6 +5508,11 @@ static const struct snd_kcontrol_new tavil_snd_controls[] = {
SOC_SINGLE_EXT("COMP8 Switch", SND_SOC_NOPM, COMPANDER_8, 1, 0,
tavil_compander_get, tavil_compander_put),
+ SOC_ENUM_EXT("ASRC0 Output Mode", asrc_mode_enum,
+ tavil_hph_asrc_mode_get, tavil_hph_asrc_mode_put),
+ SOC_ENUM_EXT("ASRC1 Output Mode", asrc_mode_enum,
+ tavil_hph_asrc_mode_get, tavil_hph_asrc_mode_put),
+
SOC_ENUM_EXT("HPH Idle Detect", hph_idle_detect_enum,
tavil_hph_idle_detect_get, tavil_hph_idle_detect_put),
@@ -7837,7 +7970,11 @@ static const struct wcd_resmgr_cb tavil_resmgr_cb = {
.cdc_rco_ctrl = __tavil_codec_internal_rco_ctrl,
};
-static const struct tavil_reg_mask_val tavil_codec_mclk2_defaults[] = {
+static const struct tavil_reg_mask_val tavil_codec_mclk2_1_1_defaults[] = {
+ {WCD934X_CLK_SYS_MCLK2_PRG1, 0x60, 0x20},
+};
+
+static const struct tavil_reg_mask_val tavil_codec_mclk2_1_0_defaults[] = {
/*
* PLL Settings:
* Clock Root: MCLK2,
@@ -7896,6 +8033,32 @@ static const struct tavil_reg_mask_val tavil_codec_reg_defaults[] = {
{WCD934X_HPH_R_TEST, 0x01, 0x01},
};
+static const struct tavil_reg_mask_val tavil_codec_reg_init_1_1_val[] = {
+ {WCD934X_CDC_COMPANDER1_CTL7, 0x1E, 0x06},
+ {WCD934X_CDC_COMPANDER2_CTL7, 0x1E, 0x06},
+ {WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L, 0xFF, 0x84},
+ {WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R, 0xFF, 0x84},
+};
+
+static const struct tavil_cpr_reg_defaults cpr_defaults[] = {
+ { 0x00000820, 0x00000094 },
+ { 0x00000fC0, 0x00000048 },
+ { 0x0000f000, 0x00000044 },
+ { 0x0000bb80, 0xC0000178 },
+ { 0x00000000, 0x00000160 },
+ { 0x10854522, 0x00000060 },
+ { 0x10854509, 0x00000064 },
+ { 0x108544dd, 0x00000068 },
+ { 0x108544ad, 0x0000006C },
+ { 0x0000077E, 0x00000070 },
+ { 0x000007da, 0x00000074 },
+ { 0x00000000, 0x00000078 },
+ { 0x00000000, 0x0000007C },
+ { 0x00042029, 0x00000080 },
+ { 0x4002002A, 0x00000090 },
+ { 0x4002002B, 0x00000090 },
+};
+
static const struct tavil_reg_mask_val tavil_codec_reg_init_common_val[] = {
{WCD934X_CDC_CLSH_K2_MSB, 0x0F, 0x00},
{WCD934X_CDC_CLSH_K2_LSB, 0xFF, 0x60},
@@ -7922,8 +8085,9 @@ static const struct tavil_reg_mask_val tavil_codec_reg_init_common_val[] = {
{WCD934X_CPE_SS_SVA_CFG, 0x60, 0x00},
};
-static void tavil_codec_init_reg(struct snd_soc_codec *codec)
+static void tavil_codec_init_reg(struct tavil_priv *priv)
{
+ struct snd_soc_codec *codec = priv->codec;
u32 i;
for (i = 0; i < ARRAY_SIZE(tavil_codec_reg_init_common_val); i++)
@@ -7931,6 +8095,14 @@ static void tavil_codec_init_reg(struct snd_soc_codec *codec)
tavil_codec_reg_init_common_val[i].reg,
tavil_codec_reg_init_common_val[i].mask,
tavil_codec_reg_init_common_val[i].val);
+
+ if (TAVIL_IS_1_1(priv->wcd9xxx)) {
+ for (i = 0; i < ARRAY_SIZE(tavil_codec_reg_init_1_1_val); i++)
+ snd_soc_update_bits(codec,
+ tavil_codec_reg_init_1_1_val[i].reg,
+ tavil_codec_reg_init_1_1_val[i].mask,
+ tavil_codec_reg_init_1_1_val[i].val);
+ }
}
static void tavil_update_reg_defaults(struct tavil_priv *tavil)
@@ -7946,6 +8118,33 @@ static void tavil_update_reg_defaults(struct tavil_priv *tavil)
tavil_codec_reg_defaults[i].val);
}
+static void tavil_update_cpr_defaults(struct tavil_priv *tavil)
+{
+ int i;
+ struct wcd9xxx *wcd9xxx;
+
+ wcd9xxx = tavil->wcd9xxx;
+ if (!TAVIL_IS_1_1(wcd9xxx))
+ return;
+
+ __tavil_cdc_mclk_enable(tavil, true);
+
+ regmap_write(wcd9xxx->regmap, WCD934X_CODEC_CPR_SVS2_MIN_CX_VDD, 0x2C);
+ regmap_update_bits(wcd9xxx->regmap, WCD934X_CODEC_RPM_CLK_GATE,
+ 0x10, 0x00);
+
+ for (i = 0; i < ARRAY_SIZE(cpr_defaults); i++) {
+ regmap_bulk_write(wcd9xxx->regmap,
+ WCD934X_CODEC_CPR_WR_DATA_0,
+ (u8 *)&cpr_defaults[i].wr_data, 4);
+ regmap_bulk_write(wcd9xxx->regmap,
+ WCD934X_CODEC_CPR_WR_ADDR_0,
+ (u8 *)&cpr_defaults[i].wr_addr, 4);
+ }
+
+ __tavil_cdc_mclk_enable(tavil, false);
+}
+
static void tavil_slim_interface_init_reg(struct snd_soc_codec *codec)
{
int i;
@@ -8367,11 +8566,22 @@ static void tavil_mclk2_reg_defaults(struct tavil_priv *tavil)
int i;
struct snd_soc_codec *codec = tavil->codec;
- /* MCLK2 configuration */
- for (i = 0; i < ARRAY_SIZE(tavil_codec_mclk2_defaults); i++)
- snd_soc_update_bits(codec, tavil_codec_mclk2_defaults[i].reg,
- tavil_codec_mclk2_defaults[i].mask,
- tavil_codec_mclk2_defaults[i].val);
+ if (TAVIL_IS_1_0(tavil->wcd9xxx)) {
+ /* MCLK2 configuration */
+ for (i = 0; i < ARRAY_SIZE(tavil_codec_mclk2_1_0_defaults); i++)
+ snd_soc_update_bits(codec,
+ tavil_codec_mclk2_1_0_defaults[i].reg,
+ tavil_codec_mclk2_1_0_defaults[i].mask,
+ tavil_codec_mclk2_1_0_defaults[i].val);
+ }
+ if (TAVIL_IS_1_1(tavil->wcd9xxx)) {
+ /* MCLK2 configuration */
+ for (i = 0; i < ARRAY_SIZE(tavil_codec_mclk2_1_1_defaults); i++)
+ snd_soc_update_bits(codec,
+ tavil_codec_mclk2_1_1_defaults[i].reg,
+ tavil_codec_mclk2_1_1_defaults[i].mask,
+ tavil_codec_mclk2_1_1_defaults[i].val);
+ }
}
static int tavil_soc_codec_probe(struct snd_soc_codec *codec)
@@ -8429,7 +8639,7 @@ static int tavil_soc_codec_probe(struct snd_soc_codec *codec)
for (i = 0; i < COMPANDER_MAX; i++)
tavil->comp_enabled[i] = 0;
- tavil_codec_init_reg(codec);
+ tavil_codec_init_reg(tavil);
tavil_enable_sido_buck(codec);
pdata = dev_get_platdata(codec->dev->parent);
@@ -8749,6 +8959,9 @@ static int tavil_swrm_clock(void *handle, bool enable)
if (enable) {
tavil->swr.clk_users++;
if (tavil->swr.clk_users == 1) {
+ regmap_update_bits(tavil->wcd9xxx->regmap,
+ WCD934X_TEST_DEBUG_NPL_DLY_TEST_1,
+ 0x10, 0x00);
__tavil_cdc_mclk_enable(tavil, true);
regmap_update_bits(tavil->wcd9xxx->regmap,
WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL,
@@ -8761,6 +8974,9 @@ static int tavil_swrm_clock(void *handle, bool enable)
WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL,
0x01, 0x00);
__tavil_cdc_mclk_enable(tavil, false);
+ regmap_update_bits(tavil->wcd9xxx->regmap,
+ WCD934X_TEST_DEBUG_NPL_DLY_TEST_1,
+ 0x10, 0x10);
}
}
dev_dbg(tavil->dev, "%s: swrm clock users %d\n",
@@ -9170,6 +9386,7 @@ static int tavil_probe(struct platform_device *pdev)
tavil_update_reg_defaults(tavil);
__tavil_enable_efuse_sensing(tavil);
___tavil_get_codec_fine_version(tavil);
+ tavil_update_cpr_defaults(tavil);
/* Register with soc framework */
ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_tavil,
diff --git a/sound/soc/codecs/wcd9xxx-common-v2.c b/sound/soc/codecs/wcd9xxx-common-v2.c
index 63872bbf540c..47518ec92661 100644
--- a/sound/soc/codecs/wcd9xxx-common-v2.c
+++ b/sound/soc/codecs/wcd9xxx-common-v2.c
@@ -369,8 +369,9 @@ static inline void wcd_clsh_gm3_boost_disable(struct snd_soc_codec *codec,
if (mode == CLS_H_HIFI || mode == CLS_H_LOHIFI ||
mode == CLS_AB_HIFI || mode == CLS_AB) {
- snd_soc_update_bits(codec, WCD9XXX_HPH_CNP_WG_CTL,
- 0x80, 0x0); /* disable GM3 Boost */
+ if (TAVIL_IS_1_0(wcd9xxx))
+ snd_soc_update_bits(codec, WCD9XXX_HPH_CNP_WG_CTL,
+ 0x80, 0x0); /* disable GM3 Boost */
snd_soc_update_bits(codec, WCD9XXX_FLYBACK_VNEG_CTRL_4,
0xF0, 0x80);
} else {
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.c b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
index 29718a8d7c04..39ca965e791e 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
@@ -307,7 +307,7 @@ static int wcd_resmgr_disable_clk_mclk(struct wcd9xxx_resmgr_v2 *resmgr)
WCD9335_ANA_CLK_TOP,
0x04, 0x00);
wcd_resmgr_codec_reg_update_bits(resmgr,
- WCD934X_CLK_SYS_MCLK_PRG, 0x01, 0x0);
+ WCD934X_CLK_SYS_MCLK_PRG, 0x81, 0x00);
resmgr->clk_type = WCD_CLK_OFF;
}
diff --git a/sound/soc/msm/msmcobalt.c b/sound/soc/msm/msmcobalt.c
index 5c8d91bfe400..c82e0ad13db3 100644
--- a/sound/soc/msm/msmcobalt.c
+++ b/sound/soc/msm/msmcobalt.c
@@ -512,10 +512,10 @@ static struct wcd_mbhc_config wcd_mbhc_cfg = {
};
static struct snd_soc_dapm_route wcd_audio_paths[] = {
- {"MIC BIAS1", NULL, "MCLK"},
- {"MIC BIAS2", NULL, "MCLK"},
- {"MIC BIAS3", NULL, "MCLK"},
- {"MIC BIAS4", NULL, "MCLK"},
+ {"MIC BIAS1", NULL, "MCLK TX"},
+ {"MIC BIAS2", NULL, "MCLK TX"},
+ {"MIC BIAS3", NULL, "MCLK TX"},
+ {"MIC BIAS4", NULL, "MCLK TX"},
};
static struct afe_clk_set mi2s_clk[MI2S_MAX] = {
@@ -2463,6 +2463,37 @@ static int msm_snd_enable_codec_ext_clk(struct snd_soc_codec *codec,
return ret;
}
+static int msm_snd_enable_codec_ext_tx_clk(struct snd_soc_codec *codec,
+ int enable, bool dapm)
+{
+ int ret = 0;
+
+ if (!strcmp(dev_name(codec->dev), "tasha_codec"))
+ ret = tasha_cdc_mclk_tx_enable(codec, enable, dapm);
+ else {
+ dev_err(codec->dev, "%s: unknown codec to enable ext clk\n",
+ __func__);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int msm_mclk_tx_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ pr_debug("%s: event = %d\n", __func__, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ return msm_snd_enable_codec_ext_tx_clk(codec, 1, true);
+ case SND_SOC_DAPM_POST_PMD:
+ return msm_snd_enable_codec_ext_tx_clk(codec, 0, true);
+ }
+ return 0;
+}
+
static int msm_mclk_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -2485,6 +2516,9 @@ static const struct snd_soc_dapm_widget msm_dapm_widgets[] = {
msm_mclk_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("MCLK TX", SND_SOC_NOPM, 0, 0,
+ msm_mclk_tx_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
SND_SOC_DAPM_SPK("Lineout_1 amp", NULL),
SND_SOC_DAPM_SPK("Lineout_3 amp", NULL),
SND_SOC_DAPM_SPK("Lineout_2 amp", NULL),
@@ -3180,8 +3214,10 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
* Send speaker configuration only for WSA8810.
* Defalut configuration is for WSA8815.
*/
+ pr_debug("%s: Number of aux devices: %d\n",
+ __func__, rtd->card->num_aux_devs);
if (!strcmp(dev_name(codec_dai->dev), "tavil_codec")) {
- if (rtd_aux && rtd_aux->component)
+ if (rtd->card->num_aux_devs && rtd_aux && rtd_aux->component)
if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) ||
!strcmp(rtd_aux->component->name, WSA8810_NAME_2)) {
tavil_set_spkr_mode(rtd->codec, SPKR_MODE_1);
@@ -3200,7 +3236,7 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
pdata->codec_root = entry;
tavil_codec_info_create_codec_entry(pdata->codec_root, codec);
} else {
- if (rtd_aux && rtd_aux->component)
+ if (rtd->card->num_aux_devs && rtd_aux && rtd_aux->component)
if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) ||
!strcmp(rtd_aux->component->name, WSA8810_NAME_2)) {
tasha_set_spkr_mode(rtd->codec, SPKR_MODE_1);
@@ -4630,6 +4666,20 @@ static struct snd_soc_dai_link msm_tasha_fe_dai_links[] = {
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
},
+ /* CPE LSM EC PP direct dai-link */
+ {
+ .name = "CPE Listen service ECPP",
+ .stream_name = "CPE Listen Audio Service ECPP",
+ .cpu_dai_name = "CPE_LSM_NOHOST",
+ .platform_name = "msm-cpe-lsm.3",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST,
+ SND_SOC_DPCM_TRIGGER_POST},
+ .no_host_mode = SND_SOC_DAI_LINK_NO_HOST,
+ .ignore_suspend = 1,
+ .ignore_pmdown_time = 1,
+ .codec_dai_name = "tasha_cpe",
+ .codec_name = "tasha_codec",
+ },
};
static struct snd_soc_dai_link msm_tavil_fe_dai_links[] = {
@@ -5111,6 +5161,21 @@ static struct snd_soc_dai_link msm_tavil_be_dai_links[] = {
.ignore_pmdown_time = 1,
.ignore_suspend = 1,
},
+ /* MAD BE */
+ {
+ .name = LPASS_BE_SLIMBUS_5_TX,
+ .stream_name = "Slimbus5 Capture",
+ .cpu_dai_name = "msm-dai-q6-dev.16395",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "tavil_codec",
+ .codec_dai_name = "tavil_mad1",
+ .no_pcm = 1,
+ .dpcm_capture = 1,
+ .be_id = MSM_BACKEND_DAI_SLIMBUS_5_TX,
+ .be_hw_params_fixup = msm_be_hw_params_fixup,
+ .ops = &msm_be_ops,
+ .ignore_suspend = 1,
+ },
{
.name = LPASS_BE_SLIMBUS_6_RX,
.stream_name = "Slimbus6 Playback",
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 5c40c55a4a0c..547af163c5c0 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -253,218 +253,218 @@ static void msm_pcm_routng_cfg_matrix_map_pp(struct route_payload payload,
#define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
- { PRIMARY_I2S_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
- { PRIMARY_I2S_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
- { SLIMBUS_0_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
- { SLIMBUS_0_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
- { HDMI_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
- { INT_BT_SCO_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
- { INT_BT_SCO_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
- { INT_FM_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
- { INT_FM_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
- { RT_PROXY_PORT_001_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_RX},
- { RT_PROXY_PORT_001_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_TX},
- { AFE_PORT_ID_PRIMARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { PRIMARY_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
+ { PRIMARY_I2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
+ { SLIMBUS_0_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
+ { SLIMBUS_0_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
+ { HDMI_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
+ { INT_BT_SCO_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
+ { INT_BT_SCO_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
+ { INT_FM_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
+ { INT_FM_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
+ { RT_PROXY_PORT_001_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_RX},
+ { RT_PROXY_PORT_001_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_TX},
+ { AFE_PORT_ID_PRIMARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUXPCM_RX},
- { AFE_PORT_ID_PRIMARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUXPCM_TX},
- { VOICE_PLAYBACK_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { VOICE_PLAYBACK_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_VOICE_PLAYBACK_TX},
- { VOICE2_PLAYBACK_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { VOICE2_PLAYBACK_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_VOICE2_PLAYBACK_TX},
- { VOICE_RECORD_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_RX},
- { VOICE_RECORD_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_TX},
- { MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
- { MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
- { SECONDARY_I2S_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
- { SLIMBUS_1_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
- { SLIMBUS_1_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
- { SLIMBUS_2_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
- { SLIMBUS_4_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
- { SLIMBUS_4_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
- { SLIMBUS_3_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
- { SLIMBUS_3_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
- { SLIMBUS_5_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
- { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
- { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
- { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
- { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { VOICE_RECORD_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_RX},
+ { VOICE_RECORD_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_TX},
+ { MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
+ { MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
+ { SECONDARY_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
+ { SLIMBUS_1_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
+ { SLIMBUS_1_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
+ { SLIMBUS_2_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
+ { SLIMBUS_4_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
+ { SLIMBUS_4_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
+ { SLIMBUS_3_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
+ { SLIMBUS_3_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
+ { SLIMBUS_5_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
+ { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
+ { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
+ { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
+ { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_MI2S_RX},
- { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_MI2S_TX},
- { AFE_PORT_ID_SECONDARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_RX},
- { AFE_PORT_ID_SECONDARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_TX},
- { AFE_PORT_ID_PRIMARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_MI2S_RX},
- { AFE_PORT_ID_PRIMARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_MI2S_TX},
- { AFE_PORT_ID_TERTIARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_MI2S_RX},
- { AFE_PORT_ID_TERTIARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_MI2S_TX},
- { AUDIO_PORT_ID_I2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AUDIO_PORT_ID_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUDIO_I2S_RX},
- { AFE_PORT_ID_SECONDARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_AUXPCM_RX},
- { AFE_PORT_ID_SECONDARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_AUXPCM_TX},
- { SLIMBUS_6_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
- { SLIMBUS_6_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
- { AFE_PORT_ID_SPDIF_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
- { AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { SLIMBUS_6_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
+ { SLIMBUS_6_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
+ { AFE_PORT_ID_SPDIF_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
+ { AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_RX_SD1},
- { SLIMBUS_5_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
- { AFE_PORT_ID_QUINARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { SLIMBUS_5_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
+ { AFE_PORT_ID_QUINARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUIN_MI2S_RX},
- { AFE_PORT_ID_QUINARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUINARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUIN_MI2S_TX},
- { AFE_PORT_ID_SENARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SENARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SENARY_MI2S_TX},
- { AFE_PORT_ID_PRIMARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_0},
- { AFE_PORT_ID_PRIMARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_0},
- { AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_1},
- { AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_1},
- { AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_2},
- { AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_2},
- { AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_3},
- { AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_3},
- { AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_4},
- { AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_4},
- { AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_5},
- { AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_5},
- { AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_6},
- { AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_6},
- { AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_7},
- { AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_7},
- { AFE_PORT_ID_SECONDARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_0},
- { AFE_PORT_ID_SECONDARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_0},
- { AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_1},
- { AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_1},
- { AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_2},
- { AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_2},
- { AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_3},
- { AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_3},
- { AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_4},
- { AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_4},
- { AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_5},
- { AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_5},
- { AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_6},
- { AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_6},
- { AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_7},
- { AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_7},
- { AFE_PORT_ID_TERTIARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_0},
- { AFE_PORT_ID_TERTIARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_0},
- { AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_1},
- { AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_1},
- { AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_2},
- { AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_2},
- { AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_3},
- { AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_3},
- { AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_4},
- { AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_4},
- { AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_5},
- { AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_5},
- { AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_6},
- { AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_6},
- { AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_7},
- { AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_7},
- { AFE_PORT_ID_QUATERNARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_0},
- { AFE_PORT_ID_QUATERNARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_0},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_1},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_1},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_2},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_2},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_3},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_3},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_4},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_4},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_5},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_5},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_6},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_6},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_7},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_7},
- { INT_BT_A2DP_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
- { SLIMBUS_7_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
- { SLIMBUS_7_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
- { SLIMBUS_8_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
- { SLIMBUS_8_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
- { AFE_PORT_ID_USB_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_RX},
- { AFE_PORT_ID_USB_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_TX},
- { DISPLAY_PORT_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
- { AFE_PORT_ID_TERTIARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { INT_BT_A2DP_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
+ { SLIMBUS_7_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
+ { SLIMBUS_7_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
+ { SLIMBUS_8_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
+ { SLIMBUS_8_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
+ { AFE_PORT_ID_USB_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_RX},
+ { AFE_PORT_ID_USB_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_TX},
+ { DISPLAY_PORT_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
+ { AFE_PORT_ID_TERTIARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_AUXPCM_RX},
- { AFE_PORT_ID_TERTIARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_AUXPCM_TX},
- { AFE_PORT_ID_QUATERNARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_AUXPCM_RX},
- { AFE_PORT_ID_QUATERNARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_AUXPCM_TX},
};
@@ -2006,11 +2006,20 @@ static int msm_routing_slim_0_rx_aanc_mux_put(struct snd_kcontrol *kcontrol,
static int msm_routing_get_port_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
+ int idx = 0, shift = 0;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- if (test_bit(mc->shift,
- (unsigned long *)&msm_bedais[mc->reg].port_sessions))
+ idx = mc->shift/(sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+ shift = mc->shift%(sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+
+ if (idx >= BE_DAI_PORT_SESSIONS_IDX_MAX) {
+ pr_err("%s: Invalid idx = %d\n", __func__, idx);
+ return -EINVAL;
+ }
+
+ if (test_bit(shift,
+ (unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]))
ucontrol->value.integer.value[0] = 1;
else
ucontrol->value.integer.value[0] = 0;
@@ -2024,22 +2033,32 @@ static int msm_routing_get_port_mixer(struct snd_kcontrol *kcontrol,
static int msm_routing_put_port_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
+ int idx = 0, shift = 0;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- pr_debug("%s: reg 0x%x shift 0x%x val %ld\n", __func__, mc->reg,
- mc->shift, ucontrol->value.integer.value[0]);
+ idx = mc->shift/(sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+ shift = mc->shift%(sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+
+ if (idx >= BE_DAI_PORT_SESSIONS_IDX_MAX) {
+ pr_err("%s: Invalid idx = %d\n", __func__, idx);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: reg 0x%x shift 0x%x val %ld idx %d remainder shift %d\n",
+ __func__, mc->reg, mc->shift,
+ ucontrol->value.integer.value[0], idx, shift);
if (ucontrol->value.integer.value[0]) {
afe_loopback(1, msm_bedais[mc->reg].port_id,
msm_bedais[mc->shift].port_id);
- set_bit(mc->shift,
- (unsigned long *)&msm_bedais[mc->reg].port_sessions);
+ set_bit(shift,
+ (unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]);
} else {
afe_loopback(0, msm_bedais[mc->reg].port_id,
msm_bedais[mc->shift].port_id);
- clear_bit(mc->shift,
- (unsigned long *)&msm_bedais[mc->reg].port_sessions);
+ clear_bit(shift,
+ (unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]);
}
return 1;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index 6b7f2113e0f6..8e3086849d92 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -355,6 +355,7 @@ enum {
#define ADM_PP_PARAM_MUTE_BIT 1
#define ADM_PP_PARAM_LATENCY_ID 1
#define ADM_PP_PARAM_LATENCY_BIT 2
+#define BE_DAI_PORT_SESSIONS_IDX_MAX 4
struct msm_pcm_routing_evt {
void (*event_func)(enum msm_pcm_routing_event, void *);
@@ -365,10 +366,15 @@ struct msm_pcm_routing_bdai_data {
u16 port_id; /* AFE port ID */
u8 active; /* track if this backend is enabled */
unsigned long fe_sessions; /* Front-end sessions */
- u64 port_sessions; /* track Tx BE ports -> Rx BE
- * number of BE should not exceed
- * the size of this field
- */
+ /*
+ * Track Tx BE ports -> Rx BE ports.
+ * port_sessions[0] used to track BE 0 to BE 63.
+ * port_sessions[1] used to track BE 64 to BE 127.
+ * port_sessions[2] used to track BE 128 to BE 191.
+ * port_sessions[3] used to track BE 192 to BE 255.
+ */
+ u64 port_sessions[BE_DAI_PORT_SESSIONS_IDX_MAX];
+
unsigned int sample_rate;
unsigned int channel;
unsigned int format;
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 8337d11bad12..3a108eba3b01 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -50,7 +50,7 @@
#define IOVA_XFER_RING_MAX (IOVA_XFER_BUF_BASE - PAGE_SIZE)
#define IOVA_XFER_BUF_MAX (0xfffff000 - PAGE_SIZE)
-#define MAX_XFER_BUFF_LEN (2 * PAGE_SIZE)
+#define MAX_XFER_BUFF_LEN (24 * PAGE_SIZE)
struct iova_info {
struct list_head list;